-rw-r--r--Documentation/ABI/testing/evm23
-rw-r--r--Documentation/ABI/testing/sysfs-driver-hid-logitech-lg4ff7
-rw-r--r--Documentation/devicetree/bindings/arm/l2cc.txt44
-rw-r--r--Documentation/hwmon/coretemp14
-rw-r--r--Documentation/kernel-parameters.txt13
-rw-r--r--Documentation/networking/dmfe.txt3
-rw-r--r--Documentation/networking/ip-sysctl.txt4
-rw-r--r--Documentation/networking/scaling.txt12
-rw-r--r--Documentation/vm/transhuge.txt7
-rw-r--r--MAINTAINERS32
-rw-r--r--Makefile2
-rw-r--r--arch/arm/Kconfig71
-rw-r--r--arch/arm/Kconfig.debug91
-rw-r--r--arch/arm/Makefile3
-rw-r--r--arch/arm/boot/Makefile9
-rw-r--r--arch/arm/boot/compressed/Makefile8
-rw-r--r--arch/arm/boot/dts/tegra-harmony.dts12
-rw-r--r--arch/arm/boot/dts/tegra-seaboard.dts6
-rw-r--r--arch/arm/common/gic.c17
-rw-r--r--arch/arm/common/pl330.c2
-rw-r--r--arch/arm/common/vic.c4
-rw-r--r--arch/arm/configs/integrator_defconfig19
-rw-r--r--arch/arm/include/asm/Kbuild17
-rw-r--r--arch/arm/include/asm/auxvec.h4
-rw-r--r--arch/arm/include/asm/bitsperlong.h1
-rw-r--r--arch/arm/include/asm/bug.h55
-rw-r--r--arch/arm/include/asm/cachetype.h5
-rw-r--r--arch/arm/include/asm/cputime.h6
-rw-r--r--arch/arm/include/asm/cputype.h6
-rw-r--r--arch/arm/include/asm/device.h3
-rw-r--r--arch/arm/include/asm/dma-mapping.h2
-rw-r--r--arch/arm/include/asm/ecard.h1
-rw-r--r--arch/arm/include/asm/emergency-restart.h6
-rw-r--r--arch/arm/include/asm/errno.h6
-rw-r--r--arch/arm/include/asm/exception.h19
-rw-r--r--arch/arm/include/asm/futex.h34
-rw-r--r--arch/arm/include/asm/hardware/cache-l2x0.h42
-rw-r--r--arch/arm/include/asm/io.h38
-rw-r--r--arch/arm/include/asm/ioctl.h1
-rw-r--r--arch/arm/include/asm/irq_regs.h1
-rw-r--r--arch/arm/include/asm/kdebug.h1
-rw-r--r--arch/arm/include/asm/local.h1
-rw-r--r--arch/arm/include/asm/local64.h1
-rw-r--r--arch/arm/include/asm/localtimer.h6
-rw-r--r--arch/arm/include/asm/mach/arch.h3
-rw-r--r--arch/arm/include/asm/memory.h7
-rw-r--r--arch/arm/include/asm/module.h4
-rw-r--r--arch/arm/include/asm/outercache.h7
-rw-r--r--arch/arm/include/asm/page.h42
-rw-r--r--arch/arm/include/asm/percpu.h6
-rw-r--r--arch/arm/include/asm/pgalloc.h4
-rw-r--r--arch/arm/include/asm/pgtable-2level-hwdef.h93
-rw-r--r--arch/arm/include/asm/pgtable-2level-types.h67
-rw-r--r--arch/arm/include/asm/pgtable-2level.h143
-rw-r--r--arch/arm/include/asm/pgtable-hwdef.h77
-rw-r--r--arch/arm/include/asm/pgtable.h141
-rw-r--r--arch/arm/include/asm/poll.h1
-rw-r--r--arch/arm/include/asm/resource.h6
-rw-r--r--arch/arm/include/asm/sections.h1
-rw-r--r--arch/arm/include/asm/siginfo.h6
-rw-r--r--arch/arm/include/asm/sizes.h21
-rw-r--r--arch/arm/include/asm/smp.h11
-rw-r--r--arch/arm/include/asm/system.h11
-rw-r--r--arch/arm/include/asm/tlbflush.h4
-rw-r--r--arch/arm/include/asm/topology.h33
-rw-r--r--arch/arm/include/asm/unistd.h4
-rw-r--r--arch/arm/kernel/Makefile3
-rw-r--r--arch/arm/kernel/armksyms.c3
-rw-r--r--arch/arm/kernel/asm-offsets.c12
-rw-r--r--arch/arm/kernel/bios32.c9
-rw-r--r--arch/arm/kernel/debug.S4
-rw-r--r--arch/arm/kernel/ecard.c36
-rw-r--r--arch/arm/kernel/entry-armv.S44
-rw-r--r--arch/arm/kernel/head.S135
-rw-r--r--arch/arm/kernel/irq.c2
-rw-r--r--arch/arm/kernel/machine_kexec.c35
-rw-r--r--arch/arm/kernel/module.c2
-rw-r--r--arch/arm/kernel/perf_event_v7.c4
-rw-r--r--arch/arm/kernel/process.c2
-rw-r--r--arch/arm/kernel/setup.c37
-rw-r--r--arch/arm/kernel/smp.c55
-rw-r--r--arch/arm/kernel/smp_scu.c12
-rw-r--r--arch/arm/kernel/time.c6
-rw-r--r--arch/arm/kernel/topology.c148
-rw-r--r--arch/arm/kernel/traps.c32
-rw-r--r--arch/arm/kernel/vmlinux.lds.S18
-rw-r--r--arch/arm/lib/backtrace.S6
-rw-r--r--arch/arm/lib/div64.S8
-rw-r--r--arch/arm/lib/uaccess_with_memcpy.c1
-rw-r--r--arch/arm/mach-at91/Makefile.boot6
-rw-r--r--arch/arm/mach-bcmring/Makefile.boot2
-rw-r--r--arch/arm/mach-bcmring/arch.c4
-rw-r--r--arch/arm/mach-clps711x/Makefile.boot2
-rw-r--r--arch/arm/mach-clps711x/clep7312.c3
-rw-r--r--arch/arm/mach-clps711x/edb7211-arch.c3
-rw-r--r--arch/arm/mach-clps711x/fortunet.c3
-rw-r--r--arch/arm/mach-clps711x/p720t.c3
-rw-r--r--arch/arm/mach-cns3xxx/Makefile.boot2
-rw-r--r--arch/arm/mach-davinci/Makefile.boot4
-rw-r--r--arch/arm/mach-dove/Makefile.boot2
-rw-r--r--arch/arm/mach-dove/common.c2
-rw-r--r--arch/arm/mach-ebsa110/Makefile.boot2
-rw-r--r--arch/arm/mach-ebsa110/include/mach/io.h2
-rw-r--r--arch/arm/mach-ep93xx/Makefile.boot10
-rw-r--r--arch/arm/mach-exynos4/Kconfig1
-rw-r--r--arch/arm/mach-exynos4/Makefile.boot2
-rw-r--r--arch/arm/mach-exynos4/clock.c8
-rw-r--r--arch/arm/mach-exynos4/mct.c10
-rw-r--r--arch/arm/mach-exynos4/platsmp.c12
-rw-r--r--arch/arm/mach-exynos4/setup-keypad.c11
-rw-r--r--arch/arm/mach-footbridge/Kconfig4
-rw-r--r--arch/arm/mach-footbridge/Makefile.boot2
-rw-r--r--arch/arm/mach-footbridge/cats-hw.c3
-rw-r--r--arch/arm/mach-footbridge/include/mach/io.h2
-rw-r--r--arch/arm/mach-footbridge/netwinder-hw.c3
-rw-r--r--arch/arm/mach-gemini/Makefile.boot4
-rw-r--r--arch/arm/mach-h720x/Makefile.boot2
-rw-r--r--arch/arm/mach-imx/Makefile.boot10
-rw-r--r--arch/arm/mach-integrator/Makefile.boot2
-rw-r--r--arch/arm/mach-integrator/core.c4
-rw-r--r--arch/arm/mach-integrator/include/mach/io.h2
-rw-r--r--arch/arm/mach-integrator/include/mach/platform.h12
-rw-r--r--arch/arm/mach-integrator/integrator_ap.c88
-rw-r--r--arch/arm/mach-integrator/pci_v3.c2
-rw-r--r--arch/arm/mach-iop13xx/Makefile.boot2
-rw-r--r--arch/arm/mach-iop32x/Makefile.boot2
-rw-r--r--arch/arm/mach-iop33x/Makefile.boot2
-rw-r--r--arch/arm/mach-ixp2000/Makefile.boot2
-rw-r--r--arch/arm/mach-ixp23xx/Makefile.boot2
-rw-r--r--arch/arm/mach-ixp4xx/Makefile.boot2
-rw-r--r--arch/arm/mach-ixp4xx/common-pci.c3
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/io.h2
-rw-r--r--arch/arm/mach-kirkwood/Makefile.boot2
-rw-r--r--arch/arm/mach-ks8695/Makefile.boot2
-rw-r--r--arch/arm/mach-lpc32xx/Makefile.boot2
-rw-r--r--arch/arm/mach-mmp/Makefile.boot2
-rw-r--r--arch/arm/mach-mmp/aspenite.c2
-rw-r--r--arch/arm/mach-mmp/ttc_dkb.c2
-rw-r--r--arch/arm/mach-msm/Makefile.boot2
-rw-r--r--arch/arm/mach-msm/board-halibut.c4
-rw-r--r--arch/arm/mach-msm/board-mahimahi.c4
-rw-r--r--arch/arm/mach-msm/board-msm7x30.c22
-rw-r--r--arch/arm/mach-msm/board-msm8960.c22
-rw-r--r--arch/arm/mach-msm/board-msm8x60.c25
-rw-r--r--arch/arm/mach-msm/board-sapphire.c4
-rw-r--r--arch/arm/mach-msm/board-trout.c4
-rw-r--r--arch/arm/mach-msm/include/mach/memory.h6
-rw-r--r--arch/arm/mach-msm/platsmp.c6
-rw-r--r--arch/arm/mach-mv78xx0/Makefile.boot2
-rw-r--r--arch/arm/mach-mx5/Makefile.boot6
-rw-r--r--arch/arm/mach-mxs/Makefile.boot2
-rw-r--r--arch/arm/mach-netx/Makefile.boot2
-rw-r--r--arch/arm/mach-nomadik/Makefile.boot2
-rw-r--r--arch/arm/mach-nuc93x/Makefile.boot2
-rw-r--r--arch/arm/mach-omap1/Makefile.boot2
-rw-r--r--arch/arm/mach-omap2/Kconfig2
-rw-r--r--arch/arm/mach-omap2/Makefile.boot2
-rw-r--r--arch/arm/mach-omap2/board-2430sdp.c3
-rw-r--r--arch/arm/mach-omap2/hsmmc.c12
-rw-r--r--arch/arm/mach-omap2/omap-smp.c10
-rw-r--r--arch/arm/mach-omap2/usb-musb.c3
-rw-r--r--arch/arm/mach-orion5x/Makefile.boot2
-rw-r--r--arch/arm/mach-orion5x/common.c4
-rw-r--r--arch/arm/mach-orion5x/common.h4
-rw-r--r--arch/arm/mach-pnx4008/Makefile.boot2
-rw-r--r--arch/arm/mach-prima2/Makefile.boot2
-rw-r--r--arch/arm/mach-pxa/Makefile.boot2
-rw-r--r--arch/arm/mach-pxa/cm-x300.c4
-rw-r--r--arch/arm/mach-pxa/corgi.c4
-rw-r--r--arch/arm/mach-pxa/eseries.c3
-rw-r--r--arch/arm/mach-pxa/eseries.h3
-rw-r--r--arch/arm/mach-pxa/irq.c2
-rw-r--r--arch/arm/mach-pxa/poodle.c4
-rw-r--r--arch/arm/mach-pxa/saar.c2
-rw-r--r--arch/arm/mach-pxa/spitz.c4
-rw-r--r--arch/arm/mach-pxa/tosa.c4
-rw-r--r--arch/arm/mach-realview/Makefile.boot4
-rw-r--r--arch/arm/mach-realview/core.c3
-rw-r--r--arch/arm/mach-realview/core.h4
-rw-r--r--arch/arm/mach-realview/include/mach/board-pb1176.h1
-rw-r--r--arch/arm/mach-realview/platsmp.c10
-rw-r--r--arch/arm/mach-realview/realview_pb1176.c48
-rw-r--r--arch/arm/mach-realview/realview_pbx.c6
-rw-r--r--arch/arm/mach-rpc/Makefile.boot2
-rw-r--r--arch/arm/mach-rpc/include/mach/hardware.h25
-rw-r--r--arch/arm/mach-rpc/include/mach/io.h193
-rw-r--r--arch/arm/mach-rpc/riscpc.c2
-rw-r--r--arch/arm/mach-s3c2410/Makefile.boot4
-rw-r--r--arch/arm/mach-s3c2410/include/mach/io.h2
-rw-r--r--arch/arm/mach-s3c2410/s3c2410.c2
-rw-r--r--arch/arm/mach-s3c2412/mach-smdk2413.c3
-rw-r--r--arch/arm/mach-s3c2412/mach-vstms.c5
-rw-r--r--arch/arm/mach-s3c2412/s3c2412.c2
-rw-r--r--arch/arm/mach-s3c2416/s3c2416.c2
-rw-r--r--arch/arm/mach-s3c2440/s3c2440.c2
-rw-r--r--arch/arm/mach-s3c2440/s3c2442.c2
-rw-r--r--arch/arm/mach-s3c2443/clock.c2
-rw-r--r--arch/arm/mach-s3c64xx/Makefile.boot2
-rw-r--r--arch/arm/mach-s3c64xx/mach-smdk6410.c39
-rw-r--r--arch/arm/mach-s5p64x0/Makefile.boot2
-rw-r--r--arch/arm/mach-s5pc100/Makefile.boot2
-rw-r--r--arch/arm/mach-s5pv210/Makefile.boot2
-rw-r--r--arch/arm/mach-s5pv210/clock.c6
-rw-r--r--arch/arm/mach-sa1100/Makefile1
-rw-r--r--arch/arm/mach-sa1100/Makefile.boot5
-rw-r--r--arch/arm/mach-sa1100/assabet.c3
-rw-r--r--arch/arm/mach-sa1100/include/mach/io.h6
-rw-r--r--arch/arm/mach-sa1100/include/mach/simpad.h94
-rw-r--r--arch/arm/mach-sa1100/leds-simpad.c100
-rw-r--r--arch/arm/mach-sa1100/leds.c2
-rw-r--r--arch/arm/mach-sa1100/leds.h1
-rw-r--r--arch/arm/mach-sa1100/simpad.c213
-rw-r--r--arch/arm/mach-shark/Makefile.boot2
-rw-r--r--arch/arm/mach-shmobile/Makefile.boot2
-rw-r--r--arch/arm/mach-shmobile/platsmp.c6
-rw-r--r--arch/arm/mach-spear3xx/Makefile.boot2
-rw-r--r--arch/arm/mach-spear6xx/Makefile.boot2
-rw-r--r--arch/arm/mach-tcc8k/Makefile.boot2
-rw-r--r--arch/arm/mach-tegra/Makefile.boot2
-rw-r--r--arch/arm/mach-tegra/board-harmony.c4
-rw-r--r--arch/arm/mach-tegra/board-paz00.c4
-rw-r--r--arch/arm/mach-tegra/board-trimslice.c4
-rw-r--r--arch/arm/mach-tegra/cpu-tegra.c1
-rw-r--r--arch/arm/mach-tegra/platsmp.c8
-rw-r--r--arch/arm/mach-u300/Makefile.boot4
-rw-r--r--arch/arm/mach-ux500/Kconfig1
-rw-r--r--arch/arm/mach-ux500/Makefile.boot2
-rw-r--r--arch/arm/mach-ux500/platsmp.c10
-rw-r--r--arch/arm/mach-versatile/Makefile.boot2
-rw-r--r--arch/arm/mach-vexpress/Makefile.boot2
-rw-r--r--arch/arm/mach-vexpress/ct-ca9x4.c6
-rw-r--r--arch/arm/mach-vexpress/hotplug.c9
-rw-r--r--arch/arm/mach-vexpress/include/mach/io.h2
-rw-r--r--arch/arm/mach-vt8500/Makefile.boot2
-rw-r--r--arch/arm/mach-vt8500/include/mach/io.h2
-rw-r--r--arch/arm/mach-w90x900/Makefile.boot2
-rw-r--r--arch/arm/mach-zynq/Makefile.boot2
-rw-r--r--arch/arm/mm/alignment.c20
-rw-r--r--arch/arm/mm/cache-l2x0.c216
-rw-r--r--arch/arm/mm/cache-v7.S20
-rw-r--r--arch/arm/mm/dma-mapping.c8
-rw-r--r--arch/arm/mm/fault.c1
-rw-r--r--arch/arm/mm/init.c7
-rw-r--r--arch/arm/mm/ioremap.c21
-rw-r--r--arch/arm/mm/mm.h4
-rw-r--r--arch/arm/mm/mmu.c18
-rw-r--r--arch/arm/mm/proc-v7.S2
-rw-r--r--arch/arm/plat-omap/Kconfig1
-rw-r--r--arch/arm/plat-s5p/irq-gpioint.c15
-rw-r--r--arch/arm/plat-samsung/clock.c11
-rw-r--r--arch/arm/plat-samsung/include/plat/clock.h8
-rw-r--r--arch/arm/plat-samsung/include/plat/watchdog-reset.h10
-rw-r--r--arch/arm/tools/mach-types20
-rw-r--r--arch/arm/vfp/Makefile2
-rw-r--r--arch/m68k/kernel/vmlinux.lds_no.S1
-rw-r--r--arch/m68k/mac/macints.c2
-rw-r--r--arch/m68k/mac/misc.c40
-rw-r--r--arch/mips/Kconfig6
-rw-r--r--arch/mips/alchemy/common/platform.c2
-rw-r--r--arch/mips/alchemy/common/power.c22
-rw-r--r--arch/mips/alchemy/devboards/bcsr.c4
-rw-r--r--arch/mips/alchemy/devboards/db1200/setup.c7
-rw-r--r--arch/mips/ar7/irq.c3
-rw-r--r--arch/mips/bcm63xx/irq.c1
-rw-r--r--arch/mips/cobalt/irq.c1
-rw-r--r--arch/mips/dec/setup.c4
-rw-r--r--arch/mips/emma/markeins/irq.c2
-rw-r--r--arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h1
-rw-r--r--arch/mips/include/asm/mach-powertv/dma-coherence.h1
-rw-r--r--arch/mips/include/asm/stackframe.h4
-rw-r--r--arch/mips/jz4740/gpio.c52
-rw-r--r--arch/mips/kernel/ftrace.c39
-rw-r--r--arch/mips/kernel/i8259.c3
-rw-r--r--arch/mips/kernel/linux32.c7
-rw-r--r--arch/mips/kernel/scall64-n32.S2
-rw-r--r--arch/mips/kernel/scall64-o32.S2
-rw-r--r--arch/mips/kernel/signal.c3
-rw-r--r--arch/mips/kernel/traps.c16
-rw-r--r--arch/mips/kernel/vpe.c2
-rw-r--r--arch/mips/lantiq/irq.c6
-rw-r--r--arch/mips/lantiq/xway/ebu.c1
-rw-r--r--arch/mips/lantiq/xway/pmu.c1
-rw-r--r--arch/mips/lasat/interrupt.c1
-rw-r--r--arch/mips/loongson/fuloong-2e/irq.c1
-rw-r--r--arch/mips/loongson/lemote-2f/irq.c3
-rw-r--r--arch/mips/mm/mmap.c48
-rw-r--r--arch/mips/mm/tlbex.c6
-rw-r--r--arch/mips/mti-malta/malta-int.c6
-rw-r--r--arch/mips/netlogic/xlr/Makefile2
-rw-r--r--arch/mips/pci/pci-lantiq.c9
-rw-r--r--arch/mips/pci/pci-rc32434.c2
-rw-r--r--arch/mips/pmc-sierra/msp71xx/msp_irq.c6
-rw-r--r--arch/mips/pnx8550/common/int.c2
-rw-r--r--arch/mips/sgi-ip22/ip22-int.c10
-rw-r--r--arch/mips/sni/rm200.c1
-rw-r--r--arch/mips/vr41xx/common/irq.c1
-rw-r--r--arch/powerpc/platforms/powermac/pci.c14
-rw-r--r--arch/s390/include/asm/elf.h3
-rw-r--r--arch/s390/include/asm/pgtable.h2
-rw-r--r--arch/s390/kernel/asm-offsets.c3
-rw-r--r--arch/s390/kernel/entry64.S6
-rw-r--r--arch/s390/kvm/kvm-s390.c5
-rw-r--r--arch/s390/mm/pgtable.c17
-rw-r--r--arch/sparc/include/asm/pgtsrmmu.h2
-rw-r--r--arch/sparc/include/asm/spitfire.h2
-rw-r--r--arch/sparc/include/asm/xor_64.h4
-rw-r--r--arch/sparc/kernel/cpu.c12
-rw-r--r--arch/sparc/kernel/cpumap.c2
-rw-r--r--arch/sparc/kernel/head_64.S25
-rw-r--r--arch/sparc/kernel/pci.c3
-rw-r--r--arch/sparc/kernel/process_32.c3
-rw-r--r--arch/sparc/kernel/process_64.c3
-rw-r--r--arch/sparc/kernel/setup_32.c2
-rw-r--r--arch/sparc/kernel/setup_64.c18
-rw-r--r--arch/sparc/kernel/signal32.c21
-rw-r--r--arch/sparc/kernel/signal_32.c32
-rw-r--r--arch/sparc/kernel/signal_64.c32
-rw-r--r--arch/sparc/mm/init_64.c5
-rw-r--r--arch/sparc/mm/leon_mm.c2
-rw-r--r--arch/tile/kernel/intvec_32.S2
-rw-r--r--arch/tile/lib/atomic_asm_32.S2
-rw-r--r--arch/x86/include/asm/alternative-asm.h1
-rw-r--r--arch/x86/include/asm/alternative.h4
-rw-r--r--arch/x86/include/asm/cpufeature.h2
-rw-r--r--arch/x86/include/asm/xen/page.h6
-rw-r--r--arch/x86/kernel/kprobes.c4
-rw-r--r--arch/x86/kernel/rtc.c23
-rw-r--r--arch/x86/kernel/vsyscall_64.c2
-rw-r--r--arch/x86/kvm/emulate.c2
-rw-r--r--arch/x86/kvm/mmu.c3
-rw-r--r--arch/x86/mm/init.c3
-rw-r--r--arch/x86/pci/acpi.c11
-rw-r--r--arch/x86/pci/xen.c32
-rw-r--r--arch/x86/platform/mrst/mrst.c22
-rw-r--r--arch/x86/platform/mrst/vrtc.c9
-rw-r--r--arch/x86/xen/Kconfig11
-rw-r--r--arch/x86/xen/enlighten.c1
-rw-r--r--arch/x86/xen/mmu.c58
-rw-r--r--arch/x86/xen/p2m.c128
-rw-r--r--arch/x86/xen/setup.c282
-rw-r--r--arch/x86/xen/smp.c1
-rw-r--r--block/blk-cgroup.c37
-rw-r--r--block/blk-core.c28
-rw-r--r--block/blk-softirq.c2
-rw-r--r--block/blk-sysfs.c15
-rw-r--r--block/cfq-iosched.c20
-rw-r--r--crypto/ghash-generic.c6
-rw-r--r--drivers/base/power/clock_ops.c75
-rw-r--r--drivers/block/floppy.c8
-rw-r--r--drivers/block/xen-blkback/blkback.c2
-rw-r--r--drivers/block/xen-blkback/common.h2
-rw-r--r--drivers/block/xen-blkback/xenbus.c6
-rw-r--r--drivers/bluetooth/btusb.c6
-rw-r--r--drivers/bluetooth/btwilink.c16
-rw-r--r--drivers/char/apm-emulation.c16
-rw-r--r--drivers/char/tpm/Kconfig1
-rw-r--r--drivers/char/tpm/tpm.c12
-rw-r--r--drivers/char/tpm/tpm_nsc.c2
-rw-r--r--drivers/firewire/ohci.c3
-rw-r--r--drivers/gpio/gpio-omap.c2
-rw-r--r--drivers/gpio/gpio-pca953x.c1
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c4
-rw-r--r--drivers/gpu/drm/i915/intel_display.c22
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h3
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c88
-rw-r--r--drivers/gpu/drm/radeon/atom.c15
-rw-r--r--drivers/gpu/drm/radeon/atom.h1
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c2
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c28
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c58
-rw-r--r--drivers/gpu/drm/radeon/ni.c44
-rw-r--r--drivers/gpu/drm/radeon/r100.c22
-rw-r--r--drivers/gpu/drm/radeon/r200.c4
-rw-r--r--drivers/gpu/drm/radeon/r600.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon.h7
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h8
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c29
-rw-r--r--drivers/gpu/drm/radeon/radeon_cursor.c40
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c35
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c7
-rw-r--r--drivers/gpu/drm/radeon/rv770.c51
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c2
-rw-r--r--drivers/hid/Kconfig42
-rw-r--r--drivers/hid/Makefile4
-rw-r--r--drivers/hid/hid-apple.c15
-rw-r--r--drivers/hid/hid-axff.c36
-rw-r--r--drivers/hid/hid-core.c79
-rw-r--r--drivers/hid/hid-debug.c5
-rw-r--r--drivers/hid/hid-ids.h21
-rw-r--r--drivers/hid/hid-input.c11
-rw-r--r--drivers/hid/hid-lg.c29
-rw-r--r--drivers/hid/hid-lg.h4
-rw-r--r--drivers/hid/hid-lg4ff.c403
-rw-r--r--drivers/hid/hid-lgff.c13
-rw-r--r--drivers/hid/hid-logitech-dj.c922
-rw-r--r--drivers/hid/hid-logitech-dj.h123
-rw-r--r--drivers/hid/hid-magicmouse.c7
-rw-r--r--drivers/hid/hid-multitouch.c93
-rw-r--r--drivers/hid/hid-primax.c117
-rw-r--r--drivers/hid/hid-prodikeys.c8
-rw-r--r--drivers/hid/hid-roccat-kone.c63
-rw-r--r--drivers/hid/hid-roccat-kovaplus.c17
-rw-r--r--drivers/hid/hid-roccat-pyra.c23
-rw-r--r--drivers/hid/hid-sjoy.c75
-rw-r--r--drivers/hid/hid-wacom.c79
-rw-r--r--drivers/hid/hid-wiimote.c800
-rw-r--r--drivers/hid/hid-zydacron.c4
-rw-r--r--drivers/hid/hidraw.c11
-rw-r--r--drivers/hid/usbhid/hid-core.c2
-rw-r--r--drivers/hid/usbhid/hid-quirks.c2
-rw-r--r--drivers/hid/usbhid/hiddev.c2
-rw-r--r--drivers/hwmon/coretemp.c216
-rw-r--r--drivers/hwmon/ds620.c2
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c9
-rw-r--r--drivers/hwmon/w83627ehf.c20
-rw-r--r--drivers/hwmon/w83791d.c4
-rw-r--r--drivers/ide/ide-disk.c7
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c10
-rw-r--r--drivers/input/keyboard/adp5588-keys.c1
-rw-r--r--drivers/input/misc/cm109.c2
-rw-r--r--drivers/input/mouse/bcm5974.c20
-rw-r--r--drivers/input/tablet/wacom_sys.c14
-rw-r--r--drivers/input/tablet/wacom_wac.c52
-rw-r--r--drivers/input/touchscreen/wacom_w8001.c2
-rw-r--r--drivers/iommu/dmar.c2
-rw-r--r--drivers/iommu/intel-iommu.c75
-rw-r--r--drivers/md/dm-crypt.c2
-rw-r--r--drivers/md/dm-flakey.c4
-rw-r--r--drivers/md/dm-kcopyd.c1
-rw-r--r--drivers/md/dm-raid.c2
-rw-r--r--drivers/md/dm-table.c32
-rw-r--r--drivers/md/md.c22
-rw-r--r--drivers/md/md.h2
-rw-r--r--drivers/md/multipath.c3
-rw-r--r--drivers/md/raid1.c3
-rw-r--r--drivers/md/raid10.c5
-rw-r--r--drivers/md/raid5.c6
-rw-r--r--drivers/media/video/omap/omap_vout.c13
-rw-r--r--drivers/media/video/omap3isp/ispccdc.c1
-rw-r--r--drivers/media/video/uvc/uvc_driver.c2
-rw-r--r--drivers/media/video/uvc/uvc_entity.c2
-rw-r--r--drivers/media/video/uvc/uvc_video.c10
-rw-r--r--drivers/media/video/uvc/uvcvideo.h2
-rw-r--r--drivers/media/video/v4l2-dev.c11
-rw-r--r--drivers/media/video/v4l2-device.c2
-rw-r--r--drivers/mfd/jz4740-adc.c2
-rw-r--r--drivers/mfd/max8997.c5
-rw-r--r--drivers/mfd/omap-usb-host.c2
-rw-r--r--drivers/mfd/tps65910-irq.c2
-rw-r--r--drivers/mfd/twl4030-madc.c5
-rw-r--r--drivers/mfd/wm8350-gpio.c4
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.c14
-rw-r--r--drivers/mmc/card/block.c3
-rw-r--r--drivers/net/Kconfig11
-rw-r--r--drivers/net/bnx2x/bnx2x.h142
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.c27
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.h2
-rw-r--r--drivers/net/bnx2x/bnx2x_dcb.c1
-rw-r--r--drivers/net/bnx2x/bnx2x_ethtool.c48
-rw-r--r--drivers/net/bnx2x/bnx2x_link.c46
-rw-r--r--drivers/net/bnx2x/bnx2x_main.c178
-rw-r--r--drivers/net/bnx2x/bnx2x_reg.h19
-rw-r--r--drivers/net/bnx2x/bnx2x_stats.c7
-rw-r--r--drivers/net/bonding/bond_3ad.c3
-rw-r--r--drivers/net/bonding/bond_alb.c3
-rw-r--r--drivers/net/bonding/bond_main.c20
-rw-r--r--drivers/net/can/mscan/mscan.c11
-rw-r--r--drivers/net/can/ti_hecc.c1
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.c23
-rw-r--r--drivers/net/cxgb3/l2t.c15
-rw-r--r--drivers/net/cxgb3/l2t.h16
-rw-r--r--drivers/net/cxgb4/cxgb4_main.c3
-rw-r--r--drivers/net/e1000/e1000_hw.c6
-rw-r--r--drivers/net/gianfar_ethtool.c8
-rw-r--r--drivers/net/greth.c12
-rw-r--r--drivers/net/greth.h1
-rw-r--r--drivers/net/ibmveth.c52
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c4
-rw-r--r--drivers/net/macvlan.c2
-rw-r--r--drivers/net/mlx4/en_tx.c6
-rw-r--r--drivers/net/netconsole.c13
-rw-r--r--drivers/net/pch_gbe/pch_gbe.h12
-rw-r--r--drivers/net/pch_gbe/pch_gbe_main.c346
-rw-r--r--drivers/net/phy/dp83640.c4
-rw-r--r--drivers/net/ppp_generic.c7
-rw-r--r--drivers/net/pptp.c22
-rw-r--r--drivers/net/pxa168_eth.c1
-rw-r--r--drivers/net/r8169.c104
-rw-r--r--drivers/net/sfc/efx.c18
-rw-r--r--drivers/net/sfc/io.h6
-rw-r--r--drivers/net/sfc/mcdi.c46
-rw-r--r--drivers/net/sfc/nic.c7
-rw-r--r--drivers/net/sfc/nic.h2
-rw-r--r--drivers/net/sfc/siena.c25
-rw-r--r--drivers/net/sfc/workarounds.h2
-rw-r--r--drivers/net/smsc911x.c2
-rw-r--r--drivers/net/tg3.c4
-rw-r--r--drivers/net/usb/ipheth.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c10
-rw-r--r--drivers/net/wireless/b43/main.c3
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c21
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c39
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-rs.c13
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.c4
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-hcmd.c2
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-tx.c4
-rw-r--r--drivers/net/wireless/iwlegacy/iwl3945-base.c8
-rw-r--r--drivers/net/wireless/iwlegacy/iwl4965-base.c10
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-ucode.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c30
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c47
-rw-r--r--drivers/net/wireless/rtlwifi/core.c8
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.c11
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c1
-rw-r--r--drivers/net/xen-netback/interface.c4
-rw-r--r--drivers/pci/pci.c6
-rw-r--r--drivers/pci/probe.c14
-rw-r--r--drivers/pci/xen-pcifront.c5
-rw-r--r--drivers/pcmcia/sa1100_simpad.c35
-rw-r--r--drivers/s390/cio/cio.c8
-rw-r--r--drivers/scsi/3w-9xxx.c2
-rw-r--r--drivers/scsi/Kconfig1
-rw-r--r--drivers/scsi/Makefile2
-rw-r--r--drivers/scsi/aacraid/commsup.c2
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c2
-rw-r--r--drivers/scsi/libsas/sas_expander.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c9
-rw-r--r--drivers/spi/spi-fsl-spi.c3
-rw-r--r--drivers/spi/spi-imx.c4
-rw-r--r--drivers/spi/spi-topcliff-pch.c93
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc.c4
-rw-r--r--drivers/staging/octeon/ethernet-rx.c3
-rw-r--r--drivers/staging/zcache/zcache-main.c2
-rw-r--r--drivers/target/Makefile1
-rw-r--r--drivers/target/iscsi/iscsi_target.c39
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c34
-rw-r--r--drivers/target/iscsi/iscsi_target_core.h4
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.c49
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_tmr.c6
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c290
-rw-r--r--drivers/target/iscsi/iscsi_target_util.h1
-rw-r--r--drivers/target/loopback/tcm_loop.c18
-rw-r--r--drivers/target/target_core_alua.c20
-rw-r--r--drivers/target/target_core_cdb.c84
-rw-r--r--drivers/target/target_core_configfs.c15
-rw-r--r--drivers/target/target_core_device.c70
-rw-r--r--drivers/target/target_core_fabric_lib.c12
-rw-r--r--drivers/target/target_core_file.c55
-rw-r--r--drivers/target/target_core_file.h4
-rw-r--r--drivers/target/target_core_iblock.c273
-rw-r--r--drivers/target/target_core_iblock.h2
-rw-r--r--drivers/target/target_core_pscsi.c260
-rw-r--r--drivers/target/target_core_pscsi.h1
-rw-r--r--drivers/target/target_core_rd.c18
-rw-r--r--drivers/target/target_core_rd.h2
-rw-r--r--drivers/target/target_core_scdb.c105
-rw-r--r--drivers/target/target_core_scdb.h10
-rw-r--r--drivers/target/target_core_tmr.c250
-rw-r--r--drivers/target/target_core_transport.c1483
-rw-r--r--drivers/target/tcm_fc/tcm_fc.h12
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c107
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c19
-rw-r--r--drivers/target/tcm_fc/tfc_io.c62
-rw-r--r--drivers/tty/serial/lantiq.c4
-rw-r--r--drivers/usb/host/xhci-hub.c2
-rw-r--r--drivers/usb/host/xhci-ring.c19
-rw-r--r--drivers/watchdog/hpwdt.c9
-rw-r--r--drivers/watchdog/lantiq_wdt.c8
-rw-r--r--drivers/watchdog/sbc_epx_c3.c2
-rw-r--r--drivers/watchdog/watchdog_dev.c14
-rw-r--r--drivers/xen/Kconfig10
-rw-r--r--drivers/xen/Makefile4
-rw-r--r--drivers/xen/balloon.c62
-rw-r--r--drivers/xen/events.c87
-rw-r--r--drivers/xen/gntdev.c39
-rw-r--r--drivers/xen/grant-table.c6
-rw-r--r--drivers/xen/pci.c105
-rw-r--r--drivers/xen/swiotlb-xen.c70
-rw-r--r--drivers/xen/xen-pciback/conf_space.c1
-rw-r--r--drivers/xen/xen-pciback/conf_space_header.c5
-rw-r--r--drivers/xen/xen-pciback/conf_space_quirks.c3
-rw-r--r--drivers/xen/xen-pciback/passthrough.c34
-rw-r--r--drivers/xen/xen-pciback/pci_stub.c35
-rw-r--r--drivers/xen/xen-pciback/pciback.h32
-rw-r--r--drivers/xen/xen-pciback/pciback_ops.c1
-rw-r--r--drivers/xen/xen-pciback/vpci.c35
-rw-r--r--drivers/xen/xen-pciback/xenbus.c27
-rw-r--r--drivers/xen/xen-selfballoon.c67
-rw-r--r--drivers/xen/xenbus/xenbus_comms.c4
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c101
-rw-r--r--drivers/xen/xenbus/xenbus_probe_backend.c2
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c121
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c17
-rw-r--r--drivers/zorro/zorro-driver.c8
-rw-r--r--drivers/zorro/zorro.c7
-rw-r--r--fs/attr.c5
-rw-r--r--fs/btrfs/file.c33
-rw-r--r--fs/btrfs/inode.c18
-rw-r--r--fs/btrfs/ioctl.c32
-rw-r--r--fs/btrfs/xattr.c50
-rw-r--r--fs/cifs/cifsencrypt.c54
-rw-r--r--fs/cifs/cifsfs.c10
-rw-r--r--fs/cifs/cifssmb.c3
-rw-r--r--fs/cifs/connect.c6
-rw-r--r--fs/cifs/xattr.c40
-rw-r--r--fs/ext2/xattr_security.c34
-rw-r--r--fs/ext3/inode.c4
-rw-r--r--fs/ext3/namei.c3
-rw-r--r--fs/ext3/xattr_security.c36
-rw-r--r--fs/ext4/inode.c4
-rw-r--r--fs/ext4/namei.c3
-rw-r--r--fs/ext4/xattr_security.c36
-rw-r--r--fs/gfs2/inode.c38
-rw-r--r--fs/gfs2/log.c4
-rw-r--r--fs/gfs2/meta_io.c6
-rw-r--r--fs/gfs2/ops_fstype.c2
-rw-r--r--fs/gfs2/quota.c2
-rw-r--r--fs/hfsplus/super.c15
-rw-r--r--fs/hfsplus/wrapper.c4
-rw-r--r--fs/jffs2/security.c35
-rw-r--r--fs/jfs/xattr.c57
-rw-r--r--fs/namei.c8
-rw-r--r--fs/namespace.c2
-rw-r--r--fs/nfs/nfs4_fs.h8
-rw-r--r--fs/nfs/nfs4proc.c20
-rw-r--r--fs/nfs/nfs4renewd.c12
-rw-r--r--fs/nfs/nfs4state.c6
-rw-r--r--fs/nfs/super.c25
-rw-r--r--fs/nfs/write.c2
-rw-r--r--fs/ocfs2/xattr.c38
-rw-r--r--fs/proc/task_mmu.c80
-rw-r--r--fs/quota/quota.c2
-rw-r--r--fs/reiserfs/xattr_security.c4
-rw-r--r--fs/stat.c2
-rw-r--r--fs/xattr.c63
-rw-r--r--fs/xfs/xfs_buf_item.c3
-rw-r--r--fs/xfs/xfs_dquot_item.c10
-rw-r--r--fs/xfs/xfs_inode_item.c10
-rw-r--r--fs/xfs/xfs_iops.c39
-rw-r--r--fs/xfs/xfs_linux.h2
-rw-r--r--fs/xfs/xfs_super.c13
-rw-r--r--fs/xfs/xfs_trans.h2
-rw-r--r--fs/xfs/xfs_trans_ail.c83
-rw-r--r--fs/xfs/xfs_trans_priv.h8
-rw-r--r--include/asm-generic/vmlinux.lds.h1
-rw-r--r--include/linux/blk_types.h6
-rw-r--r--include/linux/blkdev.h1
-rw-r--r--include/linux/device-mapper.h5
-rw-r--r--include/linux/evm.h100
-rw-r--r--include/linux/fs.h2
-rw-r--r--include/linux/hid.h3
-rw-r--r--include/linux/ima.h13
-rw-r--r--include/linux/integrity.h39
-rw-r--r--include/linux/irqdomain.h1
-rw-r--r--include/linux/kernel.h2
-rw-r--r--include/linux/kvm.h1
-rw-r--r--include/linux/mfd/wm8994/pdata.h2
-rw-r--r--include/linux/namei.h3
-rw-r--r--include/linux/pci.h3
-rw-r--r--include/linux/ptp_classify.h13
-rw-r--r--include/linux/sched.h1
-rw-r--r--include/linux/security.h32
-rw-r--r--include/linux/skbuff.h1
-rw-r--r--include/linux/snmp.h2
-rw-r--r--include/linux/xattr.h19
-rw-r--r--include/net/flow.h25
-rw-r--r--include/net/ip_vs.h1
-rw-r--r--include/net/request_sock.h3
-rw-r--r--include/net/sctp/command.h1
-rw-r--r--include/net/tcp.h22
-rw-r--r--include/net/transp_v6.h1
-rw-r--r--include/net/udplite.h63
-rw-r--r--include/target/target_core_base.h71
-rw-r--r--include/target/target_core_tmr.h2
-rw-r--r--include/target/target_core_transport.h75
-rw-r--r--include/trace/events/writeback.h10
-rw-r--r--include/xen/balloon.h5
-rw-r--r--include/xen/grant_table.h1
-rw-r--r--include/xen/interface/io/xs_wire.h6
-rw-r--r--include/xen/interface/physdev.h34
-rw-r--r--include/xen/page.h12
-rw-r--r--init/main.c19
-rw-r--r--kernel/cred.c18
-rw-r--r--kernel/irq/chip.c2
-rw-r--r--kernel/irq/irqdomain.c6
-rw-r--r--kernel/posix-cpu-timers.c12
-rw-r--r--kernel/ptrace.c23
-rw-r--r--kernel/resource.c7
-rw-r--r--kernel/sched.c26
-rw-r--r--kernel/sched_rt.c4
-rw-r--r--kernel/sys.c2
-rw-r--r--kernel/taskstats.c1
-rw-r--r--kernel/tsacct.c15
-rw-r--r--lib/Kconfig.debug4
-rw-r--r--lib/hexdump.c15
-rw-r--r--lib/xz/xz_dec_bcj.c27
-rw-r--r--mm/backing-dev.c30
-rw-r--r--mm/migrate.c8
-rw-r--r--mm/shmem.c4
-rw-r--r--mm/slub.c2
-rw-r--r--net/batman-adv/soft-interface.c10
-rw-r--r--net/bluetooth/hci_event.c17
-rw-r--r--net/bluetooth/l2cap_sock.c4
-rw-r--r--net/bluetooth/rfcomm/sock.c3
-rw-r--r--net/bluetooth/sco.c5
-rw-r--r--net/bridge/br_device.c3
-rw-r--r--net/bridge/br_if.c9
-rw-r--r--net/bridge/br_netlink.c1
-rw-r--r--net/bridge/br_private.h1
-rw-r--r--net/bridge/netfilter/Kconfig2
-rw-r--r--net/caif/caif_dev.c6
-rw-r--r--net/can/af_can.c2
-rw-r--r--net/can/bcm.c53
-rw-r--r--net/ceph/ceph_common.c1
-rw-r--r--net/ceph/messenger.c1
-rw-r--r--net/ceph/osd_client.c4
-rw-r--r--net/ceph/osdmap.c84
-rw-r--r--net/core/dev.c8
-rw-r--r--net/core/fib_rules.c9
-rw-r--r--net/core/flow.c36
-rw-r--r--net/core/skbuff.c22
-rw-r--r--net/ethernet/eth.c2
-rw-r--r--net/ipv4/af_inet.c7
-rw-r--r--net/ipv4/fib_semantics.c10
-rw-r--r--net/ipv4/netfilter/ip_queue.c12
-rw-r--r--net/ipv4/proc.c2
-rw-r--r--net/ipv4/tcp_input.c6
-rw-r--r--net/ipv4/tcp_ipv4.c60
-rw-r--r--net/ipv4/tcp_minisocks.c1
-rw-r--r--net/ipv6/addrconf.c4
-rw-r--r--net/ipv6/af_inet6.c1
-rw-r--r--net/ipv6/datagram.c5
-rw-r--r--net/ipv6/ip6_flowlabel.c8
-rw-r--r--net/ipv6/ip6mr.c8
-rw-r--r--net/ipv6/ipv6_sockglue.c2
-rw-r--r--net/ipv6/netfilter/ip6_queue.c12
-rw-r--r--net/ipv6/raw.c4
-rw-r--r--net/ipv6/route.c37
-rw-r--r--net/ipv6/tcp_ipv6.c42
-rw-r--r--net/ipv6/udp.c4
-rw-r--r--net/irda/irsysctl.c6
-rw-r--r--net/irda/qos.c6
-rw-r--r--net/l2tp/l2tp_core.c4
-rw-r--r--net/mac80211/sta_info.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c133
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c6
-rw-r--r--net/netfilter/nf_conntrack_pptp.c1
-rw-r--r--net/netfilter/nf_conntrack_proto_gre.c4
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c6
-rw-r--r--net/netfilter/nfnetlink_queue.c4
-rw-r--r--net/netfilter/xt_rateest.c9
-rw-r--r--net/packet/af_packet.c5
-rw-r--r--net/rds/iw_rdma.c13
-rw-r--r--net/sched/cls_rsvp.h27
-rw-r--r--net/sctp/sm_sideeffect.c5
-rw-r--r--net/sctp/sm_statefuns.c6
-rw-r--r--net/wireless/nl80211.c5
-rw-r--r--net/wireless/reg.c1
-rw-r--r--net/wireless/sme.c2
-rw-r--r--net/x25/af_x25.c40
-rw-r--r--net/x25/x25_dev.c6
-rw-r--r--net/x25/x25_facilities.c10
-rw-r--r--net/x25/x25_in.c43
-rw-r--r--net/x25/x25_link.c3
-rw-r--r--net/x25/x25_subr.c14
-rw-r--r--net/xfrm/xfrm_input.c5
-rw-r--r--net/xfrm/xfrm_policy.c10
-rw-r--r--security/Kconfig6
-rw-r--r--security/Makefile4
-rw-r--r--security/apparmor/apparmorfs.c2
-rw-r--r--security/apparmor/ipc.c1
-rw-r--r--security/apparmor/lib.c1
-rw-r--r--security/apparmor/policy_unpack.c12
-rw-r--r--security/apparmor/procattr.c1
-rw-r--r--security/commoncap.c16
-rw-r--r--security/integrity/Kconfig7
-rw-r--r--security/integrity/Makefile12
-rw-r--r--security/integrity/evm/Kconfig13
-rw-r--r--security/integrity/evm/Makefile7
-rw-r--r--security/integrity/evm/evm.h38
-rw-r--r--security/integrity/evm/evm_crypto.c216
-rw-r--r--security/integrity/evm/evm_main.c384
-rw-r--r--security/integrity/evm/evm_posix_acl.c26
-rw-r--r--security/integrity/evm/evm_secfs.c108
-rw-r--r--security/integrity/iint.c172
-rw-r--r--security/integrity/ima/Kconfig1
-rw-r--r--security/integrity/ima/Makefile2
-rw-r--r--security/integrity/ima/ima.h30
-rw-r--r--security/integrity/ima/ima_api.c7
-rw-r--r--security/integrity/ima/ima_fs.c2
-rw-r--r--security/integrity/ima/ima_iint.c169
-rw-r--r--security/integrity/ima/ima_main.c13
-rw-r--r--security/integrity/integrity.h50
-rw-r--r--security/keys/Makefile2
-rw-r--r--security/keys/encrypted-keys/Makefile6
-rw-r--r--security/keys/encrypted-keys/ecryptfs_format.c (renamed from security/keys/ecryptfs_format.c)0
-rw-r--r--security/keys/encrypted-keys/ecryptfs_format.h (renamed from security/keys/ecryptfs_format.h)0
-rw-r--r--security/keys/encrypted-keys/encrypted.c (renamed from security/keys/encrypted.c)49
-rw-r--r--security/keys/encrypted-keys/encrypted.h (renamed from security/keys/encrypted.h)11
-rw-r--r--security/keys/encrypted-keys/masterkey_trusted.c45
-rw-r--r--security/keys/gc.c386
-rw-r--r--security/keys/internal.h4
-rw-r--r--security/keys/key.c121
-rw-r--r--security/keys/keyring.c3
-rw-r--r--security/keys/process_keys.c16
-rw-r--r--security/keys/trusted.c19
-rw-r--r--security/security.c77
-rw-r--r--security/selinux/exports.c1
-rw-r--r--security/selinux/hooks.c13
-rw-r--r--security/selinux/include/avc_ss.h6
-rw-r--r--security/selinux/include/security.h8
-rw-r--r--security/selinux/netlink.c2
-rw-r--r--security/selinux/nlmsgtab.c1
-rw-r--r--security/selinux/selinuxfs.c5
-rw-r--r--security/selinux/ss/conditional.c2
-rw-r--r--security/selinux/ss/conditional.h1
-rw-r--r--security/selinux/ss/policydb.c2
-rw-r--r--security/selinux/ss/services.c3
-rw-r--r--security/smack/smack.h24
-rw-r--r--security/smack/smack_access.c134
-rw-r--r--security/smack/smack_lsm.c266
-rw-r--r--security/smack/smackfs.c277
-rw-r--r--security/tomoyo/Kconfig2
-rw-r--r--security/tomoyo/Makefile4
-rw-r--r--security/tomoyo/audit.c7
-rw-r--r--security/tomoyo/common.c228
-rw-r--r--security/tomoyo/common.h189
-rw-r--r--security/tomoyo/condition.c71
-rw-r--r--security/tomoyo/domain.c209
-rw-r--r--security/tomoyo/environ.c122
-rw-r--r--security/tomoyo/file.c42
-rw-r--r--security/tomoyo/gc.c540
-rw-r--r--security/tomoyo/group.c61
-rw-r--r--security/tomoyo/memory.c39
-rw-r--r--security/tomoyo/network.c771
-rw-r--r--security/tomoyo/realpath.c32
-rw-r--r--security/tomoyo/securityfs_if.c123
-rw-r--r--security/tomoyo/tomoyo.c62
-rw-r--r--security/tomoyo/util.c80
-rw-r--r--sound/core/pcm_lib.c33
-rw-r--r--sound/pci/fm801.c15
-rw-r--r--sound/pci/hda/hda_codec.c6
-rw-r--r--sound/pci/hda/hda_intel.c10
-rw-r--r--sound/pci/hda/patch_cirrus.c2
-rw-r--r--sound/pci/hda/patch_conexant.c1
-rw-r--r--sound/pci/hda/patch_realtek.c17
-rw-r--r--sound/pci/hda/patch_sigmatel.c2
-rw-r--r--sound/soc/blackfin/bf5xx-ad193x.c4
-rw-r--r--sound/soc/blackfin/bf5xx-ad73311.c2
-rw-r--r--sound/soc/codecs/ssm2602.c3
-rw-r--r--sound/soc/codecs/wm8753.c4
-rw-r--r--sound/soc/codecs/wm8962.c26
-rw-r--r--sound/soc/fsl/mpc5200_dma.c6
-rw-r--r--sound/soc/imx/imx-pcm-fiq.c1
-rw-r--r--sound/soc/kirkwood/kirkwood-i2s.c2
-rw-r--r--sound/soc/omap/mcpdm.c2
-rw-r--r--sound/soc/omap/mcpdm.h2
-rw-r--r--sound/soc/omap/omap-mcbsp.c6
-rw-r--r--sound/soc/pxa/zylonite.c8
-rw-r--r--sound/soc/soc-cache.c12
-rw-r--r--sound/soc/soc-core.c22
-rw-r--r--sound/soc/soc-dapm.c2
-rw-r--r--sound/soc/soc-jack.c2
-rw-r--r--sound/usb/card.c7
-rw-r--r--tools/perf/Makefile9
-rw-r--r--tools/perf/builtin-record.c3
-rw-r--r--tools/perf/builtin-test.c2
-rw-r--r--tools/perf/builtin-top.c9
-rw-r--r--tools/perf/util/event.c5
-rw-r--r--tools/perf/util/event.h2
-rw-r--r--tools/perf/util/evlist.c13
-rw-r--r--tools/perf/util/evlist.h1
-rw-r--r--tools/perf/util/evsel.c57
-rw-r--r--tools/perf/util/probe-finder.c2
-rw-r--r--tools/perf/util/python.c2
-rw-r--r--tools/perf/util/session.h3
-rw-r--r--tools/perf/util/sort.c10
-rw-r--r--tools/perf/util/symbol.c153
888 files changed, 15289 insertions, 8413 deletions
diff --git a/Documentation/ABI/testing/evm b/Documentation/ABI/testing/evm
new file mode 100644
index 00000000000..8374d4557e5
--- /dev/null
+++ b/Documentation/ABI/testing/evm
@@ -0,0 +1,23 @@
+What: security/evm
+Date: March 2011
+Contact: Mimi Zohar <zohar@us.ibm.com>
+Description:
+ EVM protects a file's security extended attributes(xattrs)
+ against integrity attacks. The initial method maintains an
+ HMAC-sha1 value across the extended attributes, storing the
+ value as the extended attribute 'security.evm'.
+
+ EVM depends on the Kernel Key Retention System to provide it
+ with a trusted/encrypted key for the HMAC-sha1 operation.
+ The key is loaded onto the root's keyring using keyctl. Until
+ EVM receives notification that the key has been successfully
+ loaded onto the keyring (echo 1 > <securityfs>/evm), EVM
+ can not create or validate the 'security.evm' xattr, but
+ returns INTEGRITY_UNKNOWN. Loading the key and signaling EVM
+ should be done as early as possible. Normally this is done
+ in the initramfs, which has already been measured as part
+ of the trusted boot. For more information on creating and
+ loading existing trusted/encrypted keys, refer to:
+ Documentation/keys-trusted-encrypted.txt. (A sample dracut
+ patch, which loads the trusted/encrypted key and enables
+ EVM, is available from http://linux-ima.sourceforge.net/#EVM.)
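For illustration only (not part of the patch), a minimal sketch of the setup the text above describes, assuming securityfs is mounted at /sys/kernel/security and an encrypted key named "evm-key"; the key payload formats follow Documentation/keys-trusted-encrypted.txt:

	# create a trusted master key and an encrypted EVM key sealed by it
	keyctl add trusted kmk "new 32" @u
	keyctl add encrypted evm-key "new trusted:kmk 32" @u
	# signal EVM that the key has been loaded
	echo 1 > /sys/kernel/security/evm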
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-logitech-lg4ff b/Documentation/ABI/testing/sysfs-driver-hid-logitech-lg4ff
new file mode 100644
index 00000000000..9aec8ef228b
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-hid-logitech-lg4ff
@@ -0,0 +1,7 @@
+What: /sys/module/hid_logitech/drivers/hid:logitech/<dev>/range.
+Date: July 2011
+KernelVersion: 3.2
+Contact: Michal Malý <madcatxster@gmail.com>
+Description: Display minimum, maximum and current range of the steering
+ wheel. Writing a value within min and max boundaries sets the
+ range of the wheel.
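For illustration, once a supported wheel is bound to the driver the range is read and set with plain sysfs accesses; the device path component below is hypothetical and depends on the wheel's bus address:

	cat /sys/module/hid_logitech/drivers/hid:logitech/0003:046D:C294.0003/range
	echo 240 > /sys/module/hid_logitech/drivers/hid:logitech/0003:046D:C294.0003/range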
diff --git a/Documentation/devicetree/bindings/arm/l2cc.txt b/Documentation/devicetree/bindings/arm/l2cc.txt
new file mode 100644
index 00000000000..7ca52161e7a
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/l2cc.txt
@@ -0,0 +1,44 @@
+* ARM L2 Cache Controller
+
+ARM cores often have a separate level 2 cache controller. There are various
+implementations of the L2 cache controller with compatible programming models.
+The ARM L2 cache representation in the device tree should be done as follows:
+
+Required properties:
+
+- compatible : should be one of:
+ "arm,pl310-cache"
+ "arm,l220-cache"
+ "arm,l210-cache"
+- cache-unified : Specifies the cache is a unified cache.
+- cache-level : Should be set to 2 for a level 2 cache.
+- reg : Physical base address and size of cache controller's memory mapped
+ registers.
+
+Optional properties:
+
+- arm,data-latency : Cycles of latency for Data RAM accesses. Specifies 3 cells of
+ read, write and setup latencies. Minimum valid values are 1. Controllers
+ without setup latency control should use a value of 0.
+- arm,tag-latency : Cycles of latency for Tag RAM accesses. Specifies 3 cells of
+ read, write and setup latencies. Controllers without setup latency control
+ should use 0. Controllers without separate read and write Tag RAM latency
+ values should only use the first cell.
+- arm,dirty-latency : Cycles of latency for Dirty RAMs. This is a single cell.
+- arm,filter-ranges : <start length> Starting address and length of window to
+ filter. Addresses in the filter window are directed to the M1 port. Other
+ addresses will go to the M0 port.
+- interrupts : 1 combined interrupt.
+
+Example:
+
+L2: cache-controller {
+ compatible = "arm,pl310-cache";
+ reg = <0xfff12000 0x1000>;
+ arm,data-latency = <1 1 1>;
+ arm,tag-latency = <2 2 2>;
+ arm,filter-latency = <0x80000000 0x8000000>;
+ cache-unified;
+ cache-level = <2>;
+ interrupts = <45>;
+};
diff --git a/Documentation/hwmon/coretemp b/Documentation/hwmon/coretemp
index fa8776ab9b1..84d46c0c71a 100644
--- a/Documentation/hwmon/coretemp
+++ b/Documentation/hwmon/coretemp
@@ -35,13 +35,6 @@ the Out-Of-Spec bit. Following table summarizes the exported sysfs files:
All Sysfs entries are named with their core_id (represented here by 'X').
tempX_input - Core temperature (in millidegrees Celsius).
tempX_max - All cooling devices should be turned on (on Core2).
- Initialized with IA32_THERM_INTERRUPT. When the CPU
- temperature reaches this temperature, an interrupt is
- generated and tempX_max_alarm is set.
-tempX_max_hyst - If the CPU temperature falls below than temperature,
- an interrupt is generated and tempX_max_alarm is reset.
-tempX_max_alarm - Set if the temperature reaches or exceeds tempX_max.
- Reset if the temperature drops to or below tempX_max_hyst.
tempX_crit - Maximum junction temperature (in millidegrees Celsius).
tempX_crit_alarm - Set when Out-of-spec bit is set, never clears.
Correct CPU operation is no longer guaranteed.
@@ -49,9 +42,10 @@ tempX_label - Contains string "Core X", where X is processor
number. For Package temp, this will be "Physical id Y",
where Y is the package number.
-The TjMax temperature is set to 85 degrees C if undocumented model specific
-register (UMSR) 0xee has bit 30 set. If not the TjMax is 100 degrees C as
-(sometimes) documented in processor datasheet.
+On CPU models which support it, TjMax is read from a model-specific register.
+On other models, it is set to an arbitrary value based on weak heuristics.
+If these heuristics don't work for you, you can pass the correct TjMax value
+as a module parameter (tjmax).
Appendix A. Known TjMax lists (TBD):
Some information comes from ark.intel.com
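As an illustrative sketch, overriding the heuristic with the module parameter mentioned above might look as follows (the value is an example; the driver documents tjmax in degrees Celsius, check modinfo coretemp on your kernel):

	modprobe coretemp tjmax=100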
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index be9370c764c..0e6a1290f04 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -49,6 +49,7 @@ parameter is applicable:
EDD BIOS Enhanced Disk Drive Services (EDD) is enabled
EFI EFI Partitioning (GPT) is enabled
EIDE EIDE/ATAPI support is enabled.
+ EVM Extended Verification Module
FB The frame buffer device is enabled.
FTRACE Function tracing enabled.
GCOV GCOV profiling is enabled.
@@ -760,6 +761,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
This option is obsoleted by the "netdev=" option, which
has equivalent usage. See its documentation for details.
+ evm= [EVM]
+ Format: { "fix" }
+ Permit 'security.evm' to be updated regardless of
+ current integrity status.
+
failslab=
fail_page_alloc=
fail_make_request=[KNL]
@@ -2706,10 +2712,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
functions are at fixed addresses, they make nice
targets for exploits that can control RIP.
- emulate [default] Vsyscalls turn into traps and are
- emulated reasonably safely.
+ emulate Vsyscalls turn into traps and are emulated
+ reasonably safely.
- native Vsyscalls are native syscall instructions.
+ native [default] Vsyscalls are native syscall
+ instructions.
This is a little bit faster than trapping
and makes a few dynamic recompilers work
better than they would in emulation mode.
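As an illustration only, the new evm= parameter is appended to the kernel command line like any other boot option, for example in a GRUB entry:

	linux /vmlinuz-3.1.0 root=/dev/sda1 ro evm=fix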
diff --git a/Documentation/networking/dmfe.txt b/Documentation/networking/dmfe.txt
index 8006c227fda..25320bf19c8 100644
--- a/Documentation/networking/dmfe.txt
+++ b/Documentation/networking/dmfe.txt
@@ -1,3 +1,5 @@
+Note: This driver doesn't have a maintainer.
+
Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver for Linux.
This program is free software; you can redistribute it and/or
@@ -55,7 +57,6 @@ Test and make sure PCI latency is now correct for all cases.
Authors:
Sten Wang <sten_wang@davicom.com.tw > : Original Author
-Tobias Ringstrom <tori@unhappy.mine.nu> : Current Maintainer
Contributors:
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 81546990f41..ca5cdcd0f0e 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -1042,7 +1042,7 @@ conf/interface/*:
The functional behaviour for certain settings is different
depending on whether local forwarding is enabled or not.
-accept_ra - BOOLEAN
+accept_ra - INTEGER
Accept Router Advertisements; autoconfigure using them.
Possible values are:
@@ -1106,7 +1106,7 @@ dad_transmits - INTEGER
The amount of Duplicate Address Detection probes to send.
Default: 1
-forwarding - BOOLEAN
+forwarding - INTEGER
Configure interface-specific Host/Router behaviour.
Note: It is recommended to have the same setting on all
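Since both knobs are documented as integers rather than booleans after this change, they take numeric values; an illustrative configuration (interface name is an example):

	sysctl -w net.ipv6.conf.eth0.accept_ra=2
	sysctl -w net.ipv6.conf.all.forwarding=1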
diff --git a/Documentation/networking/scaling.txt b/Documentation/networking/scaling.txt
index 729985ed05b..a177de21d28 100644
--- a/Documentation/networking/scaling.txt
+++ b/Documentation/networking/scaling.txt
@@ -27,7 +27,7 @@ applying a filter to each packet that assigns it to one of a small number
of logical flows. Packets for each flow are steered to a separate receive
queue, which in turn can be processed by separate CPUs. This mechanism is
generally known as “Receive-side Scaling” (RSS). The goal of RSS and
-the other scaling techniques to increase performance uniformly.
+the other scaling techniques is to increase performance uniformly.
Multi-queue distribution can also be used for traffic prioritization, but
that is not the focus of these techniques.
@@ -186,10 +186,10 @@ are steered using plain RPS. Multiple table entries may point to the
same CPU. Indeed, with many flows and few CPUs, it is very likely that
a single application thread handles flows with many different flow hashes.
-rps_sock_table is a global flow table that contains the *desired* CPU for
-flows: the CPU that is currently processing the flow in userspace. Each
-table value is a CPU index that is updated during calls to recvmsg and
-sendmsg (specifically, inet_recvmsg(), inet_sendmsg(), inet_sendpage()
+rps_sock_flow_table is a global flow table that contains the *desired* CPU
+for flows: the CPU that is currently processing the flow in userspace.
+Each table value is a CPU index that is updated during calls to recvmsg
+and sendmsg (specifically, inet_recvmsg(), inet_sendmsg(), inet_sendpage()
and tcp_splice_read()).
When the scheduler moves a thread to a new CPU while it has outstanding
@@ -243,7 +243,7 @@ configured. The number of entries in the global flow table is set through:
The number of entries in the per-queue flow table are set through:
- /sys/class/net/<dev>/queues/tx-<n>/rps_flow_cnt
+ /sys/class/net/<dev>/queues/rx-<n>/rps_flow_cnt
== Suggested Configuration
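For illustration, the corrected per-queue path is used together with the global socket flow table along these lines (interface, queue and table sizes are examples only):

	echo 32768 > /proc/sys/net/core/rps_sock_flow_entries
	echo 2048 > /sys/class/net/eth0/queues/rx-0/rps_flow_cnt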
diff --git a/Documentation/vm/transhuge.txt b/Documentation/vm/transhuge.txt
index 0924aaca330..29bdf62aac0 100644
--- a/Documentation/vm/transhuge.txt
+++ b/Documentation/vm/transhuge.txt
@@ -123,10 +123,11 @@ be automatically shutdown if it's set to "never".
khugepaged runs usually at low frequency so while one may not want to
invoke defrag algorithms synchronously during the page faults, it
should be worth invoking defrag at least in khugepaged. However it's
-also possible to disable defrag in khugepaged:
+also possible to disable defrag in khugepaged by writing 0 or enable
+defrag in khugepaged by writing 1:
-echo yes >/sys/kernel/mm/transparent_hugepage/khugepaged/defrag
-echo no >/sys/kernel/mm/transparent_hugepage/khugepaged/defrag
+echo 0 >/sys/kernel/mm/transparent_hugepage/khugepaged/defrag
+echo 1 >/sys/kernel/mm/transparent_hugepage/khugepaged/defrag
You can also control how many pages khugepaged should scan at each
pass:
diff --git a/MAINTAINERS b/MAINTAINERS
index 3b9aa4ddd30..5ccf370e052 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1278,7 +1278,6 @@ F: drivers/input/misc/ati_remote2.c
ATLX ETHERNET DRIVERS
M: Jay Cliburn <jcliburn@gmail.com>
M: Chris Snook <chris.snook@gmail.com>
-M: Jie Yang <jie.yang@atheros.com>
L: netdev@vger.kernel.org
W: http://sourceforge.net/projects/atl1
W: http://atl1.sourceforge.net
@@ -1574,7 +1573,6 @@ F: drivers/scsi/bfa/
BROCADE BNA 10 GIGABIT ETHERNET DRIVER
M: Rasesh Mody <rmody@brocade.com>
-M: Debashis Dutt <ddutt@brocade.com>
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/bna/
@@ -1758,7 +1756,6 @@ F: Documentation/zh_CN/
CISCO VIC ETHERNET NIC DRIVER
M: Christian Benvenuti <benve@cisco.com>
-M: Vasanthy Kolluri <vkolluri@cisco.com>
M: Roopa Prabhu <roprabhu@cisco.com>
M: David Wang <dwang2@cisco.com>
S: Supported
@@ -2463,7 +2460,7 @@ S: Supported
F: drivers/infiniband/hw/ehca/
EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER
-M: Breno Leitao <leitao@linux.vnet.ibm.com>
+M: Thadeu Lima de Souza Cascardo <cascardo@linux.vnet.ibm.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ehea/
@@ -2555,6 +2552,11 @@ S: Maintained
F: Documentation/filesystems/ext4.txt
F: fs/ext4/
+Extended Verification Module (EVM)
+M: Mimi Zohar <zohar@us.ibm.com>
+S: Supported
+F: security/integrity/evm/
+
F71805F HARDWARE MONITORING DRIVER
M: Jean Delvare <khali@linux-fr.org>
L: lm-sensors@lm-sensors.org
@@ -3316,7 +3318,7 @@ M: David Woodhouse <dwmw2@infradead.org>
L: iommu@lists.linux-foundation.org
T: git git://git.infradead.org/iommu-2.6.git
S: Supported
-F: drivers/pci/intel-iommu.c
+F: drivers/iommu/intel-iommu.c
F: include/linux/intel-iommu.h
INTEL IOP-ADMA DMA DRIVER
@@ -4415,7 +4417,8 @@ L: netfilter@vger.kernel.org
L: coreteam@netfilter.org
W: http://www.netfilter.org/
W: http://www.iptables.org/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/kaber/nf-2.6.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-2.6.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next-2.6.git
S: Supported
F: include/linux/netfilter*
F: include/linux/netfilter/
@@ -6368,15 +6371,14 @@ F: net/ipv4/tcp_lp.c
TEGRA SUPPORT
M: Colin Cross <ccross@android.com>
-M: Erik Gilling <konkers@android.com>
M: Olof Johansson <olof@lixom.net>
+M: Stephen Warren <swarren@nvidia.com>
L: linux-tegra@vger.kernel.org
-T: git git://android.git.kernel.org/kernel/tegra.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/olof/tegra.git
S: Supported
F: arch/arm/mach-tegra
TEHUTI ETHERNET DRIVER
-M: Alexander Indenbaum <baum@tehutinetworks.net>
M: Andy Gospodarek <andy@greyhouse.net>
L: netdev@vger.kernel.org
S: Supported
@@ -6450,7 +6452,7 @@ L: tomoyo-users-en@lists.sourceforge.jp (subscribers-only, for users in English)
L: tomoyo-dev@lists.sourceforge.jp (subscribers-only, for developers in Japanese)
L: tomoyo-users@lists.sourceforge.jp (subscribers-only, for users in Japanese)
W: http://tomoyo.sourceforge.jp/
-T: quilt http://svn.sourceforge.jp/svnroot/tomoyo/trunk/2.4.x/tomoyo-lsm/patches/
+T: quilt http://svn.sourceforge.jp/svnroot/tomoyo/trunk/2.5.x/tomoyo-lsm/patches/
S: Maintained
F: security/tomoyo/
@@ -7145,6 +7147,12 @@ L: linux-scsi@vger.kernel.org
S: Maintained
F: drivers/scsi/wd7000.c
+WIIMOTE HID DRIVER
+M: David Herrmann <dh.herrmann@googlemail.com>
+L: linux-input@vger.kernel.org
+S: Maintained
+F: drivers/hid/hid-wiimote*
+
WINBOND CIR DRIVER
M: David Härdeman <david@hardeman.nu>
S: Maintained
@@ -7211,6 +7219,9 @@ W: http://opensource.wolfsonmicro.com/content/linux-drivers-wolfson-devices
S: Supported
F: Documentation/hwmon/wm83??
F: drivers/leds/leds-wm83*.c
+F: drivers/input/misc/wm831x-on.c
+F: drivers/input/touchscreen/wm831x-ts.c
+F: drivers/input/touchscreen/wm97*.c
F: drivers/mfd/wm8*.c
F: drivers/power/wm83*.c
F: drivers/rtc/rtc-wm83*.c
@@ -7220,6 +7231,7 @@ F: drivers/watchdog/wm83*_wdt.c
F: include/linux/mfd/wm831x/
F: include/linux/mfd/wm8350/
F: include/linux/mfd/wm8400*
+F: include/linux/wm97xx.h
F: include/sound/wm????.h
F: sound/soc/codecs/wm*
diff --git a/Makefile b/Makefile
index 522fa4784e6..07bc92544e9 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 1
SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION =
NAME = "Divemaster Edition"
# *DOCUMENTATION*
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 05b53941b81..7536b9cbb07 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -3,7 +3,7 @@ config ARM
default y
select HAVE_AOUT
select HAVE_DMA_API_DEBUG
- select HAVE_IDE
+ select HAVE_IDE if PCI || ISA || PCMCIA
select HAVE_MEMBLOCK
select RTC_LIB
select SYS_SUPPORTS_APM_EMULATION
@@ -195,7 +195,8 @@ config VECTORS_BASE
The base address of exception vectors.
config ARM_PATCH_PHYS_VIRT
- bool "Patch physical to virtual translations at runtime"
+ bool "Patch physical to virtual translations at runtime" if EMBEDDED
+ default y
depends on !XIP_KERNEL && MMU
depends on !ARCH_REALVIEW || !SPARSEMEM
help
@@ -204,16 +205,16 @@ config ARM_PATCH_PHYS_VIRT
kernel in system memory.
This can only be used with non-XIP MMU kernels where the base
- of physical memory is at a 16MB boundary, or theoretically 64K
- for the MSM machine class.
+ of physical memory is at a 16MB boundary.
+
+ Only disable this option if you know that you do not require
+ this feature (eg, building a kernel for a single machine) and
+ you need to shrink the kernel to the minimal size.
-config ARM_PATCH_PHYS_VIRT_16BIT
+
+config GENERIC_BUG
def_bool y
- depends on ARM_PATCH_PHYS_VIRT && ARCH_MSM
- help
- This option extends the physical to virtual translation patching
- to allow physical memory down to a theoretical minimum of 64K
- boundaries.
+ depends on BUG
source "init/Kconfig"
@@ -301,7 +302,6 @@ config ARCH_AT91
select ARCH_REQUIRE_GPIOLIB
select HAVE_CLK
select CLKDEV_LOOKUP
- select ARM_PATCH_PHYS_VIRT if MMU
help
This enables support for systems based on the Atmel AT91RM9200,
AT91SAM9 and AT91CAP9 processors.
@@ -385,6 +385,7 @@ config ARCH_FOOTBRIDGE
select CPU_SA110
select FOOTBRIDGE
select GENERIC_CLOCKEVENTS
+ select HAVE_IDE
help
Support for systems based on the DC21285 companion chip
("FootBridge"), such as the Simtec CATS and the Rebel NetWinder.
@@ -631,6 +632,8 @@ config ARCH_PXA
select SPARSE_IRQ
select AUTO_ZRELADDR
select MULTI_IRQ_HANDLER
+ select ARM_CPU_SUSPEND if PM
+ select HAVE_IDE
help
Support for Intel/Marvell's PXA2xx/PXA3xx processor line.
@@ -671,6 +674,7 @@ config ARCH_RPC
select NO_IOPORT
select ARCH_SPARSEMEM_ENABLE
select ARCH_USES_GETTIMEOFFSET
+ select HAVE_IDE
help
On the Acorn Risc-PC, Linux can support the internal IDE disk and
CD-ROM interface, serial and parallel port, and the floppy drive.
@@ -689,6 +693,7 @@ config ARCH_SA1100
select HAVE_SCHED_CLOCK
select TICK_ONESHOT
select ARCH_REQUIRE_GPIOLIB
+ select HAVE_IDE
help
Support for StrongARM 11x0 based boards.
@@ -1283,6 +1288,20 @@ config ARM_ERRATA_364296
processor into full low interrupt latency mode. ARM11MPCore
is not affected.
+config ARM_ERRATA_764369
+ bool "ARM errata: Data cache line maintenance operation by MVA may not succeed"
+ depends on CPU_V7 && SMP
+ help
+ This option enables the workaround for erratum 764369
+ affecting Cortex-A9 MPCore with two or more processors (all
+ current revisions). Under certain timing circumstances, a data
+ cache line maintenance operation by MVA targeting an Inner
+ Shareable memory region may fail to proceed up to either the
+ Point of Coherency or to the Point of Unification of the
+ system. This workaround adds a DSB instruction before the
+ relevant cache maintenance functions and sets a specific bit
+ in the diagnostic control register of the SCU.
+
endmenu
source "arch/arm/common/Kconfig"
@@ -1361,6 +1380,7 @@ config SMP
MACH_REALVIEW_PB11MP || MACH_REALVIEW_PBX || ARCH_OMAP4 || \
ARCH_EXYNOS4 || ARCH_TEGRA || ARCH_U8500 || ARCH_VEXPRESS_CA9X4 || \
ARCH_MSM_SCORPIONMP || ARCH_SHMOBILE
+ depends on MMU
select USE_GENERIC_SMP_HELPERS
select HAVE_ARM_SCU if !ARCH_MSM_SCORPIONMP
help
@@ -1393,6 +1413,31 @@ config SMP_ON_UP
If you don't know what to do here, say Y.
+config ARM_CPU_TOPOLOGY
+ bool "Support cpu topology definition"
+ depends on SMP && CPU_V7
+ default y
+ help
+ Support ARM cpu topology definition. The MPIDR register defines
+ affinity between processors which is then used to describe the cpu
+ topology of an ARM System.
+
+config SCHED_MC
+ bool "Multi-core scheduler support"
+ depends on ARM_CPU_TOPOLOGY
+ help
+ Multi-core scheduler support improves the CPU scheduler's decision
+ making when dealing with multi-core CPU chips at a cost of slightly
+ increased overhead in some places. If unsure say N here.
+
+config SCHED_SMT
+ bool "SMT scheduler support"
+ depends on ARM_CPU_TOPOLOGY
+ help
+ Improves the CPU scheduler's decision making when dealing with
+ MultiThreading at a cost of slightly increased overhead in some
+ places. If unsure say N here.
+
config HAVE_ARM_SCU
bool
help
@@ -1468,6 +1513,7 @@ config THUMB2_KERNEL
depends on CPU_V7 && !CPU_V6 && !CPU_V6K && EXPERIMENTAL
select AEABI
select ARM_ASM_UNIFIED
+ select ARM_UNWIND
help
By enabling this option, the kernel will be compiled in
Thumb-2 mode. A compiler/assembler that understand the unified
@@ -2087,6 +2133,9 @@ config ARCH_SUSPEND_POSSIBLE
CPU_V6 || CPU_V6K || CPU_V7 || CPU_XSC3 || CPU_XSCALE
def_bool y
+config ARM_CPU_SUSPEND
+ def_bool PM_SLEEP
+
endmenu
source "net/Kconfig"
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 81cbe40c159..df3eb3ccd76 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -65,13 +65,71 @@ config DEBUG_USER
# These options are only for real kernel hackers who want to get their hands dirty.
config DEBUG_LL
- bool "Kernel low-level debugging functions"
+ bool "Kernel low-level debugging functions (read help!)"
depends on DEBUG_KERNEL
help
Say Y here to include definitions of printascii, printch, printhex
in the kernel. This is helpful if you are debugging code that
executes before the console is initialized.
+ Note that selecting this option will limit the kernel to a single
+ UART definition, as specified below. Attempting to boot the kernel
+ image on a different platform *will not work*, so this option should
+ not be enabled for kernels that are intended to be portable.
+
+choice
+ prompt "Kernel low-level debugging port"
+ depends on DEBUG_LL
+
+ config DEBUG_LL_UART_NONE
+ bool "No low-level debugging UART"
+ help
+ Say Y here if your platform doesn't provide a UART option
+ below. This relies on your platform choosing the right UART
+ definition internally in order for low-level debugging to
+ work.
+
+ config DEBUG_ICEDCC
+ bool "Kernel low-level debugging via EmbeddedICE DCC channel"
+ help
+ Say Y here if you want the debug print routines to direct
+ their output to the EmbeddedICE macrocell's DCC channel using
+ co-processor 14. This is known to work on the ARM9 style ICE
+ channel and on the XScale with the PEEDI.
+
+ Note that the system will appear to hang during boot if there
+ is nothing connected to read from the DCC.
+
+ config DEBUG_FOOTBRIDGE_COM1
+ bool "Kernel low-level debugging messages via footbridge 8250 at PCI COM1"
+ depends on FOOTBRIDGE
+ help
+ Say Y here if you want the debug print routines to direct
+ their output to the 8250 at PCI COM1.
+
+ config DEBUG_DC21285_PORT
+ bool "Kernel low-level debugging messages via footbridge serial port"
+ depends on FOOTBRIDGE
+ help
+ Say Y here if you want the debug print routines to direct
+ their output to the serial port in the DC21285 (Footbridge).
+
+ config DEBUG_CLPS711X_UART1
+ bool "Kernel low-level debugging messages via UART1"
+ depends on ARCH_CLPS711X
+ help
+ Say Y here if you want the debug print routines to direct
+ their output to the first serial port on these devices.
+
+ config DEBUG_CLPS711X_UART2
+ bool "Kernel low-level debugging messages via UART2"
+ depends on ARCH_CLPS711X
+ help
+ Say Y here if you want the debug print routines to direct
+ their output to the second serial port on these devices.
+
+endchoice
+
config EARLY_PRINTK
bool "Early printk"
depends on DEBUG_LL
@@ -80,43 +138,14 @@ config EARLY_PRINTK
kernel low-level debugging functions. Add earlyprintk to your
kernel parameters to enable this console.
-config DEBUG_ICEDCC
- bool "Kernel low-level debugging via EmbeddedICE DCC channel"
- depends on DEBUG_LL
- help
- Say Y here if you want the debug print routines to direct their
- output to the EmbeddedICE macrocell's DCC channel using
- co-processor 14. This is known to work on the ARM9 style ICE
- channel and on the XScale with the PEEDI.
-
- It does include a timeout to ensure that the system does not
- totally freeze when there is nothing connected to read.
-
config OC_ETM
bool "On-chip ETM and ETB"
- select ARM_AMBA
+ depends on ARM_AMBA
help
Enables the on-chip embedded trace macrocell and embedded trace
buffer driver that will allow you to collect traces of the
kernel code.
-config DEBUG_DC21285_PORT
- bool "Kernel low-level debugging messages via footbridge serial port"
- depends on DEBUG_LL && FOOTBRIDGE
- help
- Say Y here if you want the debug print routines to direct their
- output to the serial port in the DC21285 (Footbridge). Saying N
- will cause the debug messages to appear on the first 16550
- serial port.
-
-config DEBUG_CLPS711X_UART2
- bool "Kernel low-level debugging messages via UART2"
- depends on DEBUG_LL && ARCH_CLPS711X
- help
- Say Y here if you want the debug print routines to direct their
- output to the second serial port on these devices. Saying N will
- cause the debug messages to appear on the first serial port.
-
config DEBUG_S3C_UART
depends on PLAT_SAMSUNG
int "S3C UART to use for low-level debug"
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 70c424eaf7b..5665c2a3b65 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -128,6 +128,9 @@ textofs-$(CONFIG_PM_H1940) := 0x00108000
ifeq ($(CONFIG_ARCH_SA1100),y)
textofs-$(CONFIG_SA1111) := 0x00208000
endif
+textofs-$(CONFIG_ARCH_MSM7X30) := 0x00208000
+textofs-$(CONFIG_ARCH_MSM8X60) := 0x00208000
+textofs-$(CONFIG_ARCH_MSM8960) := 0x00208000
# Machine directory name. This list is sorted alphanumerically
# by CONFIG_* macro name.
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index a1edfd5a129..176062ac7f0 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -78,7 +78,16 @@ endif
$(obj)/uImage: STARTADDR=$(LOADADDR)
+check_for_multiple_loadaddr = \
+if [ $(words $(LOADADDR)) -gt 1 ]; then \
+ echo 'multiple load addresses: $(LOADADDR)'; \
+ echo 'This is incompatible with uImages'; \
+ echo 'Specify LOADADDR on the command line to build a uImage'; \
+ false; \
+fi
+
$(obj)/uImage: $(obj)/zImage FORCE
+ @$(check_for_multiple_loadaddr)
$(call if_changed,uimage)
@echo ' Image $@ is ready'
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 0c74a6fab95..a6b30b35ca6 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -139,8 +139,16 @@ bad_syms=$$($(CROSS_COMPILE)nm $@ | sed -n 's/^.\{8\} [bc] \(.*\)/\1/p') && \
( echo "following symbols must have non local/private scope:" >&2; \
echo "$$bad_syms" >&2; rm -f $@; false )
+check_for_multiple_zreladdr = \
+if [ $(words $(ZRELADDR)) -gt 1 -a "$(CONFIG_AUTO_ZRELADDR)" = "" ]; then \
+ echo 'multiple zreladdrs: $(ZRELADDR)'; \
+ echo 'This needs CONFIG_AUTO_ZRELADDR to be set'; \
+ false; \
+fi
+
$(obj)/vmlinux: $(obj)/vmlinux.lds $(obj)/$(HEAD) $(obj)/piggy.$(suffix_y).o \
$(addprefix $(obj)/, $(OBJS)) $(lib1funcs) FORCE
+ @$(check_for_multiple_zreladdr)
$(call if_changed,ld)
@$(check_for_bad_syms)
diff --git a/arch/arm/boot/dts/tegra-harmony.dts b/arch/arm/boot/dts/tegra-harmony.dts
index 4c053340ce3..e5818668d09 100644
--- a/arch/arm/boot/dts/tegra-harmony.dts
+++ b/arch/arm/boot/dts/tegra-harmony.dts
@@ -57,14 +57,14 @@
};
sdhci@c8000200 {
- gpios = <&gpio 69 0>, /* cd, gpio PI5 */
- <&gpio 57 0>, /* wp, gpio PH1 */
- <&gpio 155 0>; /* power, gpio PT3 */
+ cd-gpios = <&gpio 69 0>; /* gpio PI5 */
+ wp-gpios = <&gpio 57 0>; /* gpio PH1 */
+ power-gpios = <&gpio 155 0>; /* gpio PT3 */
};
sdhci@c8000600 {
- gpios = <&gpio 58 0>, /* cd, gpio PH2 */
- <&gpio 59 0>, /* wp, gpio PH3 */
- <&gpio 70 0>; /* power, gpio PI6 */
+ cd-gpios = <&gpio 58 0>; /* gpio PH2 */
+ wp-gpios = <&gpio 59 0>; /* gpio PH3 */
+ power-gpios = <&gpio 70 0>; /* gpio PI6 */
};
};
diff --git a/arch/arm/boot/dts/tegra-seaboard.dts b/arch/arm/boot/dts/tegra-seaboard.dts
index 1940cae0074..64cedca6fc7 100644
--- a/arch/arm/boot/dts/tegra-seaboard.dts
+++ b/arch/arm/boot/dts/tegra-seaboard.dts
@@ -21,8 +21,8 @@
};
sdhci@c8000400 {
- gpios = <&gpio 69 0>, /* cd, gpio PI5 */
- <&gpio 57 0>, /* wp, gpio PH1 */
- <&gpio 70 0>; /* power, gpio PI6 */
+ cd-gpios = <&gpio 69 0>; /* gpio PI5 */
+ wp-gpios = <&gpio 57 0>; /* gpio PH1 */
+ power-gpios = <&gpio 70 0>; /* gpio PI6 */
};
};
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 3227ca952a1..666b278e56d 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -180,7 +180,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
return -EINVAL;
mask = 0xff << shift;
- bit = 1 << (cpu + shift);
+ bit = 1 << (cpu_logical_map(cpu) + shift);
spin_lock(&irq_controller_lock);
val = readl_relaxed(reg) & ~mask;
@@ -259,9 +259,15 @@ static void __init gic_dist_init(struct gic_chip_data *gic,
unsigned int irq_start)
{
unsigned int gic_irqs, irq_limit, i;
+ u32 cpumask;
void __iomem *base = gic->dist_base;
- u32 cpumask = 1 << smp_processor_id();
+ u32 cpu = 0;
+#ifdef CONFIG_SMP
+ cpu = cpu_logical_map(smp_processor_id());
+#endif
+
+ cpumask = 1 << cpu;
cpumask |= cpumask << 8;
cpumask |= cpumask << 16;
@@ -382,7 +388,12 @@ void __cpuinit gic_enable_ppi(unsigned int irq)
#ifdef CONFIG_SMP
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
- unsigned long map = *cpus_addr(*mask);
+ int cpu;
+ unsigned long map = 0;
+
+ /* Convert our logical CPU mask into a physical one. */
+ for_each_cpu(cpu, mask)
+ map |= 1 << cpu_logical_map(cpu);
/*
* Ensure that stores to Normal memory are visible to the
diff --git a/arch/arm/common/pl330.c b/arch/arm/common/pl330.c
index 97912fa4878..7129cfbdacd 100644
--- a/arch/arm/common/pl330.c
+++ b/arch/arm/common/pl330.c
@@ -1546,7 +1546,7 @@ int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op)
/* Start the next */
case PL330_OP_START:
- if (!_start(thrd))
+ if (!_thrd_active(thrd) && !_start(thrd))
ret = -EIO;
break;
diff --git a/arch/arm/common/vic.c b/arch/arm/common/vic.c
index 7aa4262ada7..01f18a421b1 100644
--- a/arch/arm/common/vic.c
+++ b/arch/arm/common/vic.c
@@ -259,7 +259,6 @@ static void __init vic_disable(void __iomem *base)
writel(0, base + VIC_INT_SELECT);
writel(0, base + VIC_INT_ENABLE);
writel(~0, base + VIC_INT_ENABLE_CLEAR);
- writel(0, base + VIC_IRQ_STATUS);
writel(0, base + VIC_ITCR);
writel(~0, base + VIC_INT_SOFT_CLEAR);
}
@@ -347,7 +346,8 @@ void __init vic_init(void __iomem *base, unsigned int irq_start,
/* Identify which VIC cell this one is, by reading the ID */
for (i = 0; i < 4; i++) {
- u32 addr = ((u32)base & PAGE_MASK) + 0xfe0 + (i * 4);
+ void __iomem *addr;
+ addr = (void __iomem *)((u32)base & PAGE_MASK) + 0xfe0 + (i * 4);
cellid |= (readl(addr) & 0xff) << (8 * i);
}
vendor = (cellid >> 12) & 0xff;
diff --git a/arch/arm/configs/integrator_defconfig b/arch/arm/configs/integrator_defconfig
index 7196ade07e2..1103f62a196 100644
--- a/arch/arm/configs/integrator_defconfig
+++ b/arch/arm/configs/integrator_defconfig
@@ -1,5 +1,6 @@
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
+CONFIG_TINY_RCU=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -8,20 +9,29 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_ARCH_INTEGRATOR=y
CONFIG_ARCH_INTEGRATOR_AP=y
+CONFIG_ARCH_INTEGRATOR_CP=y
CONFIG_CPU_ARM720T=y
CONFIG_CPU_ARM920T=y
+CONFIG_CPU_ARM922T=y
+CONFIG_CPU_ARM926T=y
+CONFIG_CPU_ARM1020=y
+CONFIG_CPU_ARM1022=y
+CONFIG_CPU_ARM1026=y
CONFIG_PCI=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
CONFIG_LEDS=y
CONFIG_LEDS_CPU=y
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="console=ttyAM0,38400n8 root=/dev/nfs ip=bootp mem=32M"
+CONFIG_CMDLINE="console=ttyAM0,38400n8 root=/dev/nfs ip=bootp"
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_FPE_NWFPE=y
-CONFIG_PM=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -32,7 +42,6 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
# CONFIG_IPV6 is not set
CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_AFS_PARTS=y
CONFIG_MTD_CHAR=y
@@ -40,6 +49,7 @@ CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_ADV_OPTIONS=y
CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_PHYSMAP=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
@@ -56,6 +66,8 @@ CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_MATROX=y
CONFIG_FB_MATROX_MILLENIUM=y
CONFIG_FB_MATROX_MYSTIQUE=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_PL030=y
CONFIG_EXT2_FS=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
@@ -68,4 +80,3 @@ CONFIG_NFSD_V3=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_ERRORS=y
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 6550db3aa5c..960abceb8e1 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -1,3 +1,20 @@
include include/asm-generic/Kbuild.asm
header-y += hwcap.h
+
+generic-y += auxvec.h
+generic-y += bitsperlong.h
+generic-y += cputime.h
+generic-y += emergency-restart.h
+generic-y += errno.h
+generic-y += ioctl.h
+generic-y += irq_regs.h
+generic-y += kdebug.h
+generic-y += local.h
+generic-y += local64.h
+generic-y += percpu.h
+generic-y += poll.h
+generic-y += resource.h
+generic-y += sections.h
+generic-y += siginfo.h
+generic-y += sizes.h
diff --git a/arch/arm/include/asm/auxvec.h b/arch/arm/include/asm/auxvec.h
deleted file mode 100644
index c0536f6b29a..00000000000
--- a/arch/arm/include/asm/auxvec.h
+++ /dev/null
@@ -1,4 +0,0 @@
-#ifndef __ASMARM_AUXVEC_H
-#define __ASMARM_AUXVEC_H
-
-#endif
diff --git a/arch/arm/include/asm/bitsperlong.h b/arch/arm/include/asm/bitsperlong.h
deleted file mode 100644
index 6dc0bb0c13b..00000000000
--- a/arch/arm/include/asm/bitsperlong.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/bitsperlong.h>
diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h
index 4d88425a416..9abe7a07d5a 100644
--- a/arch/arm/include/asm/bug.h
+++ b/arch/arm/include/asm/bug.h
@@ -3,21 +3,58 @@
#ifdef CONFIG_BUG
-#ifdef CONFIG_DEBUG_BUGVERBOSE
-extern void __bug(const char *file, int line) __attribute__((noreturn));
-
-/* give file/line information */
-#define BUG() __bug(__FILE__, __LINE__)
+/*
+ * Pick a suitable undefined instruction to use for ARM/Thumb2 bug handling.
+ * We need to be careful not to conflict with those used by other modules and
+ * the register_undef_hook() system.
+ */
+#ifdef CONFIG_THUMB2_KERNEL
+#define BUG_INSTR_VALUE 0xde02
+#define BUG_INSTR_TYPE ".hword "
#else
+#define BUG_INSTR_VALUE 0xe7f001f2
+#define BUG_INSTR_TYPE ".word "
+#endif
-/* this just causes an oops */
-#define BUG() do { *(int *)0 = 0; } while (1)
-#endif
+#define BUG() _BUG(__FILE__, __LINE__, BUG_INSTR_VALUE)
+#define _BUG(file, line, value) __BUG(file, line, value)
+
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+
+/*
+ * The extra indirection is to ensure that the __FILE__ string comes through
+ * OK. Many versions of gcc do not support the asm %c parameter, which would be
+ * preferable to this unpleasantness. We use mergeable string sections to
+ * avoid multiple copies of the string appearing in the kernel image.
+ */
+
+#define __BUG(__file, __line, __value) \
+do { \
+ BUILD_BUG_ON(sizeof(struct bug_entry) != 12); \
+ asm volatile("1:\t" BUG_INSTR_TYPE #__value "\n" \
+ ".pushsection .rodata.str, \"aMS\", %progbits, 1\n" \
+ "2:\t.asciz " #__file "\n" \
+ ".popsection\n" \
+ ".pushsection __bug_table,\"a\"\n" \
+ "3:\t.word 1b, 2b\n" \
+ "\t.hword " #__line ", 0\n" \
+ ".popsection"); \
+ unreachable(); \
+} while (0)
+
+#else /* not CONFIG_DEBUG_BUGVERBOSE */
+
+#define __BUG(__file, __line, __value) \
+do { \
+ asm volatile(BUG_INSTR_TYPE #__value); \
+ unreachable(); \
+} while (0)
+#endif /* CONFIG_DEBUG_BUGVERBOSE */
#define HAVE_ARCH_BUG
-#endif
+#endif /* CONFIG_BUG */
#include <asm-generic/bug.h>
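
Editor's note: the BUILD_BUG_ON() above pins the size of each record that the __BUG() asm emits into the __bug_table section: a word for the address of the trapping instruction (label 1b), a word pointing at the file string (label 2b), a halfword line number and a halfword of flags. A user-space sketch of that 12-byte layout, with field names borrowed from include/asm-generic/bug.h; this is illustrative only and not part of the patch:

	#include <stdint.h>

	/* 32-bit model of the entry emitted by ".word 1b, 2b" / ".hword line, 0" */
	struct bug_entry32 {
		uint32_t bug_addr;	/* label 1b: the undefined instruction */
		uint32_t file;		/* label 2b: pointer into .rodata.str */
		uint16_t line;
		uint16_t flags;
	};

	_Static_assert(sizeof(struct bug_entry32) == 12,
		       "matches the BUILD_BUG_ON in the ARM __BUG() macro");

	int main(void) { return 0; }
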
diff --git a/arch/arm/include/asm/cachetype.h b/arch/arm/include/asm/cachetype.h
index c023db09fcc..7ea78144ae2 100644
--- a/arch/arm/include/asm/cachetype.h
+++ b/arch/arm/include/asm/cachetype.h
@@ -7,6 +7,7 @@
#define CACHEID_VIPT (CACHEID_VIPT_ALIASING|CACHEID_VIPT_NONALIASING)
#define CACHEID_ASID_TAGGED (1 << 3)
#define CACHEID_VIPT_I_ALIASING (1 << 4)
+#define CACHEID_PIPT (1 << 5)
extern unsigned int cacheid;
@@ -16,6 +17,7 @@ extern unsigned int cacheid;
#define cache_is_vipt_aliasing() cacheid_is(CACHEID_VIPT_ALIASING)
#define icache_is_vivt_asid_tagged() cacheid_is(CACHEID_ASID_TAGGED)
#define icache_is_vipt_aliasing() cacheid_is(CACHEID_VIPT_I_ALIASING)
+#define icache_is_pipt() cacheid_is(CACHEID_PIPT)
/*
* __LINUX_ARM_ARCH__ is the minimum supported CPU architecture
@@ -26,7 +28,8 @@ extern unsigned int cacheid;
#if __LINUX_ARM_ARCH__ >= 7
#define __CACHEID_ARCH_MIN (CACHEID_VIPT_NONALIASING |\
CACHEID_ASID_TAGGED |\
- CACHEID_VIPT_I_ALIASING)
+ CACHEID_VIPT_I_ALIASING |\
+ CACHEID_PIPT)
#elif __LINUX_ARM_ARCH__ >= 6
#define __CACHEID_ARCH_MIN (~CACHEID_VIVT)
#else
diff --git a/arch/arm/include/asm/cputime.h b/arch/arm/include/asm/cputime.h
deleted file mode 100644
index 3a8002a5fec..00000000000
--- a/arch/arm/include/asm/cputime.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ARM_CPUTIME_H
-#define __ARM_CPUTIME_H
-
-#include <asm-generic/cputime.h>
-
-#endif /* __ARM_CPUTIME_H */
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index cd4458f6417..cb47d28cbe1 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -8,6 +8,7 @@
#define CPUID_CACHETYPE 1
#define CPUID_TCM 2
#define CPUID_TLBTYPE 3
+#define CPUID_MPIDR 5
#define CPUID_EXT_PFR0 "c1, 0"
#define CPUID_EXT_PFR1 "c1, 1"
@@ -70,6 +71,11 @@ static inline unsigned int __attribute_const__ read_cpuid_tcmstatus(void)
return read_cpuid(CPUID_TCM);
}
+static inline unsigned int __attribute_const__ read_cpuid_mpidr(void)
+{
+ return read_cpuid(CPUID_MPIDR);
+}
+
/*
* Intel's XScale3 core supports some v6 features (supersections, L2)
* but advertises itself as v5 as it does not support the v6 ISA. For
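
Editor's note: read_cpuid_mpidr() is what the new CONFIG_ARM_CPU_TOPOLOGY code uses to derive the cpu topology from the MPIDR affinity fields. A hedged user-space sketch of how those fields break down; the 8-bit-per-level split is the architectural ARMv7 layout, the exact masks used by arch/arm/kernel/topology.c may be narrower, and the sample value is made up:

	#include <stdio.h>

	#define MPIDR_AFF0(m)	((m) & 0xff)		/* core within a cluster */
	#define MPIDR_AFF1(m)	(((m) >> 8) & 0xff)	/* cluster */
	#define MPIDR_AFF2(m)	(((m) >> 16) & 0xff)
	#define MPIDR_MT(m)	(((m) >> 24) & 0x1)	/* multithreading bit */

	int main(void)
	{
		unsigned int mpidr = 0x80000101;	/* sample: cluster 1, core 1 */

		printf("aff0=%u aff1=%u aff2=%u mt=%u\n",
		       MPIDR_AFF0(mpidr), MPIDR_AFF1(mpidr),
		       MPIDR_AFF2(mpidr), MPIDR_MT(mpidr));
		return 0;
	}
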
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index 9f390ce335c..6615f03f56a 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -10,6 +10,9 @@ struct dev_archdata {
#ifdef CONFIG_DMABOUNCE
struct dmabounce_device_info *dmabounce;
#endif
+#ifdef CONFIG_IOMMU_API
+ void *iommu; /* private IOMMU data */
+#endif
};
struct pdev_archdata {
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 7a21d0bf713..28b7ee8d739 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -32,7 +32,7 @@ static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
- return (void *)__bus_to_virt(addr);
+ return (void *)__bus_to_virt((unsigned long)addr);
}
static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
diff --git a/arch/arm/include/asm/ecard.h b/arch/arm/include/asm/ecard.h
index 29f2610efc7..eaea14676d5 100644
--- a/arch/arm/include/asm/ecard.h
+++ b/arch/arm/include/asm/ecard.h
@@ -161,7 +161,6 @@ struct expansion_card {
/* Private internal data */
const char *card_desc; /* Card description */
- CONST unsigned int podaddr; /* Base Linux address for card */
CONST loader_t loader; /* loader program */
u64 dma_mask;
};
diff --git a/arch/arm/include/asm/emergency-restart.h b/arch/arm/include/asm/emergency-restart.h
deleted file mode 100644
index 108d8c48e42..00000000000
--- a/arch/arm/include/asm/emergency-restart.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_EMERGENCY_RESTART_H
-#define _ASM_EMERGENCY_RESTART_H
-
-#include <asm-generic/emergency-restart.h>
-
-#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/arch/arm/include/asm/errno.h b/arch/arm/include/asm/errno.h
deleted file mode 100644
index 6e60f0612bb..00000000000
--- a/arch/arm/include/asm/errno.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ARM_ERRNO_H
-#define _ARM_ERRNO_H
-
-#include <asm-generic/errno.h>
-
-#endif
diff --git a/arch/arm/include/asm/exception.h b/arch/arm/include/asm/exception.h
new file mode 100644
index 00000000000..5abaf5bbd98
--- /dev/null
+++ b/arch/arm/include/asm/exception.h
@@ -0,0 +1,19 @@
+/*
+ * Annotations for marking C functions as exception handlers.
+ *
+ * These should only be used for C functions that are called from the low
+ * level exception entry code and not any intervening C code.
+ */
+#ifndef __ASM_ARM_EXCEPTION_H
+#define __ASM_ARM_EXCEPTION_H
+
+#include <linux/ftrace.h>
+
+#define __exception __attribute__((section(".exception.text")))
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#define __exception_irq_entry __irq_entry
+#else
+#define __exception_irq_entry __exception
+#endif
+
+#endif /* __ASM_ARM_EXCEPTION_H */
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 8c73900da9e..253cc86318b 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -25,17 +25,17 @@
#ifdef CONFIG_SMP
-#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
+#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
smp_mb(); \
__asm__ __volatile__( \
- "1: ldrex %1, [%2]\n" \
+ "1: ldrex %1, [%3]\n" \
" " insn "\n" \
- "2: strex %1, %0, [%2]\n" \
- " teq %1, #0\n" \
+ "2: strex %2, %0, [%3]\n" \
+ " teq %2, #0\n" \
" bne 1b\n" \
" mov %0, #0\n" \
- __futex_atomic_ex_table("%4") \
- : "=&r" (ret), "=&r" (oldval) \
+ __futex_atomic_ex_table("%5") \
+ : "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \
: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
: "cc", "memory")
@@ -73,14 +73,14 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
#include <linux/preempt.h>
#include <asm/domain.h>
-#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
+#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
__asm__ __volatile__( \
- "1: " T(ldr) " %1, [%2]\n" \
+ "1: " T(ldr) " %1, [%3]\n" \
" " insn "\n" \
- "2: " T(str) " %0, [%2]\n" \
+ "2: " T(str) " %0, [%3]\n" \
" mov %0, #0\n" \
- __futex_atomic_ex_table("%4") \
- : "=&r" (ret), "=&r" (oldval) \
+ __futex_atomic_ex_table("%5") \
+ : "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \
: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
: "cc", "memory")
@@ -117,7 +117,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
int cmp = (encoded_op >> 24) & 15;
int oparg = (encoded_op << 8) >> 20;
int cmparg = (encoded_op << 20) >> 20;
- int oldval = 0, ret;
+ int oldval = 0, ret, tmp;
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
@@ -129,19 +129,19 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
switch (op) {
case FUTEX_OP_SET:
- __futex_atomic_op("mov %0, %3", ret, oldval, uaddr, oparg);
+ __futex_atomic_op("mov %0, %4", ret, oldval, tmp, uaddr, oparg);
break;
case FUTEX_OP_ADD:
- __futex_atomic_op("add %0, %1, %3", ret, oldval, uaddr, oparg);
+ __futex_atomic_op("add %0, %1, %4", ret, oldval, tmp, uaddr, oparg);
break;
case FUTEX_OP_OR:
- __futex_atomic_op("orr %0, %1, %3", ret, oldval, uaddr, oparg);
+ __futex_atomic_op("orr %0, %1, %4", ret, oldval, tmp, uaddr, oparg);
break;
case FUTEX_OP_ANDN:
- __futex_atomic_op("and %0, %1, %3", ret, oldval, uaddr, ~oparg);
+ __futex_atomic_op("and %0, %1, %4", ret, oldval, tmp, uaddr, ~oparg);
break;
case FUTEX_OP_XOR:
- __futex_atomic_op("eor %0, %1, %3", ret, oldval, uaddr, oparg);
+ __futex_atomic_op("eor %0, %1, %4", ret, oldval, tmp, uaddr, oparg);
break;
default:
ret = -ENOSYS;
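
Editor's note: the extra "tmp" operand exists because the old SMP variant reused %1 (oldval) as the strex status register, so a successful store clobbered the value the caller needs for the FUTEX_OP_CMP_* test; the status now lands in a dedicated scratch register. As a rough user-space analogue of what __futex_atomic_op() implements for FUTEX_OP_ADD (a sketch only; the kernel version also handles faulting user addresses through the exception table):

	#include <stdatomic.h>
	#include <stdio.h>

	/* Atomically apply the op and return the previous value, which the
	 * futex code needs for the comparison afterwards. */
	static int futex_op_add(atomic_int *uaddr, int oparg, int *oldval)
	{
		*oldval = atomic_fetch_add(uaddr, oparg);
		return 0;	/* the kernel returns -EFAULT on a bad access */
	}

	int main(void)
	{
		atomic_int futex_word = 5;
		int old;

		futex_op_add(&futex_word, 3, &old);
		printf("old=%d new=%d\n", old, atomic_load(&futex_word));
		return 0;
	}
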
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index 99a6ed7e1bf..434edccdf7f 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -52,6 +52,8 @@
#define L2X0_LOCKDOWN_WAY_D_BASE 0x900
#define L2X0_LOCKDOWN_WAY_I_BASE 0x904
#define L2X0_LOCKDOWN_STRIDE 0x08
+#define L2X0_ADDR_FILTER_START 0xC00
+#define L2X0_ADDR_FILTER_END 0xC04
#define L2X0_TEST_OPERATION 0xF00
#define L2X0_LINE_DATA 0xF10
#define L2X0_LINE_TAG 0xF30
@@ -65,8 +67,23 @@
#define L2X0_CACHE_ID_PART_MASK (0xf << 6)
#define L2X0_CACHE_ID_PART_L210 (1 << 6)
#define L2X0_CACHE_ID_PART_L310 (3 << 6)
+#define L2X0_CACHE_ID_RTL_MASK 0x3f
+#define L2X0_CACHE_ID_RTL_R0P0 0x0
+#define L2X0_CACHE_ID_RTL_R1P0 0x2
+#define L2X0_CACHE_ID_RTL_R2P0 0x4
+#define L2X0_CACHE_ID_RTL_R3P0 0x5
+#define L2X0_CACHE_ID_RTL_R3P1 0x6
+#define L2X0_CACHE_ID_RTL_R3P2 0x8
#define L2X0_AUX_CTRL_MASK 0xc0000fff
+#define L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT 0
+#define L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK 0x7
+#define L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT 3
+#define L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK (0x7 << 3)
+#define L2X0_AUX_CTRL_TAG_LATENCY_SHIFT 6
+#define L2X0_AUX_CTRL_TAG_LATENCY_MASK (0x7 << 6)
+#define L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT 9
+#define L2X0_AUX_CTRL_DIRTY_LATENCY_MASK (0x7 << 9)
#define L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT 16
#define L2X0_AUX_CTRL_WAY_SIZE_SHIFT 17
#define L2X0_AUX_CTRL_WAY_SIZE_MASK (0x7 << 17)
@@ -77,8 +94,33 @@
#define L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT 29
#define L2X0_AUX_CTRL_EARLY_BRESP_SHIFT 30
+#define L2X0_LATENCY_CTRL_SETUP_SHIFT 0
+#define L2X0_LATENCY_CTRL_RD_SHIFT 4
+#define L2X0_LATENCY_CTRL_WR_SHIFT 8
+
+#define L2X0_ADDR_FILTER_EN 1
+
#ifndef __ASSEMBLY__
extern void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask);
+extern int l2x0_of_init(__u32 aux_val, __u32 aux_mask);
+
+struct l2x0_regs {
+ unsigned long phy_base;
+ unsigned long aux_ctrl;
+ /*
+ * Whether the following registers need to be saved/restored
+ * depends on platform
+ */
+ unsigned long tag_latency;
+ unsigned long data_latency;
+ unsigned long filter_start;
+ unsigned long filter_end;
+ unsigned long prefetch_ctrl;
+ unsigned long pwr_ctrl;
+};
+
+extern struct l2x0_regs l2x0_saved_regs;
+
#endif
#endif
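
Editor's note: the new latency shift/mask definitions make it possible to pull the RAM latency fields out of a saved auxiliary control value, and l2x0_saved_regs keeps those registers across suspend. A small user-space sketch of the decoding, under the assumption that the controller encodes each field as one less than the cycle count; the register snapshot below is invented:

	#include <stdio.h>

	#define L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT	0
	#define L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK	0x7
	#define L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT	3
	#define L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK	(0x7 << 3)

	int main(void)
	{
		unsigned long aux = 0x0203001a;		/* invented snapshot */
		unsigned int rd = (aux & L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK)
					>> L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT;
		unsigned int wr = (aux & L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK)
					>> L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT;

		printf("data RAM read latency %u cycles, write %u cycles\n",
		       rd + 1, wr + 1);
		return 0;
	}
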
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 4bdb77bb5e6..065d100fa63 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -80,6 +80,7 @@ extern void __iomem *__arm_ioremap_caller(unsigned long, size_t, unsigned int,
extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
extern void __iomem *__arm_ioremap(unsigned long, size_t, unsigned int);
+extern void __iomem *__arm_ioremap_exec(unsigned long, size_t, bool cached);
extern void __iounmap(volatile void __iomem *addr);
/*
@@ -110,6 +111,27 @@ static inline void __iomem *__typesafe_io(unsigned long addr)
#include <mach/io.h>
/*
+ * This is the limit of PC card/PCI/ISA IO space, which is by default
+ * 64K if we have PC card, PCI or ISA support. Otherwise, default to
+ * zero to prevent ISA/PCI drivers claiming IO space (and potentially
+ * oopsing).
+ *
+ * Only set this larger if you really need inb() et al. to operate over
+ * a larger address space. Note that SOC_COMMON ioremaps each socket's
+ * IO space area, and so inb() et al. must be defined to operate as per
+ * readb() et al. on such platforms.
+ */
+#ifndef IO_SPACE_LIMIT
+#if defined(CONFIG_PCMCIA_SOC_COMMON) || defined(CONFIG_PCMCIA_SOC_COMMON_MODULE)
+#define IO_SPACE_LIMIT ((resource_size_t)0xffffffff)
+#elif defined(CONFIG_PCI) || defined(CONFIG_ISA) || defined(CONFIG_PCCARD)
+#define IO_SPACE_LIMIT ((resource_size_t)0xffff)
+#else
+#define IO_SPACE_LIMIT ((resource_size_t)0)
+#endif
+#endif
+
+/*
* IO port access primitives
* -------------------------
*
@@ -189,11 +211,11 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
* IO port primitives for more information.
*/
#ifdef __mem_pci
-#define readb_relaxed(c) ({ u8 __v = __raw_readb(__mem_pci(c)); __v; })
-#define readw_relaxed(c) ({ u16 __v = le16_to_cpu((__force __le16) \
- __raw_readw(__mem_pci(c))); __v; })
-#define readl_relaxed(c) ({ u32 __v = le32_to_cpu((__force __le32) \
- __raw_readl(__mem_pci(c))); __v; })
+#define readb_relaxed(c) ({ u8 __r = __raw_readb(__mem_pci(c)); __r; })
+#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \
+ __raw_readw(__mem_pci(c))); __r; })
+#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
+ __raw_readl(__mem_pci(c))); __r; })
#define writeb_relaxed(v,c) ((void)__raw_writeb(v,__mem_pci(c)))
#define writew_relaxed(v,c) ((void)__raw_writew((__force u16) \
@@ -260,10 +282,16 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
#define ioread16(p) ({ unsigned int __v = le16_to_cpu((__force __le16)__raw_readw(p)); __iormb(); __v; })
#define ioread32(p) ({ unsigned int __v = le32_to_cpu((__force __le32)__raw_readl(p)); __iormb(); __v; })
+#define ioread16be(p) ({ unsigned int __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
+#define ioread32be(p) ({ unsigned int __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
+
#define iowrite8(v,p) ({ __iowmb(); (void)__raw_writeb(v, p); })
#define iowrite16(v,p) ({ __iowmb(); (void)__raw_writew((__force __u16)cpu_to_le16(v), p); })
#define iowrite32(v,p) ({ __iowmb(); (void)__raw_writel((__force __u32)cpu_to_le32(v), p); })
+#define iowrite16be(v,p) ({ __iowmb(); (void)__raw_writew((__force __u16)cpu_to_be16(v), p); })
+#define iowrite32be(v,p) ({ __iowmb(); (void)__raw_writel((__force __u32)cpu_to_be32(v), p); })
+
#define ioread8_rep(p,d,c) __raw_readsb(p,d,c)
#define ioread16_rep(p,d,c) __raw_readsw(p,d,c)
#define ioread32_rep(p,d,c) __raw_readsl(p,d,c)
diff --git a/arch/arm/include/asm/ioctl.h b/arch/arm/include/asm/ioctl.h
deleted file mode 100644
index b279fe06dfe..00000000000
--- a/arch/arm/include/asm/ioctl.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ioctl.h>
diff --git a/arch/arm/include/asm/irq_regs.h b/arch/arm/include/asm/irq_regs.h
deleted file mode 100644
index 3dd9c0b7027..00000000000
--- a/arch/arm/include/asm/irq_regs.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/irq_regs.h>
diff --git a/arch/arm/include/asm/kdebug.h b/arch/arm/include/asm/kdebug.h
deleted file mode 100644
index 6ece1b03766..00000000000
--- a/arch/arm/include/asm/kdebug.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kdebug.h>
diff --git a/arch/arm/include/asm/local.h b/arch/arm/include/asm/local.h
deleted file mode 100644
index c11c530f74d..00000000000
--- a/arch/arm/include/asm/local.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local.h>
diff --git a/arch/arm/include/asm/local64.h b/arch/arm/include/asm/local64.h
deleted file mode 100644
index 36c93b5cc23..00000000000
--- a/arch/arm/include/asm/local64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local64.h>
diff --git a/arch/arm/include/asm/localtimer.h b/arch/arm/include/asm/localtimer.h
index 080d74f8128..6fd955d34c6 100644
--- a/arch/arm/include/asm/localtimer.h
+++ b/arch/arm/include/asm/localtimer.h
@@ -10,6 +10,8 @@
#ifndef __ASM_ARM_LOCALTIMER_H
#define __ASM_ARM_LOCALTIMER_H
+#include <linux/errno.h>
+
struct clock_event_device;
/*
@@ -22,6 +24,10 @@ void percpu_timer_setup(void);
*/
asmlinkage void do_local_timer(struct pt_regs *);
+/*
+ * Called from C code
+ */
+void handle_local_timer(struct pt_regs *);
#ifdef CONFIG_LOCAL_TIMERS
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 217aa1911dd..c5699987fa9 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -34,8 +34,7 @@ struct machine_desc {
unsigned int reserve_lp1 :1; /* never has lp1 */
unsigned int reserve_lp2 :1; /* never has lp2 */
unsigned int soft_reboot :1; /* soft reboot */
- void (*fixup)(struct machine_desc *,
- struct tag *, char **,
+ void (*fixup)(struct tag *, char **,
struct meminfo *);
void (*reserve)(void);/* reserve mem blocks */
void (*map_io)(void);/* IO mapping function */
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index b8de516e600..441fc4fe826 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -160,7 +160,6 @@
* so that all we need to do is modify the 8-bit constant field.
*/
#define __PV_BITS_31_24 0x81000000
-#define __PV_BITS_23_16 0x00810000
extern unsigned long __pv_phys_offset;
#define PHYS_OFFSET __pv_phys_offset
@@ -178,9 +177,6 @@ static inline unsigned long __virt_to_phys(unsigned long x)
{
unsigned long t;
__pv_stub(x, t, "add", __PV_BITS_31_24);
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
- __pv_stub(t, t, "add", __PV_BITS_23_16);
-#endif
return t;
}
@@ -188,9 +184,6 @@ static inline unsigned long __phys_to_virt(unsigned long x)
{
unsigned long t;
__pv_stub(x, t, "sub", __PV_BITS_31_24);
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
- __pv_stub(t, t, "sub", __PV_BITS_23_16);
-#endif
return t;
}
#else
diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h
index 543b44916d2..6c6809f982f 100644
--- a/arch/arm/include/asm/module.h
+++ b/arch/arm/include/asm/module.h
@@ -31,11 +31,7 @@ struct mod_arch_specific {
/* Add __virt_to_phys patching state as well */
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
-#define MODULE_ARCH_VERMAGIC_P2V "p2v16 "
-#else
#define MODULE_ARCH_VERMAGIC_P2V "p2v8 "
-#endif
#else
#define MODULE_ARCH_VERMAGIC_P2V ""
#endif
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index d8387437ec5..53426c66352 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -34,6 +34,7 @@ struct outer_cache_fns {
void (*sync)(void);
#endif
void (*set_debug)(unsigned long);
+ void (*resume)(void);
};
#ifdef CONFIG_OUTER_CACHE
@@ -74,6 +75,12 @@ static inline void outer_disable(void)
outer_cache.disable();
}
+static inline void outer_resume(void)
+{
+ if (outer_cache.resume)
+ outer_cache.resume();
+}
+
#else
static inline void outer_inv_range(phys_addr_t start, phys_addr_t end)
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index ac75d084888..ca94653f1ec 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -151,47 +151,7 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
extern void copy_page(void *to, const void *from);
-typedef unsigned long pteval_t;
-
-#undef STRICT_MM_TYPECHECKS
-
-#ifdef STRICT_MM_TYPECHECKS
-/*
- * These are used to make use of C type-checking..
- */
-typedef struct { pteval_t pte; } pte_t;
-typedef struct { unsigned long pmd; } pmd_t;
-typedef struct { unsigned long pgd[2]; } pgd_t;
-typedef struct { unsigned long pgprot; } pgprot_t;
-
-#define pte_val(x) ((x).pte)
-#define pmd_val(x) ((x).pmd)
-#define pgd_val(x) ((x).pgd[0])
-#define pgprot_val(x) ((x).pgprot)
-
-#define __pte(x) ((pte_t) { (x) } )
-#define __pmd(x) ((pmd_t) { (x) } )
-#define __pgprot(x) ((pgprot_t) { (x) } )
-
-#else
-/*
- * .. while these make it easier on the compiler
- */
-typedef pteval_t pte_t;
-typedef unsigned long pmd_t;
-typedef unsigned long pgd_t[2];
-typedef unsigned long pgprot_t;
-
-#define pte_val(x) (x)
-#define pmd_val(x) (x)
-#define pgd_val(x) ((x)[0])
-#define pgprot_val(x) (x)
-
-#define __pte(x) (x)
-#define __pmd(x) (x)
-#define __pgprot(x) (x)
-
-#endif /* STRICT_MM_TYPECHECKS */
+#include <asm/pgtable-2level-types.h>
#endif /* CONFIG_MMU */
diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h
deleted file mode 100644
index b4e32d8ec07..00000000000
--- a/arch/arm/include/asm/percpu.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ARM_PERCPU
-#define __ARM_PERCPU
-
-#include <asm-generic/percpu.h>
-
-#endif
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index 22de005f159..3e08fd3fbb6 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -105,9 +105,9 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
}
static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
- unsigned long prot)
+ pmdval_t prot)
{
- unsigned long pmdval = (pte + PTE_HWTABLE_OFF) | prot;
+ pmdval_t pmdval = (pte + PTE_HWTABLE_OFF) | prot;
pmdp[0] = __pmd(pmdval);
pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
flush_pmd_entry(pmdp);
diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
new file mode 100644
index 00000000000..5cfba15cb40
--- /dev/null
+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
@@ -0,0 +1,93 @@
+/*
+ * arch/arm/include/asm/pgtable-2level-hwdef.h
+ *
+ * Copyright (C) 1995-2002 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_PGTABLE_2LEVEL_HWDEF_H
+#define _ASM_PGTABLE_2LEVEL_HWDEF_H
+
+/*
+ * Hardware page table definitions.
+ *
+ * + Level 1 descriptor (PMD)
+ * - common
+ */
+#define PMD_TYPE_MASK (_AT(pmdval_t, 3) << 0)
+#define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
+#define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
+#define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
+#define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
+#define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
+#define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
+/*
+ * - section
+ */
+#define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
+#define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
+#define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
+#define PMD_SECT_AP_WRITE (_AT(pmdval_t, 1) << 10)
+#define PMD_SECT_AP_READ (_AT(pmdval_t, 1) << 11)
+#define PMD_SECT_TEX(x) (_AT(pmdval_t, (x)) << 12) /* v5 */
+#define PMD_SECT_APX (_AT(pmdval_t, 1) << 15) /* v6 */
+#define PMD_SECT_S (_AT(pmdval_t, 1) << 16) /* v6 */
+#define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
+#define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
+#define PMD_SECT_AF (_AT(pmdval_t, 0))
+
+#define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
+#define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
+#define PMD_SECT_WT (PMD_SECT_CACHEABLE)
+#define PMD_SECT_WB (PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
+#define PMD_SECT_MINICACHE (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE)
+#define PMD_SECT_WBWA (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
+#define PMD_SECT_NONSHARED_DEV (PMD_SECT_TEX(2))
+
+/*
+ * - coarse table (not used)
+ */
+
+/*
+ * + Level 2 descriptor (PTE)
+ * - common
+ */
+#define PTE_TYPE_MASK (_AT(pteval_t, 3) << 0)
+#define PTE_TYPE_FAULT (_AT(pteval_t, 0) << 0)
+#define PTE_TYPE_LARGE (_AT(pteval_t, 1) << 0)
+#define PTE_TYPE_SMALL (_AT(pteval_t, 2) << 0)
+#define PTE_TYPE_EXT (_AT(pteval_t, 3) << 0) /* v5 */
+#define PTE_BUFFERABLE (_AT(pteval_t, 1) << 2)
+#define PTE_CACHEABLE (_AT(pteval_t, 1) << 3)
+
+/*
+ * - extended small page/tiny page
+ */
+#define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
+#define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
+#define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
+#define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
+#define PTE_EXT_AP_UNO_SRO (_AT(pteval_t, 0) << 4)
+#define PTE_EXT_AP_UNO_SRW (PTE_EXT_AP0)
+#define PTE_EXT_AP_URO_SRW (PTE_EXT_AP1)
+#define PTE_EXT_AP_URW_SRW (PTE_EXT_AP1|PTE_EXT_AP0)
+#define PTE_EXT_TEX(x) (_AT(pteval_t, (x)) << 6) /* v5 */
+#define PTE_EXT_APX (_AT(pteval_t, 1) << 9) /* v6 */
+#define PTE_EXT_COHERENT (_AT(pteval_t, 1) << 9) /* XScale3 */
+#define PTE_EXT_SHARED (_AT(pteval_t, 1) << 10) /* v6 */
+#define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* v6 */
+
+/*
+ * - small page
+ */
+#define PTE_SMALL_AP_MASK (_AT(pteval_t, 0xff) << 4)
+#define PTE_SMALL_AP_UNO_SRO (_AT(pteval_t, 0x00) << 4)
+#define PTE_SMALL_AP_UNO_SRW (_AT(pteval_t, 0x55) << 4)
+#define PTE_SMALL_AP_URO_SRW (_AT(pteval_t, 0xaa) << 4)
+#define PTE_SMALL_AP_URW_SRW (_AT(pteval_t, 0xff) << 4)
+
+#define PHYS_MASK (~0UL)
+
+#endif
diff --git a/arch/arm/include/asm/pgtable-2level-types.h b/arch/arm/include/asm/pgtable-2level-types.h
new file mode 100644
index 00000000000..66cb5b0e89c
--- /dev/null
+++ b/arch/arm/include/asm/pgtable-2level-types.h
@@ -0,0 +1,67 @@
+/*
+ * arch/arm/include/asm/pgtable-2level-types.h
+ *
+ * Copyright (C) 1995-2003 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _ASM_PGTABLE_2LEVEL_TYPES_H
+#define _ASM_PGTABLE_2LEVEL_TYPES_H
+
+#include <asm/types.h>
+
+typedef u32 pteval_t;
+typedef u32 pmdval_t;
+
+#undef STRICT_MM_TYPECHECKS
+
+#ifdef STRICT_MM_TYPECHECKS
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct { pteval_t pte; } pte_t;
+typedef struct { pmdval_t pmd; } pmd_t;
+typedef struct { pmdval_t pgd[2]; } pgd_t;
+typedef struct { pteval_t pgprot; } pgprot_t;
+
+#define pte_val(x) ((x).pte)
+#define pmd_val(x) ((x).pmd)
+#define pgd_val(x) ((x).pgd[0])
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) } )
+#define __pmd(x) ((pmd_t) { (x) } )
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+#else
+/*
+ * .. while these make it easier on the compiler
+ */
+typedef pteval_t pte_t;
+typedef pmdval_t pmd_t;
+typedef pmdval_t pgd_t[2];
+typedef pteval_t pgprot_t;
+
+#define pte_val(x) (x)
+#define pmd_val(x) (x)
+#define pgd_val(x) ((x)[0])
+#define pgprot_val(x) (x)
+
+#define __pte(x) (x)
+#define __pmd(x) (x)
+#define __pgprot(x) (x)
+
+#endif /* STRICT_MM_TYPECHECKS */
+
+#endif /* _ASM_PGTABLE_2LEVEL_TYPES_H */
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
new file mode 100644
index 00000000000..470457e1cfc
--- /dev/null
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -0,0 +1,143 @@
+/*
+ * arch/arm/include/asm/pgtable-2level.h
+ *
+ * Copyright (C) 1995-2002 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_PGTABLE_2LEVEL_H
+#define _ASM_PGTABLE_2LEVEL_H
+
+/*
+ * Hardware-wise, we have a two level page table structure, where the first
+ * level has 4096 entries, and the second level has 256 entries. Each entry
+ * is one 32-bit word. Most of the bits in the second level entry are used
+ * by hardware, and there aren't any "accessed" and "dirty" bits.
+ *
+ * Linux on the other hand has a three level page table structure, which can
+ * be wrapped to fit a two level page table structure easily - using the PGD
+ * and PTE only. However, Linux also expects one "PTE" table per page, and
+ * at least a "dirty" bit.
+ *
+ * Therefore, we tweak the implementation slightly - we tell Linux that we
+ * have 2048 entries in the first level, each of which is 8 bytes (iow, two
+ * hardware pointers to the second level.) The second level contains two
+ * hardware PTE tables arranged contiguously, preceded by Linux versions
+ * which contain the state information Linux needs. We, therefore, end up
+ * with 512 entries in the "PTE" level.
+ *
+ * This leads to the page tables having the following layout:
+ *
+ * pgd pte
+ * | |
+ * +--------+
+ * | | +------------+ +0
+ * +- - - - + | Linux pt 0 |
+ * | | +------------+ +1024
+ * +--------+ +0 | Linux pt 1 |
+ * | |-----> +------------+ +2048
+ * +- - - - + +4 | h/w pt 0 |
+ * | |-----> +------------+ +3072
+ * +--------+ +8 | h/w pt 1 |
+ * | | +------------+ +4096
+ *
+ * See L_PTE_xxx below for definitions of bits in the "Linux pt", and
+ * PTE_xxx for definitions of bits appearing in the "h/w pt".
+ *
+ * PMD_xxx definitions refer to bits in the first level page table.
+ *
+ * The "dirty" bit is emulated by only granting hardware write permission
+ * iff the page is marked "writable" and "dirty" in the Linux PTE. This
+ * means that a write to a clean page will cause a permission fault, and
+ * the Linux MM layer will mark the page dirty via handle_pte_fault().
+ * For the hardware to notice the permission change, the TLB entry must
+ * be flushed, and ptep_set_access_flags() does that for us.
+ *
+ * The "accessed" or "young" bit is emulated by a similar method; we only
+ * allow accesses to the page if the "young" bit is set. Accesses to the
+ * page will cause a fault, and handle_pte_fault() will set the young bit
+ * for us as long as the page is marked present in the corresponding Linux
+ * PTE entry. Again, ptep_set_access_flags() will ensure that the TLB is
+ * up to date.
+ *
+ * However, when the "young" bit is cleared, we deny access to the page
+ * by clearing the hardware PTE. Currently Linux does not flush the TLB
+ * for us in this case, which means the TLB will retain the transation
+ * until either the TLB entry is evicted under pressure, or a context
+ * switch which changes the user space mapping occurs.
+ */
+#define PTRS_PER_PTE 512
+#define PTRS_PER_PMD 1
+#define PTRS_PER_PGD 2048
+
+#define PTE_HWTABLE_PTRS (PTRS_PER_PTE)
+#define PTE_HWTABLE_OFF (PTE_HWTABLE_PTRS * sizeof(pte_t))
+#define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u32))
+
+/*
+ * PMD_SHIFT determines the size of the area a second-level page table can map
+ * PGDIR_SHIFT determines what a third-level page table entry can map
+ */
+#define PMD_SHIFT 21
+#define PGDIR_SHIFT 21
+
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+/*
+ * section address mask and size definitions.
+ */
+#define SECTION_SHIFT 20
+#define SECTION_SIZE (1UL << SECTION_SHIFT)
+#define SECTION_MASK (~(SECTION_SIZE-1))
+
+/*
+ * ARMv6 supersection address mask and size definitions.
+ */
+#define SUPERSECTION_SHIFT 24
+#define SUPERSECTION_SIZE (1UL << SUPERSECTION_SHIFT)
+#define SUPERSECTION_MASK (~(SUPERSECTION_SIZE-1))
+
+#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
+
+/*
+ * "Linux" PTE definitions.
+ *
+ * We keep two sets of PTEs - the hardware and the linux version.
+ * This allows greater flexibility in the way we map the Linux bits
+ * onto the hardware tables, and allows us to have YOUNG and DIRTY
+ * bits.
+ *
+ * The PTE table pointer refers to the hardware entries; the "Linux"
+ * entries are stored 1024 bytes below.
+ */
+#define L_PTE_PRESENT (_AT(pteval_t, 1) << 0)
+#define L_PTE_YOUNG (_AT(pteval_t, 1) << 1)
+#define L_PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !PRESENT */
+#define L_PTE_DIRTY (_AT(pteval_t, 1) << 6)
+#define L_PTE_RDONLY (_AT(pteval_t, 1) << 7)
+#define L_PTE_USER (_AT(pteval_t, 1) << 8)
+#define L_PTE_XN (_AT(pteval_t, 1) << 9)
+#define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
+
+/*
+ * These are the memory types, defined to be compatible with
+ * pre-ARMv6 CPUs' cacheable and bufferable bits: XXCB
+ */
+#define L_PTE_MT_UNCACHED (_AT(pteval_t, 0x00) << 2) /* 0000 */
+#define L_PTE_MT_BUFFERABLE (_AT(pteval_t, 0x01) << 2) /* 0001 */
+#define L_PTE_MT_WRITETHROUGH (_AT(pteval_t, 0x02) << 2) /* 0010 */
+#define L_PTE_MT_WRITEBACK (_AT(pteval_t, 0x03) << 2) /* 0011 */
+#define L_PTE_MT_MINICACHE (_AT(pteval_t, 0x06) << 2) /* 0110 (sa1100, xscale) */
+#define L_PTE_MT_WRITEALLOC (_AT(pteval_t, 0x07) << 2) /* 0111 */
+#define L_PTE_MT_DEV_SHARED (_AT(pteval_t, 0x04) << 2) /* 0100 */
+#define L_PTE_MT_DEV_NONSHARED (_AT(pteval_t, 0x0c) << 2) /* 1100 */
+#define L_PTE_MT_DEV_WC (_AT(pteval_t, 0x09) << 2) /* 1001 */
+#define L_PTE_MT_DEV_CACHED (_AT(pteval_t, 0x0b) << 2) /* 1011 */
+#define L_PTE_MT_MASK (_AT(pteval_t, 0x0f) << 2)
+
+#endif /* _ASM_PGTABLE_2LEVEL_H */
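
Editor's note: a quick arithmetic check of the layout the comment block above describes, using the constants from this header: 2048 pgd entries of 2MB each cover the full 32-bit address space, and each PTE page holds the 512 Linux entries followed by the two contiguous 256-entry hardware tables, filling exactly one 4K page. Stand-alone sketch with the values copied from the definitions above:

	#include <stdio.h>

	#define PGDIR_SHIFT		21
	#define PTRS_PER_PGD		2048
	#define PTRS_PER_PTE		512
	#define PTE_SIZE		4	/* sizeof(pte_t) with 2-level tables */
	#define PTE_HWTABLE_OFF		(PTRS_PER_PTE * PTE_SIZE)
	#define PTE_HWTABLE_SIZE	(PTRS_PER_PTE * PTE_SIZE)

	int main(void)
	{
		unsigned long long covered = (unsigned long long)PTRS_PER_PGD << PGDIR_SHIFT;

		/* 2048 entries x 2MiB = 4096 MiB, i.e. the whole 4GiB space */
		printf("address space covered: %llu MiB\n", covered >> 20);

		/* 2048 bytes of Linux ptes + 2048 bytes of h/w tables = one 4K page */
		printf("pte page usage: %d + %d = %d bytes\n",
		       PTE_HWTABLE_OFF, PTE_HWTABLE_SIZE,
		       PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
		return 0;
	}
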
diff --git a/arch/arm/include/asm/pgtable-hwdef.h b/arch/arm/include/asm/pgtable-hwdef.h
index fd1521d5cb9..183111164ce 100644
--- a/arch/arm/include/asm/pgtable-hwdef.h
+++ b/arch/arm/include/asm/pgtable-hwdef.h
@@ -10,81 +10,6 @@
#ifndef _ASMARM_PGTABLE_HWDEF_H
#define _ASMARM_PGTABLE_HWDEF_H
-/*
- * Hardware page table definitions.
- *
- * + Level 1 descriptor (PMD)
- * - common
- */
-#define PMD_TYPE_MASK (3 << 0)
-#define PMD_TYPE_FAULT (0 << 0)
-#define PMD_TYPE_TABLE (1 << 0)
-#define PMD_TYPE_SECT (2 << 0)
-#define PMD_BIT4 (1 << 4)
-#define PMD_DOMAIN(x) ((x) << 5)
-#define PMD_PROTECTION (1 << 9) /* v5 */
-/*
- * - section
- */
-#define PMD_SECT_BUFFERABLE (1 << 2)
-#define PMD_SECT_CACHEABLE (1 << 3)
-#define PMD_SECT_XN (1 << 4) /* v6 */
-#define PMD_SECT_AP_WRITE (1 << 10)
-#define PMD_SECT_AP_READ (1 << 11)
-#define PMD_SECT_TEX(x) ((x) << 12) /* v5 */
-#define PMD_SECT_APX (1 << 15) /* v6 */
-#define PMD_SECT_S (1 << 16) /* v6 */
-#define PMD_SECT_nG (1 << 17) /* v6 */
-#define PMD_SECT_SUPER (1 << 18) /* v6 */
-
-#define PMD_SECT_UNCACHED (0)
-#define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
-#define PMD_SECT_WT (PMD_SECT_CACHEABLE)
-#define PMD_SECT_WB (PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
-#define PMD_SECT_MINICACHE (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE)
-#define PMD_SECT_WBWA (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
-#define PMD_SECT_NONSHARED_DEV (PMD_SECT_TEX(2))
-
-/*
- * - coarse table (not used)
- */
-
-/*
- * + Level 2 descriptor (PTE)
- * - common
- */
-#define PTE_TYPE_MASK (3 << 0)
-#define PTE_TYPE_FAULT (0 << 0)
-#define PTE_TYPE_LARGE (1 << 0)
-#define PTE_TYPE_SMALL (2 << 0)
-#define PTE_TYPE_EXT (3 << 0) /* v5 */
-#define PTE_BUFFERABLE (1 << 2)
-#define PTE_CACHEABLE (1 << 3)
-
-/*
- * - extended small page/tiny page
- */
-#define PTE_EXT_XN (1 << 0) /* v6 */
-#define PTE_EXT_AP_MASK (3 << 4)
-#define PTE_EXT_AP0 (1 << 4)
-#define PTE_EXT_AP1 (2 << 4)
-#define PTE_EXT_AP_UNO_SRO (0 << 4)
-#define PTE_EXT_AP_UNO_SRW (PTE_EXT_AP0)
-#define PTE_EXT_AP_URO_SRW (PTE_EXT_AP1)
-#define PTE_EXT_AP_URW_SRW (PTE_EXT_AP1|PTE_EXT_AP0)
-#define PTE_EXT_TEX(x) ((x) << 6) /* v5 */
-#define PTE_EXT_APX (1 << 9) /* v6 */
-#define PTE_EXT_COHERENT (1 << 9) /* XScale3 */
-#define PTE_EXT_SHARED (1 << 10) /* v6 */
-#define PTE_EXT_NG (1 << 11) /* v6 */
-
-/*
- * - small page
- */
-#define PTE_SMALL_AP_MASK (0xff << 4)
-#define PTE_SMALL_AP_UNO_SRO (0x00 << 4)
-#define PTE_SMALL_AP_UNO_SRW (0x55 << 4)
-#define PTE_SMALL_AP_URO_SRW (0xaa << 4)
-#define PTE_SMALL_AP_URW_SRW (0xff << 4)
+#include <asm/pgtable-2level-hwdef.h>
#endif
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 5750704e027..8ade1840c6f 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -24,6 +24,8 @@
#include <mach/vmalloc.h>
#include <asm/pgtable-hwdef.h>
+#include <asm/pgtable-2level.h>
+
/*
* Just any arbitrary offset to the start of the vmalloc VM area: the
* current 8MB value just means that there will be a 8MB "hole" after the
@@ -41,79 +43,6 @@
#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#endif
-/*
- * Hardware-wise, we have a two level page table structure, where the first
- * level has 4096 entries, and the second level has 256 entries. Each entry
- * is one 32-bit word. Most of the bits in the second level entry are used
- * by hardware, and there aren't any "accessed" and "dirty" bits.
- *
- * Linux on the other hand has a three level page table structure, which can
- * be wrapped to fit a two level page table structure easily - using the PGD
- * and PTE only. However, Linux also expects one "PTE" table per page, and
- * at least a "dirty" bit.
- *
- * Therefore, we tweak the implementation slightly - we tell Linux that we
- * have 2048 entries in the first level, each of which is 8 bytes (iow, two
- * hardware pointers to the second level.) The second level contains two
- * hardware PTE tables arranged contiguously, preceded by Linux versions
- * which contain the state information Linux needs. We, therefore, end up
- * with 512 entries in the "PTE" level.
- *
- * This leads to the page tables having the following layout:
- *
- * pgd pte
- * | |
- * +--------+
- * | | +------------+ +0
- * +- - - - + | Linux pt 0 |
- * | | +------------+ +1024
- * +--------+ +0 | Linux pt 1 |
- * | |-----> +------------+ +2048
- * +- - - - + +4 | h/w pt 0 |
- * | |-----> +------------+ +3072
- * +--------+ +8 | h/w pt 1 |
- * | | +------------+ +4096
- *
- * See L_PTE_xxx below for definitions of bits in the "Linux pt", and
- * PTE_xxx for definitions of bits appearing in the "h/w pt".
- *
- * PMD_xxx definitions refer to bits in the first level page table.
- *
- * The "dirty" bit is emulated by only granting hardware write permission
- * iff the page is marked "writable" and "dirty" in the Linux PTE. This
- * means that a write to a clean page will cause a permission fault, and
- * the Linux MM layer will mark the page dirty via handle_pte_fault().
- * For the hardware to notice the permission change, the TLB entry must
- * be flushed, and ptep_set_access_flags() does that for us.
- *
- * The "accessed" or "young" bit is emulated by a similar method; we only
- * allow accesses to the page if the "young" bit is set. Accesses to the
- * page will cause a fault, and handle_pte_fault() will set the young bit
- * for us as long as the page is marked present in the corresponding Linux
- * PTE entry. Again, ptep_set_access_flags() will ensure that the TLB is
- * up to date.
- *
- * However, when the "young" bit is cleared, we deny access to the page
- * by clearing the hardware PTE. Currently Linux does not flush the TLB
- * for us in this case, which means the TLB will retain the transation
- * until either the TLB entry is evicted under pressure, or a context
- * switch which changes the user space mapping occurs.
- */
-#define PTRS_PER_PTE 512
-#define PTRS_PER_PMD 1
-#define PTRS_PER_PGD 2048
-
-#define PTE_HWTABLE_PTRS (PTRS_PER_PTE)
-#define PTE_HWTABLE_OFF (PTE_HWTABLE_PTRS * sizeof(pte_t))
-#define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u32))
-
-/*
- * PMD_SHIFT determines the size of the area a second-level page table can map
- * PGDIR_SHIFT determines what a third-level page table entry can map
- */
-#define PMD_SHIFT 21
-#define PGDIR_SHIFT 21
-
#define LIBRARY_TEXT_START 0x0c000000
#ifndef __ASSEMBLY__
@@ -124,12 +53,6 @@ extern void __pgd_error(const char *file, int line, pgd_t);
#define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte)
#define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
-#endif /* !__ASSEMBLY__ */
-
-#define PMD_SIZE (1UL << PMD_SHIFT)
-#define PMD_MASK (~(PMD_SIZE-1))
-#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
-#define PGDIR_MASK (~(PGDIR_SIZE-1))
/*
* This is the lowest virtual address we can permit any user space
@@ -138,60 +61,6 @@ extern void __pgd_error(const char *file, int line, pgd_t);
*/
#define FIRST_USER_ADDRESS PAGE_SIZE
-#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
-
-/*
- * section address mask and size definitions.
- */
-#define SECTION_SHIFT 20
-#define SECTION_SIZE (1UL << SECTION_SHIFT)
-#define SECTION_MASK (~(SECTION_SIZE-1))
-
-/*
- * ARMv6 supersection address mask and size definitions.
- */
-#define SUPERSECTION_SHIFT 24
-#define SUPERSECTION_SIZE (1UL << SUPERSECTION_SHIFT)
-#define SUPERSECTION_MASK (~(SUPERSECTION_SIZE-1))
-
-/*
- * "Linux" PTE definitions.
- *
- * We keep two sets of PTEs - the hardware and the linux version.
- * This allows greater flexibility in the way we map the Linux bits
- * onto the hardware tables, and allows us to have YOUNG and DIRTY
- * bits.
- *
- * The PTE table pointer refers to the hardware entries; the "Linux"
- * entries are stored 1024 bytes below.
- */
-#define L_PTE_PRESENT (_AT(pteval_t, 1) << 0)
-#define L_PTE_YOUNG (_AT(pteval_t, 1) << 1)
-#define L_PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !PRESENT */
-#define L_PTE_DIRTY (_AT(pteval_t, 1) << 6)
-#define L_PTE_RDONLY (_AT(pteval_t, 1) << 7)
-#define L_PTE_USER (_AT(pteval_t, 1) << 8)
-#define L_PTE_XN (_AT(pteval_t, 1) << 9)
-#define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
-
-/*
- * These are the memory types, defined to be compatible with
- * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
- */
-#define L_PTE_MT_UNCACHED (_AT(pteval_t, 0x00) << 2) /* 0000 */
-#define L_PTE_MT_BUFFERABLE (_AT(pteval_t, 0x01) << 2) /* 0001 */
-#define L_PTE_MT_WRITETHROUGH (_AT(pteval_t, 0x02) << 2) /* 0010 */
-#define L_PTE_MT_WRITEBACK (_AT(pteval_t, 0x03) << 2) /* 0011 */
-#define L_PTE_MT_MINICACHE (_AT(pteval_t, 0x06) << 2) /* 0110 (sa1100, xscale) */
-#define L_PTE_MT_WRITEALLOC (_AT(pteval_t, 0x07) << 2) /* 0111 */
-#define L_PTE_MT_DEV_SHARED (_AT(pteval_t, 0x04) << 2) /* 0100 */
-#define L_PTE_MT_DEV_NONSHARED (_AT(pteval_t, 0x0c) << 2) /* 1100 */
-#define L_PTE_MT_DEV_WC (_AT(pteval_t, 0x09) << 2) /* 1001 */
-#define L_PTE_MT_DEV_CACHED (_AT(pteval_t, 0x0b) << 2) /* 1011 */
-#define L_PTE_MT_MASK (_AT(pteval_t, 0x0f) << 2)
-
-#ifndef __ASSEMBLY__
-
/*
* The pgprot_* and protection_map entries will be fixed up in runtime
* to include the cachable and bufferable bits based on memory policy,
@@ -327,10 +196,10 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
- return __va(pmd_val(pmd) & PAGE_MASK);
+ return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}
-#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd)))
+#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
/* we don't need complex calculations here as the pmd is folded into the pgd */
#define pmd_addr_end(addr,end) (end)
@@ -351,7 +220,7 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
#define pte_offset_map(pmd,addr) (__pte_map(pmd) + pte_index(addr))
#define pte_unmap(pte) __pte_unmap(pte)
-#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
+#define pte_pfn(pte) ((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot) __pte(__pfn_to_phys(pfn) | pgprot_val(prot))
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
diff --git a/arch/arm/include/asm/poll.h b/arch/arm/include/asm/poll.h
deleted file mode 100644
index c98509d3149..00000000000
--- a/arch/arm/include/asm/poll.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/poll.h>
diff --git a/arch/arm/include/asm/resource.h b/arch/arm/include/asm/resource.h
deleted file mode 100644
index 734b581b5b6..00000000000
--- a/arch/arm/include/asm/resource.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ARM_RESOURCE_H
-#define _ARM_RESOURCE_H
-
-#include <asm-generic/resource.h>
-
-#endif
diff --git a/arch/arm/include/asm/sections.h b/arch/arm/include/asm/sections.h
deleted file mode 100644
index 2b8c5160388..00000000000
--- a/arch/arm/include/asm/sections.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/sections.h>
diff --git a/arch/arm/include/asm/siginfo.h b/arch/arm/include/asm/siginfo.h
deleted file mode 100644
index 5e21852e603..00000000000
--- a/arch/arm/include/asm/siginfo.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASMARM_SIGINFO_H
-#define _ASMARM_SIGINFO_H
-
-#include <asm-generic/siginfo.h>
-
-#endif
diff --git a/arch/arm/include/asm/sizes.h b/arch/arm/include/asm/sizes.h
deleted file mode 100644
index 154b89b81d3..00000000000
--- a/arch/arm/include/asm/sizes.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-/* Size definitions
- * Copyright (C) ARM Limited 1998. All rights reserved.
- */
-#include <asm-generic/sizes.h>
-
-#define SZ_48M (SZ_32M + SZ_16M)
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index e42d96a45d3..0a17b62538c 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -33,6 +33,11 @@ extern void show_ipi_list(struct seq_file *, int);
asmlinkage void do_IPI(int ipinr, struct pt_regs *regs);
/*
+ * Called from C code, this handles an IPI.
+ */
+void handle_IPI(int ipinr, struct pt_regs *regs);
+
+/*
* Setup the set of possible CPUs (via set_cpu_possible)
*/
extern void smp_init_cpus(void);
@@ -66,6 +71,12 @@ extern void platform_secondary_init(unsigned int cpu);
extern void platform_smp_prepare_cpus(unsigned int);
/*
+ * Logical CPU mapping.
+ */
+extern int __cpu_logical_map[NR_CPUS];
+#define cpu_logical_map(cpu) __cpu_logical_map[cpu]
+
+/*
* Initial data for bringing up a secondary CPU.
*/
struct secondary_data {
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 832888d0c20..984014b9264 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -57,18 +57,12 @@
#ifndef __ASSEMBLY__
+#include <linux/compiler.h>
#include <linux/linkage.h>
#include <linux/irqflags.h>
#include <asm/outercache.h>
-#define __exception __attribute__((section(".exception.text")))
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-#define __exception_irq_entry __irq_entry
-#else
-#define __exception_irq_entry __exception
-#endif
-
struct thread_info;
struct task_struct;
@@ -97,14 +91,13 @@ void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int,
#define xchg(ptr,x) \
((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-extern asmlinkage void __backtrace(void);
extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
struct mm_struct;
extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);
-extern int cpu_architecture(void);
+extern int __pure cpu_architecture(void);
extern void cpu_init(void);
void arm_machine_restart(char mode, const char *cmd);
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 8077145698f..02b2f820398 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -471,7 +471,7 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
* these operations. This is typically used when we are removing
* PMD entries.
*/
-static inline void flush_pmd_entry(pmd_t *pmd)
+static inline void flush_pmd_entry(void *pmd)
{
const unsigned int __tlb_flag = __cpu_tlb_flags;
@@ -487,7 +487,7 @@ static inline void flush_pmd_entry(pmd_t *pmd)
dsb();
}
-static inline void clean_pmd_entry(pmd_t *pmd)
+static inline void clean_pmd_entry(void *pmd)
{
const unsigned int __tlb_flag = __cpu_tlb_flags;
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index accbd7cad9b..a7e457ed27c 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -1,6 +1,39 @@
#ifndef _ASM_ARM_TOPOLOGY_H
#define _ASM_ARM_TOPOLOGY_H
+#ifdef CONFIG_ARM_CPU_TOPOLOGY
+
+#include <linux/cpumask.h>
+
+struct cputopo_arm {
+ int thread_id;
+ int core_id;
+ int socket_id;
+ cpumask_t thread_sibling;
+ cpumask_t core_sibling;
+};
+
+extern struct cputopo_arm cpu_topology[NR_CPUS];
+
+#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
+#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
+#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
+#define topology_thread_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
+
+#define mc_capable() (cpu_topology[0].socket_id != -1)
+#define smt_capable() (cpu_topology[0].thread_id != -1)
+
+void init_cpu_topology(void);
+void store_cpu_topology(unsigned int cpuid);
+const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
+
+#else
+
+static inline void init_cpu_topology(void) { }
+static inline void store_cpu_topology(unsigned int cpuid) { }
+
+#endif
+
#include <asm-generic/topology.h>
#endif /* _ASM_ARM_TOPOLOGY_H */
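As a usage note (not part of the patch), a hedged sketch of how generic code might consult the accessors declared above once CONFIG_ARM_CPU_TOPOLOGY is enabled; the helper name is hypothetical:

    #include <linux/cpumask.h>
    #include <linux/printk.h>
    #include <asm/topology.h>

    /* Hypothetical helper: dump the per-cpu core/socket ids exposed above. */
    static void sketch_dump_topology(void)
    {
            unsigned int cpu;

            for_each_possible_cpu(cpu)
                    pr_info("cpu%u: core %d, socket %d\n", cpu,
                            topology_core_id(cpu),
                            topology_physical_package_id(cpu));
    }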
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 2c04ed5efeb..c60a2944f95 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -478,8 +478,8 @@
/*
* Unimplemented (or alternatively implemented) syscalls
*/
-#define __IGNORE_fadvise64_64 1
-#define __IGNORE_migrate_pages 1
+#define __IGNORE_fadvise64_64
+#define __IGNORE_migrate_pages
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_UNISTD_H */
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index f7887dc53c1..68036eece34 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -29,7 +29,7 @@ obj-$(CONFIG_MODULES) += armksyms.o module.o
obj-$(CONFIG_ARTHUR) += arthur.o
obj-$(CONFIG_ISA_DMA) += dma-isa.o
obj-$(CONFIG_PCI) += bios32.o isa.o
-obj-$(CONFIG_PM_SLEEP) += sleep.o
+obj-$(CONFIG_ARM_CPU_SUSPEND) += sleep.o
obj-$(CONFIG_HAVE_SCHED_CLOCK) += sched_clock.o
obj-$(CONFIG_SMP) += smp.o smp_tlb.o
obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o
@@ -66,6 +66,7 @@ obj-$(CONFIG_IWMMXT) += iwmmxt.o
obj-$(CONFIG_CPU_HAS_PMU) += pmu.o
obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt
+obj-$(CONFIG_ARM_CPU_TOPOLOGY) += topology.o
ifneq ($(CONFIG_ARCH_EBSA110),y)
obj-y += io.o
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index aeef960ff79..8e3c6f11b0a 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -49,9 +49,6 @@ extern void __aeabi_ulcmp(void);
extern void fpundefinstr(void);
-
-EXPORT_SYMBOL(__backtrace);
-
/* platform dependent support */
EXPORT_SYMBOL(__udelay);
EXPORT_SYMBOL(__const_udelay);
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 16baba2e436..1429d8989fb 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -20,6 +20,7 @@
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/procinfo.h>
+#include <asm/hardware/cache-l2x0.h>
#include <linux/kbuild.h>
/*
@@ -92,6 +93,17 @@ int main(void)
DEFINE(S_OLD_R0, offsetof(struct pt_regs, ARM_ORIG_r0));
DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
BLANK();
+#ifdef CONFIG_CACHE_L2X0
+ DEFINE(L2X0_R_PHY_BASE, offsetof(struct l2x0_regs, phy_base));
+ DEFINE(L2X0_R_AUX_CTRL, offsetof(struct l2x0_regs, aux_ctrl));
+ DEFINE(L2X0_R_TAG_LATENCY, offsetof(struct l2x0_regs, tag_latency));
+ DEFINE(L2X0_R_DATA_LATENCY, offsetof(struct l2x0_regs, data_latency));
+ DEFINE(L2X0_R_FILTER_START, offsetof(struct l2x0_regs, filter_start));
+ DEFINE(L2X0_R_FILTER_END, offsetof(struct l2x0_regs, filter_end));
+ DEFINE(L2X0_R_PREFETCH_CTRL, offsetof(struct l2x0_regs, prefetch_ctrl));
+ DEFINE(L2X0_R_PWR_CTRL, offsetof(struct l2x0_regs, pwr_ctrl));
+ BLANK();
+#endif
#ifdef CONFIG_CPU_HAS_ASID
DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id));
BLANK();
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index d6df359408f..c0d9203fc75 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -412,6 +412,9 @@ void pcibios_fixup_bus(struct pci_bus *bus)
printk(KERN_INFO "PCI: bus%d: Fast back to back transfers %sabled\n",
bus->number, (features & PCI_COMMAND_FAST_BACK) ? "en" : "dis");
}
+#ifdef CONFIG_HOTPLUG
+EXPORT_SYMBOL(pcibios_fixup_bus);
+#endif
/*
* Convert from Linux-centric to bus-centric addresses for bridge devices.
@@ -431,6 +434,7 @@ pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
region->start = res->start - offset;
region->end = res->end - offset;
}
+EXPORT_SYMBOL(pcibios_resource_to_bus);
void __devinit
pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
@@ -447,12 +451,7 @@ pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
res->start = region->start + offset;
res->end = region->end + offset;
}
-
-#ifdef CONFIG_HOTPLUG
-EXPORT_SYMBOL(pcibios_fixup_bus);
-EXPORT_SYMBOL(pcibios_resource_to_bus);
EXPORT_SYMBOL(pcibios_bus_to_resource);
-#endif
/*
* Swizzle the device pin each time we cross a bridge.
diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S
index bcd66e00bdb..0f852d082fc 100644
--- a/arch/arm/kernel/debug.S
+++ b/arch/arm/kernel/debug.S
@@ -151,6 +151,8 @@ printhex: adr r2, hexbuf
b printascii
ENDPROC(printhex2)
+hexbuf: .space 16
+
.ltorg
ENTRY(printascii)
@@ -175,5 +177,3 @@ ENTRY(printch)
mov r0, #0
b 1b
ENDPROC(printch)
-
-hexbuf: .space 16
diff --git a/arch/arm/kernel/ecard.c b/arch/arm/kernel/ecard.c
index d16500110ee..4dd0edab6a6 100644
--- a/arch/arm/kernel/ecard.c
+++ b/arch/arm/kernel/ecard.c
@@ -237,7 +237,7 @@ static void ecard_init_pgtables(struct mm_struct *mm)
memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (IO_SIZE / PGDIR_SIZE));
- src_pgd = pgd_offset(mm, EASI_BASE);
+ src_pgd = pgd_offset(mm, (unsigned long)EASI_BASE);
dst_pgd = pgd_offset(mm, EASI_START);
memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE));
@@ -674,44 +674,37 @@ static int __init ecard_probeirqhw(void)
#define ecard_probeirqhw() (0)
#endif
-#ifndef IO_EC_MEMC8_BASE
-#define IO_EC_MEMC8_BASE 0
-#endif
-
-static unsigned int __ecard_address(ecard_t *ec, card_type_t type, card_speed_t speed)
+static void __iomem *__ecard_address(ecard_t *ec, card_type_t type, card_speed_t speed)
{
- unsigned long address = 0;
+ void __iomem *address = NULL;
int slot = ec->slot_no;
if (ec->slot_no == 8)
- return IO_EC_MEMC8_BASE;
+ return ECARD_MEMC8_BASE;
ectcr &= ~(1 << slot);
switch (type) {
case ECARD_MEMC:
if (slot < 4)
- address = IO_EC_MEMC_BASE + (slot << 12);
+ address = ECARD_MEMC_BASE + (slot << 14);
break;
case ECARD_IOC:
if (slot < 4)
- address = IO_EC_IOC_BASE + (slot << 12);
-#ifdef IO_EC_IOC4_BASE
+ address = ECARD_IOC_BASE + (slot << 14);
else
- address = IO_EC_IOC4_BASE + ((slot - 4) << 12);
-#endif
+ address = ECARD_IOC4_BASE + ((slot - 4) << 14);
if (address)
- address += speed << 17;
+ address += speed << 19;
break;
-#ifdef IO_EC_EASI_BASE
case ECARD_EASI:
- address = IO_EC_EASI_BASE + (slot << 22);
+ address = ECARD_EASI_BASE + (slot << 24);
if (speed == ECARD_FAST)
ectcr |= 1 << slot;
break;
-#endif
+
default:
break;
}
@@ -990,6 +983,7 @@ ecard_probe(int slot, card_type_t type)
ecard_t **ecp;
ecard_t *ec;
struct ex_ecid cid;
+ void __iomem *addr;
int i, rc;
ec = ecard_alloc_card(type, slot);
@@ -999,7 +993,7 @@ ecard_probe(int slot, card_type_t type)
}
rc = -ENODEV;
- if ((ec->podaddr = __ecard_address(ec, type, ECARD_SYNC)) == 0)
+ if ((addr = __ecard_address(ec, type, ECARD_SYNC)) == NULL)
goto nodev;
cid.r_zero = 1;
@@ -1019,7 +1013,7 @@ ecard_probe(int slot, card_type_t type)
ec->cid.fiqmask = cid.r_fiqmask;
ec->cid.fiqoff = ecard_gets24(cid.r_fiqoff);
ec->fiqaddr =
- ec->irqaddr = (void __iomem *)ioaddr(ec->podaddr);
+ ec->irqaddr = addr;
if (ec->cid.is) {
ec->irqmask = ec->cid.irqmask;
@@ -1048,10 +1042,8 @@ ecard_probe(int slot, card_type_t type)
set_irq_flags(ec->irq, IRQF_VALID);
}
-#ifdef IO_EC_MEMC8_BASE
if (slot == 8)
ec->irq = 11;
-#endif
#ifdef CONFIG_ARCH_RPC
/* On RiscPC, only first two slots have DMA capability */
if (slot < 2)
@@ -1097,9 +1089,7 @@ static int __init ecard_init(void)
ecard_probe(slot, ECARD_IOC);
}
-#ifdef IO_EC_MEMC8_BASE
ecard_probe(8, ECARD_IOC);
-#endif
irqhw = ecard_probeirqhw();
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index a87cbf889ff..9ad50c4208a 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -24,6 +24,7 @@
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>
+#include <asm/system.h>
#include "entry-header.S"
#include <asm/entry-macro-multi.S>
@@ -262,8 +263,7 @@ __und_svc:
ldr r0, [r4, #-4]
#else
ldrh r0, [r4, #-2] @ Thumb instruction at LR - 2
- and r9, r0, #0xf800
- cmp r9, #0xe800 @ 32-bit instruction if xx >= 0
+ cmp r0, #0xe800 @ 32-bit instruction if xx >= 0
ldrhhs r9, [r4] @ bottom 16 bits
orrhs r0, r9, r0, lsl #16
#endif
@@ -440,18 +440,46 @@ __und_usr:
#endif
beq call_fpe
@ Thumb instruction
-#if __LINUX_ARM_ARCH__ >= 7
+#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
+/*
+ * Thumb-2 instruction handling. Note that because pre-v6 and >= v6 platforms
+ * can never be supported in a single kernel, this code is not applicable at
+ * all when __LINUX_ARM_ARCH__ < 6. This allows simplifying assumptions to be
+ * made about .arch directives.
+ */
+#if __LINUX_ARM_ARCH__ < 7
+/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
+#define NEED_CPU_ARCHITECTURE
+ ldr r5, .LCcpu_architecture
+ ldr r5, [r5]
+ cmp r5, #CPU_ARCH_ARMv7
+ blo __und_usr_unknown
+/*
+ * The following code won't get run unless the running CPU really is v7, so
+ * coding round the lack of ldrht on older arches is pointless. Temporarily
+ * override the assembler target arch with the minimum required instead:
+ */
+ .arch armv6t2
+#endif
2:
ARM( ldrht r5, [r4], #2 )
THUMB( ldrht r5, [r4] )
THUMB( add r4, r4, #2 )
- and r0, r5, #0xf800 @ mask bits 111x x... .... ....
- cmp r0, #0xe800 @ 32bit instruction if xx != 0
+ cmp r5, #0xe800 @ 32bit instruction if xx != 0
blo __und_usr_unknown
3: ldrht r0, [r4]
add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
orr r0, r0, r5, lsl #16
+
+#if __LINUX_ARM_ARCH__ < 7
+/* If the target arch was overridden, change it back: */
+#ifdef CONFIG_CPU_32v6K
+ .arch armv6k
#else
+ .arch armv6
+#endif
+#endif /* __LINUX_ARM_ARCH__ < 7 */
+#else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
b __und_usr_unknown
#endif
UNWIND(.fnend )
@@ -578,6 +606,12 @@ call_fpe:
movw_pc lr @ CP#14 (Debug)
movw_pc lr @ CP#15 (Control)
+#ifdef NEED_CPU_ARCHITECTURE
+ .align 2
+.LCcpu_architecture:
+ .word __cpu_architecture
+#endif
+
#ifdef CONFIG_NEON
.align 6
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 742b6108a00..239703dbdf4 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -21,6 +21,7 @@
#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/system.h>
+#include <asm/pgtable.h>
#ifdef CONFIG_DEBUG_LL
#include <mach/debug-macro.S>
@@ -38,11 +39,14 @@
#error KERNEL_RAM_VADDR must start at 0xXXXX8000
#endif
+#define PG_DIR_SIZE 0x4000
+#define PMD_ORDER 2
+
.globl swapper_pg_dir
- .equ swapper_pg_dir, KERNEL_RAM_VADDR - 0x4000
+ .equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
.macro pgtbl, rd, phys
- add \rd, \phys, #TEXT_OFFSET - 0x4000
+ add \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
.endm
#ifdef CONFIG_XIP_KERNEL
@@ -148,11 +152,11 @@ __create_page_tables:
pgtbl r4, r8 @ page table address
/*
- * Clear the 16K level 1 swapper page table
+ * Clear the swapper page table
*/
mov r0, r4
mov r3, #0
- add r6, r0, #0x4000
+ add r6, r0, #PG_DIR_SIZE
1: str r3, [r0], #4
str r3, [r0], #4
str r3, [r0], #4
@@ -171,30 +175,30 @@ __create_page_tables:
sub r0, r0, r3 @ virt->phys offset
add r5, r5, r0 @ phys __enable_mmu
add r6, r6, r0 @ phys __enable_mmu_end
- mov r5, r5, lsr #20
- mov r6, r6, lsr #20
+ mov r5, r5, lsr #SECTION_SHIFT
+ mov r6, r6, lsr #SECTION_SHIFT
-1: orr r3, r7, r5, lsl #20 @ flags + kernel base
- str r3, [r4, r5, lsl #2] @ identity mapping
- teq r5, r6
- addne r5, r5, #1 @ next section
- bne 1b
+1: orr r3, r7, r5, lsl #SECTION_SHIFT @ flags + kernel base
+ str r3, [r4, r5, lsl #PMD_ORDER] @ identity mapping
+ cmp r5, r6
+ addlo r5, r5, #1 @ next section
+ blo 1b
/*
* Now setup the pagetables for our kernel direct
* mapped region.
*/
mov r3, pc
- mov r3, r3, lsr #20
- orr r3, r7, r3, lsl #20
- add r0, r4, #(KERNEL_START & 0xff000000) >> 18
- str r3, [r0, #(KERNEL_START & 0x00f00000) >> 18]!
+ mov r3, r3, lsr #SECTION_SHIFT
+ orr r3, r7, r3, lsl #SECTION_SHIFT
+ add r0, r4, #(KERNEL_START & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER)
+ str r3, [r0, #((KERNEL_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ORDER]!
ldr r6, =(KERNEL_END - 1)
- add r0, r0, #4
- add r6, r4, r6, lsr #18
+ add r0, r0, #1 << PMD_ORDER
+ add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
1: cmp r0, r6
- add r3, r3, #1 << 20
- strls r3, [r0], #4
+ add r3, r3, #1 << SECTION_SHIFT
+ strls r3, [r0], #1 << PMD_ORDER
bls 1b
#ifdef CONFIG_XIP_KERNEL
@@ -203,11 +207,11 @@ __create_page_tables:
*/
add r3, r8, #TEXT_OFFSET
orr r3, r3, r7
- add r0, r4, #(KERNEL_RAM_VADDR & 0xff000000) >> 18
- str r3, [r0, #(KERNEL_RAM_VADDR & 0x00f00000) >> 18]!
+ add r0, r4, #(KERNEL_RAM_VADDR & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER)
+ str r3, [r0, #(KERNEL_RAM_VADDR & 0x00f00000) >> (SECTION_SHIFT - PMD_ORDER)]!
ldr r6, =(_end - 1)
add r0, r0, #4
- add r6, r4, r6, lsr #18
+ add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
1: cmp r0, r6
add r3, r3, #1 << 20
strls r3, [r0], #4
@@ -218,12 +222,12 @@ __create_page_tables:
* Then map boot params address in r2 or
* the first 1MB of ram if boot params address is not specified.
*/
- mov r0, r2, lsr #20
- movs r0, r0, lsl #20
+ mov r0, r2, lsr #SECTION_SHIFT
+ movs r0, r0, lsl #SECTION_SHIFT
moveq r0, r8
sub r3, r0, r8
add r3, r3, #PAGE_OFFSET
- add r3, r4, r3, lsr #18
+ add r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)
orr r6, r7, r0
str r6, [r3]
@@ -236,21 +240,21 @@ __create_page_tables:
*/
addruart r7, r3
- mov r3, r3, lsr #20
- mov r3, r3, lsl #2
+ mov r3, r3, lsr #SECTION_SHIFT
+ mov r3, r3, lsl #PMD_ORDER
add r0, r4, r3
rsb r3, r3, #0x4000 @ PTRS_PER_PGD*sizeof(long)
cmp r3, #0x0800 @ limit to 512MB
movhi r3, #0x0800
add r6, r0, r3
- mov r3, r7, lsr #20
+ mov r3, r7, lsr #SECTION_SHIFT
ldr r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
- orr r3, r7, r3, lsl #20
+ orr r3, r7, r3, lsl #SECTION_SHIFT
1: str r3, [r0], #4
- add r3, r3, #1 << 20
- teq r0, r6
- bne 1b
+ add r3, r3, #1 << SECTION_SHIFT
+ cmp r0, r6
+ blo 1b
#else /* CONFIG_DEBUG_ICEDCC */
/* we don't need any serial debugging mappings for ICEDCC */
@@ -262,7 +266,7 @@ __create_page_tables:
* If we're using the NetWinder or CATS, we also need to map
* in the 16550-type serial port for the debug messages
*/
- add r0, r4, #0xff000000 >> 18
+ add r0, r4, #0xff000000 >> (SECTION_SHIFT - PMD_ORDER)
orr r3, r7, #0x7c000000
str r3, [r0]
#endif
@@ -272,10 +276,10 @@ __create_page_tables:
* Similar reasons here - for debug. This is
* only for Acorn RiscPC architectures.
*/
- add r0, r4, #0x02000000 >> 18
+ add r0, r4, #0x02000000 >> (SECTION_SHIFT - PMD_ORDER)
orr r3, r7, #0x02000000
str r3, [r0]
- add r0, r4, #0xd8000000 >> 18
+ add r0, r4, #0xd8000000 >> (SECTION_SHIFT - PMD_ORDER)
str r3, [r0]
#endif
#endif
@@ -488,13 +492,8 @@ __fixup_pv_table:
add r5, r5, r3 @ adjust table end address
add r7, r7, r3 @ adjust __pv_phys_offset address
str r8, [r7] @ save computed PHYS_OFFSET to __pv_phys_offset
-#ifndef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
mov r6, r3, lsr #24 @ constant for add/sub instructions
teq r3, r6, lsl #24 @ must be 16MiB aligned
-#else
- mov r6, r3, lsr #16 @ constant for add/sub instructions
- teq r3, r6, lsl #16 @ must be 64kiB aligned
-#endif
THUMB( it ne @ cross section branch )
bne __error
str r6, [r7, #4] @ save to __pv_offset
@@ -510,20 +509,8 @@ ENDPROC(__fixup_pv_table)
.text
__fixup_a_pv_table:
#ifdef CONFIG_THUMB2_KERNEL
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
- lsls r0, r6, #24
- lsr r6, #8
- beq 1f
- clz r7, r0
- lsr r0, #24
- lsl r0, r7
- bic r0, 0x0080
- lsrs r7, #1
- orrcs r0, #0x0080
- orr r0, r0, r7, lsl #12
-#endif
-1: lsls r6, #24
- beq 4f
+ lsls r6, #24
+ beq 2f
clz r7, r6
lsr r6, #24
lsl r6, r7
@@ -532,43 +519,25 @@ __fixup_a_pv_table:
orrcs r6, #0x0080
orr r6, r6, r7, lsl #12
orr r6, #0x4000
- b 4f
-2: @ at this point the C flag is always clear
- add r7, r3
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
- ldrh ip, [r7]
- tst ip, 0x0400 @ the i bit tells us LS or MS byte
- beq 3f
- cmp r0, #0 @ set C flag, and ...
- biceq ip, 0x0400 @ immediate zero value has a special encoding
- streqh ip, [r7] @ that requires the i bit cleared
-#endif
-3: ldrh ip, [r7, #2]
+ b 2f
+1: add r7, r3
+ ldrh ip, [r7, #2]
and ip, 0x8f00
- orrcc ip, r6 @ mask in offset bits 31-24
- orrcs ip, r0 @ mask in offset bits 23-16
+ orr ip, r6 @ mask in offset bits 31-24
strh ip, [r7, #2]
-4: cmp r4, r5
+2: cmp r4, r5
ldrcc r7, [r4], #4 @ use branch for delay slot
- bcc 2b
+ bcc 1b
bx lr
#else
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
- and r0, r6, #255 @ offset bits 23-16
- mov r6, r6, lsr #8 @ offset bits 31-24
-#else
- mov r0, #0 @ just in case...
-#endif
- b 3f
-2: ldr ip, [r7, r3]
+ b 2f
+1: ldr ip, [r7, r3]
bic ip, ip, #0x000000ff
- tst ip, #0x400 @ rotate shift tells us LS or MS byte
- orrne ip, ip, r6 @ mask in offset bits 31-24
- orreq ip, ip, r0 @ mask in offset bits 23-16
+ orr ip, ip, r6 @ mask in offset bits 31-24
str ip, [r7, r3]
-3: cmp r4, r5
+2: cmp r4, r5
ldrcc r7, [r4], #4 @ use branch for delay slot
- bcc 2b
+ bcc 1b
mov pc, lr
#endif
ENDPROC(__fixup_a_pv_table)
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index de3dcab8610..53919b230e8 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -35,8 +35,8 @@
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/proc_fs.h>
-#include <linux/ftrace.h>
+#include <asm/exception.h>
#include <asm/system.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index e59bbd496c3..c1b4463dcc8 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -32,6 +32,24 @@ static atomic_t waiting_for_crash_ipi;
int machine_kexec_prepare(struct kimage *image)
{
+ unsigned long page_list;
+ void *reboot_code_buffer;
+ page_list = image->head & PAGE_MASK;
+
+ reboot_code_buffer = page_address(image->control_code_page);
+
+ /* Prepare parameters for reboot_code_buffer*/
+ kexec_start_address = image->start;
+ kexec_indirection_page = page_list;
+ kexec_mach_type = machine_arch_type;
+ kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET;
+
+ /* copy our kernel relocation code to the control code page */
+ memcpy(reboot_code_buffer,
+ relocate_new_kernel, relocate_new_kernel_size);
+
+ flush_icache_range((unsigned long) reboot_code_buffer,
+ (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
return 0;
}
@@ -82,31 +100,14 @@ void (*kexec_reinit)(void);
void machine_kexec(struct kimage *image)
{
- unsigned long page_list;
unsigned long reboot_code_buffer_phys;
void *reboot_code_buffer;
-
- page_list = image->head & PAGE_MASK;
-
/* we need both effective and real address here */
reboot_code_buffer_phys =
page_to_pfn(image->control_code_page) << PAGE_SHIFT;
reboot_code_buffer = page_address(image->control_code_page);
- /* Prepare parameters for reboot_code_buffer*/
- kexec_start_address = image->start;
- kexec_indirection_page = page_list;
- kexec_mach_type = machine_arch_type;
- kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET;
-
- /* copy our kernel relocation code to the control code page */
- memcpy(reboot_code_buffer,
- relocate_new_kernel, relocate_new_kernel_size);
-
-
- flush_icache_range((unsigned long) reboot_code_buffer,
- (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
printk(KERN_INFO "Bye!\n");
if (kexec_reinit)
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index cc2020c2c70..1e9be5d25e5 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -33,7 +33,7 @@
* recompiling the whole kernel when CONFIG_XIP_KERNEL is turned on/off.
*/
#undef MODULES_VADDR
-#define MODULES_VADDR (((unsigned long)_etext + ~PGDIR_MASK) & PGDIR_MASK)
+#define MODULES_VADDR (((unsigned long)_etext + ~PMD_MASK) & PMD_MASK)
#endif
#ifdef CONFIG_MMU
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 4c851834f68..6be3e2e4d83 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -321,8 +321,8 @@ static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] =
ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
- [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_COHERENT_LINE_HIT,
- [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_COHERENT_LINE_MISS,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_DCACHE_ACCESS,
+ [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_DCACHE_REFILL,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 1a347f481e5..fd0814076ff 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -319,7 +319,7 @@ void show_regs(struct pt_regs * regs)
printk("\n");
printk("Pid: %d, comm: %20s\n", task_pid_nr(current), current->comm);
__show_regs(regs);
- __backtrace();
+ dump_stack();
}
ATOMIC_NOTIFIER_HEAD(thread_notify_head);
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index e514c76043b..3fe93f75b55 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -29,6 +29,8 @@
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
+#include <linux/bug.h>
+#include <linux/compiler.h>
#include <asm/unified.h>
#include <asm/cpu.h>
@@ -42,6 +44,7 @@
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
+#include <asm/system.h>
#include <asm/prom.h>
#include <asm/mach/arch.h>
@@ -115,6 +118,13 @@ struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif
+/*
+ * Cached cpu_architecture() result for use by assembler code.
+ * C code should use the cpu_architecture() function instead of accessing this
+ * variable directly.
+ */
+int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
+
struct stack {
u32 irq[3];
u32 abt[3];
@@ -210,7 +220,7 @@ static const char *proc_arch[] = {
"?(17)",
};
-int cpu_architecture(void)
+static int __get_cpu_architecture(void)
{
int cpu_arch;
@@ -243,11 +253,22 @@ int cpu_architecture(void)
return cpu_arch;
}
+int __pure cpu_architecture(void)
+{
+ BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);
+
+ return __cpu_architecture;
+}
+
static int cpu_has_aliasing_icache(unsigned int arch)
{
int aliasing_icache;
unsigned int id_reg, num_sets, line_size;
+ /* PIPT caches never alias. */
+ if (icache_is_pipt())
+ return 0;
+
/* arch specifies the register format */
switch (arch) {
case CPU_ARCH_ARMv7:
@@ -282,8 +303,14 @@ static void __init cacheid_init(void)
/* ARMv7 register format */
arch = CPU_ARCH_ARMv7;
cacheid = CACHEID_VIPT_NONALIASING;
- if ((cachetype & (3 << 14)) == 1 << 14)
+ switch (cachetype & (3 << 14)) {
+ case (1 << 14):
cacheid |= CACHEID_ASID_TAGGED;
+ break;
+ case (3 << 14):
+ cacheid |= CACHEID_PIPT;
+ break;
+ }
} else {
arch = CPU_ARCH_ARMv6;
if (cachetype & (1 << 23))
@@ -300,10 +327,11 @@ static void __init cacheid_init(void)
printk("CPU: %s data cache, %s instruction cache\n",
cache_is_vivt() ? "VIVT" :
cache_is_vipt_aliasing() ? "VIPT aliasing" :
- cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown",
+ cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
cache_is_vivt() ? "VIVT" :
icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
icache_is_vipt_aliasing() ? "VIPT aliasing" :
+ icache_is_pipt() ? "PIPT" :
cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
@@ -414,6 +442,7 @@ static void __init setup_processor(void)
}
cpu_name = list->cpu_name;
+ __cpu_architecture = __get_cpu_architecture();
#ifdef MULTI_CPU
processor = *list->proc;
@@ -861,7 +890,7 @@ static struct machine_desc * __init setup_machine_tags(unsigned int nr)
}
if (mdesc->fixup)
- mdesc->fixup(mdesc, tags, &from, &meminfo);
+ mdesc->fixup(tags, &from, &meminfo);
if (tags->hdr.tag == ATAG_CORE) {
if (meminfo.nr_banks != 0)
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index d88ff0230e8..854ce33715f 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -16,7 +16,6 @@
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
-#include <linux/ftrace.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
@@ -31,6 +30,8 @@
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
+#include <asm/exception.h>
+#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -39,6 +40,7 @@
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/localtimer.h>
+#include <asm/smp_plat.h>
/*
* as from 2.5, kernels no longer have an init_tasks structure
@@ -259,6 +261,20 @@ void __ref cpu_die(void)
}
#endif /* CONFIG_HOTPLUG_CPU */
+int __cpu_logical_map[NR_CPUS];
+
+void __init smp_setup_processor_id(void)
+{
+ int i;
+ u32 cpu = is_smp() ? read_cpuid_mpidr() & 0xff : 0;
+
+ cpu_logical_map(0) = cpu;
+ for (i = 1; i < NR_CPUS; ++i)
+ cpu_logical_map(i) = i == cpu ? 0 : i;
+
+ printk(KERN_INFO "Booting Linux on physical CPU %d\n", cpu);
+}
+
/*
* Called by both boot and secondaries to move global data into
* per-processor storage.
@@ -268,6 +284,8 @@ static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);
cpu_info->loops_per_jiffy = loops_per_jiffy;
+
+ store_cpu_topology(cpuid);
}
/*
@@ -301,17 +319,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
*/
platform_secondary_init(cpu);
- /*
- * Enable local interrupts.
- */
notify_cpu_starting(cpu);
- local_irq_enable();
- local_fiq_enable();
-
- /*
- * Setup the percpu timer for this CPU.
- */
- percpu_timer_setup();
calibrate_delay();
@@ -323,10 +331,23 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
* before we continue.
*/
set_cpu_online(cpu, true);
+
+ /*
+ * Setup the percpu timer for this CPU.
+ */
+ percpu_timer_setup();
+
while (!cpu_active(cpu))
cpu_relax();
/*
+	 * cpu_active bit is set, so it's safe to enable interrupts
+ * now.
+ */
+ local_irq_enable();
+ local_fiq_enable();
+
+ /*
* OK, it's off to the idle thread for us
*/
cpu_idle();
@@ -358,6 +379,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
{
unsigned int ncores = num_possible_cpus();
+ init_cpu_topology();
+
smp_store_cpu_info(smp_processor_id());
/*
@@ -460,6 +483,11 @@ static void ipi_timer(void)
#ifdef CONFIG_LOCAL_TIMERS
asmlinkage void __exception_irq_entry do_local_timer(struct pt_regs *regs)
{
+ handle_local_timer(regs);
+}
+
+void handle_local_timer(struct pt_regs *regs)
+{
struct pt_regs *old_regs = set_irq_regs(regs);
int cpu = smp_processor_id();
@@ -567,6 +595,11 @@ static void ipi_cpu_stop(unsigned int cpu)
*/
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
+ handle_IPI(ipinr, regs);
+}
+
+void handle_IPI(int ipinr, struct pt_regs *regs)
+{
unsigned int cpu = smp_processor_id();
struct pt_regs *old_regs = set_irq_regs(regs);
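A worked example (not part of the patch) of the logical CPU map that smp_setup_processor_id() above builds; the boot CPU number is a hypothetical value read from MPIDR[7:0]:

    /* Assumption: the kernel was entered on physical CPU 2, NR_CPUS == 4. */
    int map[4];
    unsigned int i, boot_cpu = 2;

    map[0] = boot_cpu;                       /* boot CPU becomes logical 0 */
    for (i = 1; i < 4; ++i)
            map[i] = (i == boot_cpu) ? 0 : i;
    /* result: map[] = { 2, 1, 0, 3 } */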
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index 79ed5e7f204..8f5dd796335 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -13,6 +13,7 @@
#include <asm/smp_scu.h>
#include <asm/cacheflush.h>
+#include <asm/cputype.h>
#define SCU_CTRL 0x00
#define SCU_CONFIG 0x04
@@ -33,10 +34,19 @@ unsigned int __init scu_get_core_count(void __iomem *scu_base)
/*
* Enable the SCU
*/
-void __init scu_enable(void __iomem *scu_base)
+void scu_enable(void __iomem *scu_base)
{
u32 scu_ctrl;
+#ifdef CONFIG_ARM_ERRATA_764369
+ /* Cortex-A9 only */
+ if ((read_cpuid(CPUID_ID) & 0xff0ffff0) == 0x410fc090) {
+ scu_ctrl = __raw_readl(scu_base + 0x30);
+ if (!(scu_ctrl & 1))
+ __raw_writel(scu_ctrl | 0x1, scu_base + 0x30);
+ }
+#endif
+
scu_ctrl = __raw_readl(scu_base + SCU_CTRL);
/* already enabled? */
if (scu_ctrl & 1)
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index cb634c3e28e..5a54b95d6bd 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -39,13 +39,11 @@
*/
static struct sys_timer *system_timer;
-#if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE)
+#if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE) || \
+ defined(CONFIG_NVRAM) || defined(CONFIG_NVRAM_MODULE)
/* this needs a better home */
DEFINE_SPINLOCK(rtc_lock);
-
-#ifdef CONFIG_RTC_DRV_CMOS_MODULE
EXPORT_SYMBOL(rtc_lock);
-#endif
#endif /* pc-style 'CMOS' RTC support */
/* change this if you have some constant time drift */
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
new file mode 100644
index 00000000000..1040c00405d
--- /dev/null
+++ b/arch/arm/kernel/topology.c
@@ -0,0 +1,148 @@
+/*
+ * arch/arm/kernel/topology.c
+ *
+ * Copyright (C) 2011 Linaro Limited.
+ * Written by: Vincent Guittot
+ *
+ * based on arch/sh/kernel/topology.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/percpu.h>
+#include <linux/node.h>
+#include <linux/nodemask.h>
+#include <linux/sched.h>
+
+#include <asm/cputype.h>
+#include <asm/topology.h>
+
+#define MPIDR_SMP_BITMASK (0x3 << 30)
+#define MPIDR_SMP_VALUE (0x2 << 30)
+
+#define MPIDR_MT_BITMASK (0x1 << 24)
+
+/*
+ * These masks reflect the current use of the affinity levels.
+ * The affinity level can be up to 16 bits, according to the ARM ARM.
+ */
+
+#define MPIDR_LEVEL0_MASK 0x3
+#define MPIDR_LEVEL0_SHIFT 0
+
+#define MPIDR_LEVEL1_MASK 0xF
+#define MPIDR_LEVEL1_SHIFT 8
+
+#define MPIDR_LEVEL2_MASK 0xFF
+#define MPIDR_LEVEL2_SHIFT 16
+
+struct cputopo_arm cpu_topology[NR_CPUS];
+
+const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
+{
+ return &cpu_topology[cpu].core_sibling;
+}
+
+/*
+ * store_cpu_topology is called at boot when only one cpu is running, and
+ * with the cpu_hotplug.lock mutex held once several cpus have booted,
+ * which prevents simultaneous write access to the cpu_topology array
+ */
+void store_cpu_topology(unsigned int cpuid)
+{
+ struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
+ unsigned int mpidr;
+ unsigned int cpu;
+
+	/* If the cpu topology has already been set, just return */
+ if (cpuid_topo->core_id != -1)
+ return;
+
+ mpidr = read_cpuid_mpidr();
+
+ /* create cpu topology mapping */
+ if ((mpidr & MPIDR_SMP_BITMASK) == MPIDR_SMP_VALUE) {
+ /*
+		 * This is a multiprocessor system:
+		 * the multiprocessor format and multiprocessor mode fields are set
+ */
+
+ if (mpidr & MPIDR_MT_BITMASK) {
+ /* core performance interdependency */
+ cpuid_topo->thread_id = (mpidr >> MPIDR_LEVEL0_SHIFT)
+ & MPIDR_LEVEL0_MASK;
+ cpuid_topo->core_id = (mpidr >> MPIDR_LEVEL1_SHIFT)
+ & MPIDR_LEVEL1_MASK;
+ cpuid_topo->socket_id = (mpidr >> MPIDR_LEVEL2_SHIFT)
+ & MPIDR_LEVEL2_MASK;
+ } else {
+ /* largely independent cores */
+ cpuid_topo->thread_id = -1;
+ cpuid_topo->core_id = (mpidr >> MPIDR_LEVEL0_SHIFT)
+ & MPIDR_LEVEL0_MASK;
+ cpuid_topo->socket_id = (mpidr >> MPIDR_LEVEL1_SHIFT)
+ & MPIDR_LEVEL1_MASK;
+ }
+ } else {
+ /*
+		 * This is a uniprocessor system:
+		 * either we are in multiprocessor format but on a uniprocessor,
+		 * or we are in the old uniprocessor format
+ */
+ cpuid_topo->thread_id = -1;
+ cpuid_topo->core_id = 0;
+ cpuid_topo->socket_id = -1;
+ }
+
+ /* update core and thread sibling masks */
+ for_each_possible_cpu(cpu) {
+ struct cputopo_arm *cpu_topo = &cpu_topology[cpu];
+
+ if (cpuid_topo->socket_id == cpu_topo->socket_id) {
+ cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
+ if (cpu != cpuid)
+ cpumask_set_cpu(cpu,
+ &cpuid_topo->core_sibling);
+
+ if (cpuid_topo->core_id == cpu_topo->core_id) {
+ cpumask_set_cpu(cpuid,
+ &cpu_topo->thread_sibling);
+ if (cpu != cpuid)
+ cpumask_set_cpu(cpu,
+ &cpuid_topo->thread_sibling);
+ }
+ }
+ }
+ smp_wmb();
+
+ printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
+ cpuid, cpu_topology[cpuid].thread_id,
+ cpu_topology[cpuid].core_id,
+ cpu_topology[cpuid].socket_id, mpidr);
+}
+
+/*
+ * init_cpu_topology is called at boot when only one cpu is running
+ * which prevents simultaneous write access to the cpu_topology array
+ */
+void init_cpu_topology(void)
+{
+ unsigned int cpu;
+
+ /* init core mask */
+ for_each_possible_cpu(cpu) {
+ struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]);
+
+ cpu_topo->thread_id = -1;
+ cpu_topo->core_id = -1;
+ cpu_topo->socket_id = -1;
+ cpumask_clear(&cpu_topo->core_sibling);
+ cpumask_clear(&cpu_topo->thread_sibling);
+ }
+ smp_wmb();
+}
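A worked example (not part of the patch) of how store_cpu_topology() above decodes MPIDR on a multiprocessor, non-multithreaded part; the register value is hypothetical:

    unsigned int mpidr = 0x80000102;  /* assumption: MP format set, MT bit clear */

    /* (mpidr & MPIDR_SMP_BITMASK) == MPIDR_SMP_VALUE and MPIDR_MT_BITMASK is
     * clear, so affinity level 0 is the core and level 1 the socket. */
    int core_id   = (mpidr >> MPIDR_LEVEL0_SHIFT) & MPIDR_LEVEL0_MASK;  /* 2 */
    int socket_id = (mpidr >> MPIDR_LEVEL1_SHIFT) & MPIDR_LEVEL1_MASK;  /* 1 */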
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index bc9f9da782c..7f5b99eb2c5 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -21,12 +21,14 @@
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/kexec.h>
+#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <asm/cacheflush.h>
+#include <asm/exception.h>
#include <asm/system.h>
#include <asm/unistd.h>
#include <asm/traps.h>
@@ -270,6 +272,8 @@ void die(const char *str, struct pt_regs *regs, int err)
spin_lock_irq(&die_lock);
console_verbose();
bust_spinlocks(1);
+ if (!user_mode(regs))
+ report_bug(regs->ARM_pc, regs);
ret = __die(str, err, thread, regs);
if (regs && kexec_should_crash(thread->task))
@@ -301,6 +305,24 @@ void arm_notify_die(const char *str, struct pt_regs *regs,
}
}
+#ifdef CONFIG_GENERIC_BUG
+
+int is_valid_bugaddr(unsigned long pc)
+{
+#ifdef CONFIG_THUMB2_KERNEL
+ unsigned short bkpt;
+#else
+ unsigned long bkpt;
+#endif
+
+ if (probe_kernel_address((unsigned *)pc, bkpt))
+ return 0;
+
+ return bkpt == BUG_INSTR_VALUE;
+}
+
+#endif
+
static LIST_HEAD(undef_hook);
static DEFINE_SPINLOCK(undef_lock);
@@ -706,16 +728,6 @@ baddataabort(int code, unsigned long instr, struct pt_regs *regs)
arm_notify_die("unknown data abort code", regs, &info, instr, 0);
}
-void __attribute__((noreturn)) __bug(const char *file, int line)
-{
- printk(KERN_CRIT"kernel BUG at %s:%d!\n", file, line);
- *(int *)0 = 0;
-
- /* Avoid "noreturn function does return" */
- for (;;);
-}
-EXPORT_SYMBOL(__bug);
-
void __readwrite_bug(const char *fn)
{
printk("%s called, but not implemented\n", fn);
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index bf977f8514f..20b3041e086 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -21,10 +21,13 @@
#define ARM_CPU_KEEP(x)
#endif
-#if defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)
+#if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
+ defined(CONFIG_GENERIC_BUG)
#define ARM_EXIT_KEEP(x) x
+#define ARM_EXIT_DISCARD(x)
#else
#define ARM_EXIT_KEEP(x)
+#define ARM_EXIT_DISCARD(x) x
#endif
OUTPUT_ARCH(arm)
@@ -39,6 +42,11 @@ jiffies = jiffies_64 + 4;
SECTIONS
{
/*
+ * XXX: The linker does not define how output sections are
+ * assigned to input sections when there are multiple statements
+ * matching the same input section name. There is no documented
+ * order of matching.
+ *
* unwind exit sections must be discarded before the rest of the
* unwind sections get included.
*/
@@ -47,6 +55,9 @@ SECTIONS
*(.ARM.extab.exit.text)
ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))
ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))
+ ARM_EXIT_DISCARD(EXIT_TEXT)
+ ARM_EXIT_DISCARD(EXIT_DATA)
+ EXIT_CALL
#ifndef CONFIG_HOTPLUG
*(.ARM.exidx.devexit.text)
*(.ARM.extab.devexit.text)
@@ -58,6 +69,8 @@ SECTIONS
#ifndef CONFIG_SMP_ON_UP
*(.alt.smp.init)
#endif
+ *(.discard)
+ *(.discard.*)
}
#ifdef CONFIG_XIP_KERNEL
@@ -279,9 +292,6 @@ SECTIONS
STABS_DEBUG
.comment 0 : { *(.comment) }
-
- /* Default discards */
- DISCARDS
}
/*
diff --git a/arch/arm/lib/backtrace.S b/arch/arm/lib/backtrace.S
index a673297b0cf..cd07b5814c2 100644
--- a/arch/arm/lib/backtrace.S
+++ b/arch/arm/lib/backtrace.S
@@ -22,15 +22,10 @@
#define mask r7
#define offset r8
-ENTRY(__backtrace)
- mov r1, #0x10
- mov r0, fp
-
ENTRY(c_backtrace)
#if !defined(CONFIG_FRAME_POINTER) || !defined(CONFIG_PRINTK)
mov pc, lr
-ENDPROC(__backtrace)
ENDPROC(c_backtrace)
#else
stmfd sp!, {r4 - r8, lr} @ Save an extra register so we have a location...
@@ -107,7 +102,6 @@ for_each_frame: tst frame, mask @ Check for address exceptions
mov r1, frame
bl printk
no_frame: ldmfd sp!, {r4 - r8, pc}
-ENDPROC(__backtrace)
ENDPROC(c_backtrace)
.pushsection __ex_table,"a"
diff --git a/arch/arm/lib/div64.S b/arch/arm/lib/div64.S
index faa7748142d..e55c4842c29 100644
--- a/arch/arm/lib/div64.S
+++ b/arch/arm/lib/div64.S
@@ -13,6 +13,7 @@
*/
#include <linux/linkage.h>
+#include <asm/unwind.h>
#ifdef __ARMEB__
#define xh r0
@@ -44,6 +45,7 @@
*/
ENTRY(__do_div64)
+UNWIND(.fnstart)
@ Test for easy paths first.
subs ip, r4, #1
@@ -189,7 +191,12 @@ ENTRY(__do_div64)
moveq yh, xh
moveq xh, #0
moveq pc, lr
+UNWIND(.fnend)
+UNWIND(.fnstart)
+UNWIND(.pad #4)
+UNWIND(.save {lr})
+Ldiv0_64:
@ Division by 0:
str lr, [sp, #-8]!
bl __div0
@@ -200,4 +207,5 @@ ENTRY(__do_div64)
mov xh, #0
ldr pc, [sp], #8
+UNWIND(.fnend)
ENDPROC(__do_div64)
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index 8b9b13649f8..025f742dd4d 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -17,6 +17,7 @@
#include <linux/sched.h>
#include <linux/hardirq.h> /* for in_atomic() */
#include <linux/gfp.h>
+#include <linux/highmem.h>
#include <asm/current.h>
#include <asm/page.h>
diff --git a/arch/arm/mach-at91/Makefile.boot b/arch/arm/mach-at91/Makefile.boot
index 3462b815054..9ab5a3e5f4f 100644
--- a/arch/arm/mach-at91/Makefile.boot
+++ b/arch/arm/mach-at91/Makefile.boot
@@ -4,15 +4,15 @@
# INITRD_PHYS must be in RAM
ifeq ($(CONFIG_ARCH_AT91CAP9),y)
- zreladdr-y := 0x70008000
+ zreladdr-y += 0x70008000
params_phys-y := 0x70000100
initrd_phys-y := 0x70410000
else ifeq ($(CONFIG_ARCH_AT91SAM9G45),y)
- zreladdr-y := 0x70008000
+ zreladdr-y += 0x70008000
params_phys-y := 0x70000100
initrd_phys-y := 0x70410000
else
- zreladdr-y := 0x20008000
+ zreladdr-y += 0x20008000
params_phys-y := 0x20000100
initrd_phys-y := 0x20410000
endif
diff --git a/arch/arm/mach-bcmring/Makefile.boot b/arch/arm/mach-bcmring/Makefile.boot
index fb53b283beb..aef2467757f 100644
--- a/arch/arm/mach-bcmring/Makefile.boot
+++ b/arch/arm/mach-bcmring/Makefile.boot
@@ -1,6 +1,6 @@
# Address where decompressor will be written and eventually executed.
#
# default to SDRAM
-zreladdr-y := $(CONFIG_BCM_ZRELADDR)
+zreladdr-y += $(CONFIG_BCM_ZRELADDR)
params_phys-y := 0x00000800
diff --git a/arch/arm/mach-bcmring/arch.c b/arch/arm/mach-bcmring/arch.c
index a604b9ebb50..31a143592c8 100644
--- a/arch/arm/mach-bcmring/arch.c
+++ b/arch/arm/mach-bcmring/arch.c
@@ -136,8 +136,8 @@ static void __init bcmring_init_machine(void)
*
*****************************************************************************/
-static void __init bcmring_fixup(struct machine_desc *desc,
- struct tag *t, char **cmdline, struct meminfo *mi) {
+static void __init bcmring_fixup(struct tag *t, char **cmdline,
+ struct meminfo *mi) {
#ifdef CONFIG_BLK_DEV_INITRD
printk(KERN_NOTICE "bcmring_fixup\n");
t->hdr.tag = ATAG_CORE;
diff --git a/arch/arm/mach-clps711x/Makefile.boot b/arch/arm/mach-clps711x/Makefile.boot
index a51fcef64fe..9398e859b5a 100644
--- a/arch/arm/mach-clps711x/Makefile.boot
+++ b/arch/arm/mach-clps711x/Makefile.boot
@@ -1,5 +1,5 @@
# The standard locations for stuff on CLPS711x type processors
- zreladdr-y := 0xc0028000
+ zreladdr-y += 0xc0028000
params_phys-y := 0xc0000100
# Should probably have some agreement on these...
initrd_phys-$(CONFIG_ARCH_P720T) := 0xc0400000
diff --git a/arch/arm/mach-clps711x/clep7312.c b/arch/arm/mach-clps711x/clep7312.c
index 67b5abb4a60..0a2e74feb24 100644
--- a/arch/arm/mach-clps711x/clep7312.c
+++ b/arch/arm/mach-clps711x/clep7312.c
@@ -26,8 +26,7 @@
#include "common.h"
static void __init
-fixup_clep7312(struct machine_desc *desc, struct tag *tags,
- char **cmdline, struct meminfo *mi)
+fixup_clep7312(struct tag *tags, char **cmdline, struct meminfo *mi)
{
mi->nr_banks=1;
mi->bank[0].start = 0xc0000000;
diff --git a/arch/arm/mach-clps711x/edb7211-arch.c b/arch/arm/mach-clps711x/edb7211-arch.c
index 98ca5b2e940..725a7a54ba4 100644
--- a/arch/arm/mach-clps711x/edb7211-arch.c
+++ b/arch/arm/mach-clps711x/edb7211-arch.c
@@ -37,8 +37,7 @@ static void __init edb7211_reserve(void)
}
static void __init
-fixup_edb7211(struct machine_desc *desc, struct tag *tags,
- char **cmdline, struct meminfo *mi)
+fixup_edb7211(struct tag *tags, char **cmdline, struct meminfo *mi)
{
/*
* Bank start addresses are not present in the information
diff --git a/arch/arm/mach-clps711x/fortunet.c b/arch/arm/mach-clps711x/fortunet.c
index b1cb479e71e..1947b30f9b8 100644
--- a/arch/arm/mach-clps711x/fortunet.c
+++ b/arch/arm/mach-clps711x/fortunet.c
@@ -57,8 +57,7 @@ typedef struct tag_IMAGE_PARAMS
#define IMAGE_PARAMS_PHYS 0xC01F0000
static void __init
-fortunet_fixup(struct machine_desc *desc, struct tag *tags,
- char **cmdline, struct meminfo *mi)
+fortunet_fixup(struct tag *tags, char **cmdline, struct meminfo *mi)
{
IMAGE_PARAMS *ip = phys_to_virt(IMAGE_PARAMS_PHYS);
*cmdline = phys_to_virt(ip->command_line);
diff --git a/arch/arm/mach-clps711x/p720t.c b/arch/arm/mach-clps711x/p720t.c
index cefbce0480b..3f796e0d328 100644
--- a/arch/arm/mach-clps711x/p720t.c
+++ b/arch/arm/mach-clps711x/p720t.c
@@ -56,8 +56,7 @@ static struct map_desc p720t_io_desc[] __initdata = {
};
static void __init
-fixup_p720t(struct machine_desc *desc, struct tag *tag,
- char **cmdline, struct meminfo *mi)
+fixup_p720t(struct tag *tag, char **cmdline, struct meminfo *mi)
{
/*
* Our bootloader doesn't setup any tags (yet).
diff --git a/arch/arm/mach-cns3xxx/Makefile.boot b/arch/arm/mach-cns3xxx/Makefile.boot
index 77701286522..d079de0b6e3 100644
--- a/arch/arm/mach-cns3xxx/Makefile.boot
+++ b/arch/arm/mach-cns3xxx/Makefile.boot
@@ -1,3 +1,3 @@
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000100
initrd_phys-y := 0x00C00000
diff --git a/arch/arm/mach-davinci/Makefile.boot b/arch/arm/mach-davinci/Makefile.boot
index db97ef2c647..04a6c4e67b1 100644
--- a/arch/arm/mach-davinci/Makefile.boot
+++ b/arch/arm/mach-davinci/Makefile.boot
@@ -2,12 +2,12 @@ ifeq ($(CONFIG_ARCH_DAVINCI_DA8XX),y)
ifeq ($(CONFIG_ARCH_DAVINCI_DMx),y)
$(error Cannot enable DaVinci and DA8XX platforms concurrently)
else
- zreladdr-y := 0xc0008000
+ zreladdr-y += 0xc0008000
params_phys-y := 0xc0000100
initrd_phys-y := 0xc0800000
endif
else
- zreladdr-y := 0x80008000
+ zreladdr-y += 0x80008000
params_phys-y := 0x80000100
initrd_phys-y := 0x80800000
endif
diff --git a/arch/arm/mach-dove/Makefile.boot b/arch/arm/mach-dove/Makefile.boot
index 67039c3e0c4..760a0efe758 100644
--- a/arch/arm/mach-dove/Makefile.boot
+++ b/arch/arm/mach-dove/Makefile.boot
@@ -1,3 +1,3 @@
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000100
initrd_phys-y := 0x00800000
diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c
index 83dce859886..a9e0dae86a2 100644
--- a/arch/arm/mach-dove/common.c
+++ b/arch/arm/mach-dove/common.c
@@ -158,7 +158,7 @@ void __init dove_spi0_init(void)
void __init dove_spi1_init(void)
{
- orion_spi_init(DOVE_SPI1_PHYS_BASE, get_tclk());
+ orion_spi_1_init(DOVE_SPI1_PHYS_BASE, get_tclk());
}
/*****************************************************************************
diff --git a/arch/arm/mach-ebsa110/Makefile.boot b/arch/arm/mach-ebsa110/Makefile.boot
index 23212604493..83cf07c38ad 100644
--- a/arch/arm/mach-ebsa110/Makefile.boot
+++ b/arch/arm/mach-ebsa110/Makefile.boot
@@ -1,4 +1,4 @@
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000400
initrd_phys-y := 0x00800000
diff --git a/arch/arm/mach-ebsa110/include/mach/io.h b/arch/arm/mach-ebsa110/include/mach/io.h
index f68daa632af..44679db672f 100644
--- a/arch/arm/mach-ebsa110/include/mach/io.h
+++ b/arch/arm/mach-ebsa110/include/mach/io.h
@@ -13,8 +13,6 @@
#ifndef __ASM_ARM_ARCH_IO_H
#define __ASM_ARM_ARCH_IO_H
-#define IO_SPACE_LIMIT 0xffff
-
u8 __inb8(unsigned int port);
void __outb8(u8 val, unsigned int port);
diff --git a/arch/arm/mach-ep93xx/Makefile.boot b/arch/arm/mach-ep93xx/Makefile.boot
index 0ad33f15c62..d3113a71cb4 100644
--- a/arch/arm/mach-ep93xx/Makefile.boot
+++ b/arch/arm/mach-ep93xx/Makefile.boot
@@ -1,14 +1,14 @@
- zreladdr-$(CONFIG_EP93XX_SDCE3_SYNC_PHYS_OFFSET) := 0x00008000
+ zreladdr-$(CONFIG_EP93XX_SDCE3_SYNC_PHYS_OFFSET) += 0x00008000
params_phys-$(CONFIG_EP93XX_SDCE3_SYNC_PHYS_OFFSET) := 0x00000100
- zreladdr-$(CONFIG_EP93XX_SDCE0_PHYS_OFFSET) := 0xc0008000
+ zreladdr-$(CONFIG_EP93XX_SDCE0_PHYS_OFFSET) += 0xc0008000
params_phys-$(CONFIG_EP93XX_SDCE0_PHYS_OFFSET) := 0xc0000100
- zreladdr-$(CONFIG_EP93XX_SDCE1_PHYS_OFFSET) := 0xd0008000
+ zreladdr-$(CONFIG_EP93XX_SDCE1_PHYS_OFFSET) += 0xd0008000
params_phys-$(CONFIG_EP93XX_SDCE1_PHYS_OFFSET) := 0xd0000100
- zreladdr-$(CONFIG_EP93XX_SDCE2_PHYS_OFFSET) := 0xe0008000
+ zreladdr-$(CONFIG_EP93XX_SDCE2_PHYS_OFFSET) += 0xe0008000
params_phys-$(CONFIG_EP93XX_SDCE2_PHYS_OFFSET) := 0xe0000100
- zreladdr-$(CONFIG_EP93XX_SDCE3_ASYNC_PHYS_OFFSET) := 0xf0008000
+ zreladdr-$(CONFIG_EP93XX_SDCE3_ASYNC_PHYS_OFFSET) += 0xf0008000
params_phys-$(CONFIG_EP93XX_SDCE3_ASYNC_PHYS_OFFSET) := 0xf0000100
diff --git a/arch/arm/mach-exynos4/Kconfig b/arch/arm/mach-exynos4/Kconfig
index 0c77ab99fa1..fc1f92dfbea 100644
--- a/arch/arm/mach-exynos4/Kconfig
+++ b/arch/arm/mach-exynos4/Kconfig
@@ -12,6 +12,7 @@ if ARCH_EXYNOS4
config CPU_EXYNOS4210
bool
select S3C_PL330_DMA
+ select ARM_CPU_SUSPEND if PM
help
Enable EXYNOS4210 CPU support
diff --git a/arch/arm/mach-exynos4/Makefile.boot b/arch/arm/mach-exynos4/Makefile.boot
index d65956ffb43..b9862e22bf1 100644
--- a/arch/arm/mach-exynos4/Makefile.boot
+++ b/arch/arm/mach-exynos4/Makefile.boot
@@ -1,2 +1,2 @@
- zreladdr-y := 0x40008000
+ zreladdr-y += 0x40008000
params_phys-y := 0x40000100
diff --git a/arch/arm/mach-exynos4/clock.c b/arch/arm/mach-exynos4/clock.c
index 1561b036a9b..86964d2e9e1 100644
--- a/arch/arm/mach-exynos4/clock.c
+++ b/arch/arm/mach-exynos4/clock.c
@@ -899,8 +899,7 @@ static struct clksrc_clk clksrcs[] = {
.reg_div = { .reg = S5P_CLKDIV_CAM, .shift = 28, .size = 4 },
}, {
.clk = {
- .name = "sclk_cam",
- .devname = "exynos4-fimc.0",
+ .name = "sclk_cam0",
.enable = exynos4_clksrc_mask_cam_ctrl,
.ctrlbit = (1 << 16),
},
@@ -909,8 +908,7 @@ static struct clksrc_clk clksrcs[] = {
.reg_div = { .reg = S5P_CLKDIV_CAM, .shift = 16, .size = 4 },
}, {
.clk = {
- .name = "sclk_cam",
- .devname = "exynos4-fimc.1",
+ .name = "sclk_cam1",
.enable = exynos4_clksrc_mask_cam_ctrl,
.ctrlbit = (1 << 20),
},
@@ -1160,7 +1158,7 @@ void __init_or_cpufreq exynos4_setup_clocks(void)
vpllsrc = clk_get_rate(&clk_vpllsrc.clk);
vpll = s5p_get_pll46xx(vpllsrc, __raw_readl(S5P_VPLL_CON0),
- __raw_readl(S5P_VPLL_CON1), pll_4650);
+ __raw_readl(S5P_VPLL_CON1), pll_4650c);
clk_fout_apll.ops = &exynos4_fout_apll_ops;
clk_fout_mpll.rate = mpll;
diff --git a/arch/arm/mach-exynos4/mct.c b/arch/arm/mach-exynos4/mct.c
index 1ae059b7ad7..ddd86864fb8 100644
--- a/arch/arm/mach-exynos4/mct.c
+++ b/arch/arm/mach-exynos4/mct.c
@@ -132,12 +132,18 @@ static cycle_t exynos4_frc_read(struct clocksource *cs)
return ((cycle_t)hi << 32) | lo;
}
+static void exynos4_frc_resume(struct clocksource *cs)
+{
+ exynos4_mct_frc_start(0, 0);
+}
+
struct clocksource mct_frc = {
.name = "mct-frc",
.rating = 400,
.read = exynos4_frc_read,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .resume = exynos4_frc_resume,
};
static void __init exynos4_clocksource_init(void)
@@ -389,9 +395,11 @@ static void exynos4_mct_tick_init(struct clock_event_device *evt)
}
/* Setup the local clock events for a CPU */
-void __cpuinit local_timer_setup(struct clock_event_device *evt)
+int __cpuinit local_timer_setup(struct clock_event_device *evt)
{
exynos4_mct_tick_init(evt);
+
+ return 0;
}
int local_timer_ack(void)
diff --git a/arch/arm/mach-exynos4/platsmp.c b/arch/arm/mach-exynos4/platsmp.c
index 7c2282c6ba8..0c90896ad9a 100644
--- a/arch/arm/mach-exynos4/platsmp.c
+++ b/arch/arm/mach-exynos4/platsmp.c
@@ -106,6 +106,8 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
*/
spin_lock(&boot_lock);
spin_unlock(&boot_lock);
+
+ set_cpu_online(cpu, true);
}
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -191,12 +193,10 @@ void __init smp_init_cpus(void)
ncores = scu_base ? scu_get_core_count(scu_base) : 1;
/* sanity check */
- if (ncores > NR_CPUS) {
- printk(KERN_WARNING
- "EXYNOS4: no. of cores (%d) greater than configured "
- "maximum of %d - clipping\n",
- ncores, NR_CPUS);
- ncores = NR_CPUS;
+ if (ncores > nr_cpu_ids) {
+ pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+ ncores, nr_cpu_ids);
+ ncores = nr_cpu_ids;
}
for (i = 0; i < ncores; i++)
diff --git a/arch/arm/mach-exynos4/setup-keypad.c b/arch/arm/mach-exynos4/setup-keypad.c
index 1ee0ebff111..7862bfb5933 100644
--- a/arch/arm/mach-exynos4/setup-keypad.c
+++ b/arch/arm/mach-exynos4/setup-keypad.c
@@ -19,15 +19,16 @@ void samsung_keypad_cfg_gpio(unsigned int rows, unsigned int cols)
if (rows > 8) {
/* Set all the necessary GPX2 pins: KP_ROW[0~7] */
- s3c_gpio_cfgrange_nopull(EXYNOS4_GPX2(0), 8, S3C_GPIO_SFN(3));
+ s3c_gpio_cfgall_range(EXYNOS4_GPX2(0), 8, S3C_GPIO_SFN(3),
+ S3C_GPIO_PULL_UP);
/* Set all the necessary GPX3 pins: KP_ROW[8~] */
- s3c_gpio_cfgrange_nopull(EXYNOS4_GPX3(0), (rows - 8),
- S3C_GPIO_SFN(3));
+ s3c_gpio_cfgall_range(EXYNOS4_GPX3(0), (rows - 8),
+ S3C_GPIO_SFN(3), S3C_GPIO_PULL_UP);
} else {
/* Set all the necessary GPX2 pins: KP_ROW[x] */
- s3c_gpio_cfgrange_nopull(EXYNOS4_GPX2(0), rows,
- S3C_GPIO_SFN(3));
+ s3c_gpio_cfgall_range(EXYNOS4_GPX2(0), rows, S3C_GPIO_SFN(3),
+ S3C_GPIO_PULL_UP);
}
/* Set all the necessary GPX1 pins to special-function 3: KP_COL[x] */
diff --git a/arch/arm/mach-footbridge/Kconfig b/arch/arm/mach-footbridge/Kconfig
index c8e7afcf14e..f643ef819da 100644
--- a/arch/arm/mach-footbridge/Kconfig
+++ b/arch/arm/mach-footbridge/Kconfig
@@ -4,8 +4,8 @@ menu "Footbridge Implementations"
config ARCH_CATS
bool "CATS"
- select CLKSRC_I8253
select CLKEVT_I8253
+ select CLKSRC_I8253
select FOOTBRIDGE_HOST
select ISA
select ISA_DMA
@@ -61,8 +61,8 @@ config ARCH_EBSA285_HOST
config ARCH_NETWINDER
bool "NetWinder"
- select CLKSRC_I8253
select CLKEVT_I8253
+ select CLKSRC_I8253
select FOOTBRIDGE_HOST
select ISA
select ISA_DMA
diff --git a/arch/arm/mach-footbridge/Makefile.boot b/arch/arm/mach-footbridge/Makefile.boot
index c7e75acfe6c..ff0a4b5b0a8 100644
--- a/arch/arm/mach-footbridge/Makefile.boot
+++ b/arch/arm/mach-footbridge/Makefile.boot
@@ -1,4 +1,4 @@
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000100
initrd_phys-y := 0x00800000
diff --git a/arch/arm/mach-footbridge/cats-hw.c b/arch/arm/mach-footbridge/cats-hw.c
index 5b1a8db779b..206ff2f39d6 100644
--- a/arch/arm/mach-footbridge/cats-hw.c
+++ b/arch/arm/mach-footbridge/cats-hw.c
@@ -76,8 +76,7 @@ __initcall(cats_hw_init);
* hard reboots fail on early boards.
*/
static void __init
-fixup_cats(struct machine_desc *desc, struct tag *tags,
- char **cmdline, struct meminfo *mi)
+fixup_cats(struct tag *tags, char **cmdline, struct meminfo *mi)
{
screen_info.orig_video_lines = 25;
screen_info.orig_video_points = 16;
diff --git a/arch/arm/mach-footbridge/include/mach/io.h b/arch/arm/mach-footbridge/include/mach/io.h
index 32e4cc397c2..15a70396c27 100644
--- a/arch/arm/mach-footbridge/include/mach/io.h
+++ b/arch/arm/mach-footbridge/include/mach/io.h
@@ -23,8 +23,6 @@
#define PCIO_SIZE 0x00100000
#define PCIO_BASE MMU_IO(0xff000000, 0x7c000000)
-#define IO_SPACE_LIMIT 0xffff
-
/*
* Translation of various region addresses to virtual addresses
*/
diff --git a/arch/arm/mach-footbridge/netwinder-hw.c b/arch/arm/mach-footbridge/netwinder-hw.c
index 06e514f372d..4cbc2e65ce3 100644
--- a/arch/arm/mach-footbridge/netwinder-hw.c
+++ b/arch/arm/mach-footbridge/netwinder-hw.c
@@ -631,8 +631,7 @@ __initcall(nw_hw_init);
* the parameter page.
*/
static void __init
-fixup_netwinder(struct machine_desc *desc, struct tag *tags,
- char **cmdline, struct meminfo *mi)
+fixup_netwinder(struct tag *tags, char **cmdline, struct meminfo *mi)
{
#ifdef CONFIG_ISAPNP
extern int isapnp_disable;
diff --git a/arch/arm/mach-gemini/Makefile.boot b/arch/arm/mach-gemini/Makefile.boot
index 22a52c228d9..683f52b20e3 100644
--- a/arch/arm/mach-gemini/Makefile.boot
+++ b/arch/arm/mach-gemini/Makefile.boot
@@ -1,9 +1,9 @@
ifeq ($(CONFIG_GEMINI_MEM_SWAP),y)
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000100
initrd_phys-y := 0x00800000
else
- zreladdr-y := 0x10008000
+ zreladdr-y += 0x10008000
params_phys-y := 0x10000100
initrd_phys-y := 0x10800000
endif
diff --git a/arch/arm/mach-h720x/Makefile.boot b/arch/arm/mach-h720x/Makefile.boot
index 52984017bd9..d875a7094df 100644
--- a/arch/arm/mach-h720x/Makefile.boot
+++ b/arch/arm/mach-h720x/Makefile.boot
@@ -1,2 +1,2 @@
- zreladdr-$(CONFIG_ARCH_H720X) := 0x40008000
+ zreladdr-$(CONFIG_ARCH_H720X) += 0x40008000
diff --git a/arch/arm/mach-imx/Makefile.boot b/arch/arm/mach-imx/Makefile.boot
index ebee18b3884..dbe61201bcd 100644
--- a/arch/arm/mach-imx/Makefile.boot
+++ b/arch/arm/mach-imx/Makefile.boot
@@ -1,19 +1,19 @@
-zreladdr-$(CONFIG_ARCH_MX1) := 0x08008000
+zreladdr-$(CONFIG_ARCH_MX1) += 0x08008000
params_phys-$(CONFIG_ARCH_MX1) := 0x08000100
initrd_phys-$(CONFIG_ARCH_MX1) := 0x08800000
-zreladdr-$(CONFIG_MACH_MX21) := 0xC0008000
+zreladdr-$(CONFIG_MACH_MX21) += 0xC0008000
params_phys-$(CONFIG_MACH_MX21) := 0xC0000100
initrd_phys-$(CONFIG_MACH_MX21) := 0xC0800000
-zreladdr-$(CONFIG_ARCH_MX25) := 0x80008000
+zreladdr-$(CONFIG_ARCH_MX25) += 0x80008000
params_phys-$(CONFIG_ARCH_MX25) := 0x80000100
initrd_phys-$(CONFIG_ARCH_MX25) := 0x80800000
-zreladdr-$(CONFIG_MACH_MX27) := 0xA0008000
+zreladdr-$(CONFIG_MACH_MX27) += 0xA0008000
params_phys-$(CONFIG_MACH_MX27) := 0xA0000100
initrd_phys-$(CONFIG_MACH_MX27) := 0xA0800000
-zreladdr-$(CONFIG_ARCH_MX3) := 0x80008000
+zreladdr-$(CONFIG_ARCH_MX3) += 0x80008000
params_phys-$(CONFIG_ARCH_MX3) := 0x80000100
initrd_phys-$(CONFIG_ARCH_MX3) := 0x80800000
diff --git a/arch/arm/mach-integrator/Makefile.boot b/arch/arm/mach-integrator/Makefile.boot
index c7e75acfe6c..ff0a4b5b0a8 100644
--- a/arch/arm/mach-integrator/Makefile.boot
+++ b/arch/arm/mach-integrator/Makefile.boot
@@ -1,4 +1,4 @@
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000100
initrd_phys-y := 0x00800000
diff --git a/arch/arm/mach-integrator/core.c b/arch/arm/mach-integrator/core.c
index 77315b99568..82ebc8d772d 100644
--- a/arch/arm/mach-integrator/core.c
+++ b/arch/arm/mach-integrator/core.c
@@ -126,6 +126,10 @@ static struct clk_lookup lookups[] = {
{ /* Bus clock */
.con_id = "apb_pclk",
.clk = &dummy_apb_pclk,
+ }, {
+ /* Integrator/AP timer frequency */
+ .dev_id = "ap_timer",
+ .clk = &clk24mhz,
}, { /* UART0 */
.dev_id = "mb:16",
.clk = &uartclk,
diff --git a/arch/arm/mach-integrator/include/mach/io.h b/arch/arm/mach-integrator/include/mach/io.h
index f21bb5493dd..37beed3fa3e 100644
--- a/arch/arm/mach-integrator/include/mach/io.h
+++ b/arch/arm/mach-integrator/include/mach/io.h
@@ -20,8 +20,6 @@
#ifndef __ASM_ARM_ARCH_IO_H
#define __ASM_ARM_ARCH_IO_H
-#define IO_SPACE_LIMIT 0xffff
-
/*
* WARNING: this has to mirror definitions in platform.h
*/
diff --git a/arch/arm/mach-integrator/include/mach/platform.h b/arch/arm/mach-integrator/include/mach/platform.h
index 5e6ea5cfea6..ec467baade0 100644
--- a/arch/arm/mach-integrator/include/mach/platform.h
+++ b/arch/arm/mach-integrator/include/mach/platform.h
@@ -13,9 +13,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-/* DO NOT EDIT!! - this file automatically generated
- * from .s file by awk -f s2h.awk
- */
/**************************************************************************
* * Copyright © ARM Limited 1998. All rights reserved.
* ***********************************************************************/
@@ -399,15 +396,6 @@
#define INTEGRATOR_TIMER1_BASE (INTEGRATOR_CT_BASE + 0x100)
#define INTEGRATOR_TIMER2_BASE (INTEGRATOR_CT_BASE + 0x200)
-#define TICKS_PER_uSEC 24
-
-/*
- * These are useconds NOT ticks.
- *
- */
-#define mSEC_1 1000
-#define mSEC_10 (mSEC_1 * 10)
-
#define INTEGRATOR_CSR_BASE 0x10000000
#define INTEGRATOR_CSR_SIZE 0x10000000
diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c
index fcf0ae95651..f2119908a0b 100644
--- a/arch/arm/mach-integrator/integrator_ap.c
+++ b/arch/arm/mach-integrator/integrator_ap.c
@@ -32,6 +32,8 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mtd/physmap.h>
+#include <linux/clk.h>
+#include <video/vga.h>
#include <mach/hardware.h>
#include <mach/platform.h>
@@ -154,6 +156,7 @@ static struct map_desc ap_io_desc[] __initdata = {
static void __init ap_map_io(void)
{
iotable_init(ap_io_desc, ARRAY_SIZE(ap_io_desc));
+ vga_base = PCI_MEMORY_VADDR;
}
#define INTEGRATOR_SC_VALID_INT 0x003fffff
@@ -320,27 +323,16 @@ static void __init ap_init(void)
#define TIMER1_VA_BASE IO_ADDRESS(INTEGRATOR_TIMER1_BASE)
#define TIMER2_VA_BASE IO_ADDRESS(INTEGRATOR_TIMER2_BASE)
-/*
- * How long is the timer interval?
- */
-#define TIMER_INTERVAL (TICKS_PER_uSEC * mSEC_10)
-#if TIMER_INTERVAL >= 0x100000
-#define TICKS2USECS(x) (256 * (x) / TICKS_PER_uSEC)
-#elif TIMER_INTERVAL >= 0x10000
-#define TICKS2USECS(x) (16 * (x) / TICKS_PER_uSEC)
-#else
-#define TICKS2USECS(x) ((x) / TICKS_PER_uSEC)
-#endif
-
static unsigned long timer_reload;
-static void integrator_clocksource_init(u32 khz)
+static void integrator_clocksource_init(unsigned long inrate)
{
void __iomem *base = (void __iomem *)TIMER2_VA_BASE;
u32 ctrl = TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC;
+ unsigned long rate = inrate;
- if (khz >= 1500) {
- khz /= 16;
+ if (rate >= 1500000) {
+ rate /= 16;
ctrl |= TIMER_CTRL_DIV16;
}
@@ -348,7 +340,7 @@ static void integrator_clocksource_init(u32 khz)
writel(ctrl, base + TIMER_CTRL);
clocksource_mmio_init(base + TIMER_VALUE, "timer2",
- khz * 1000, 200, 16, clocksource_mmio_readl_down);
+ rate, 200, 16, clocksource_mmio_readl_down);
}
static void __iomem * const clkevt_base = (void __iomem *)TIMER1_VA_BASE;
@@ -372,15 +364,29 @@ static void clkevt_set_mode(enum clock_event_mode mode, struct clock_event_devic
{
u32 ctrl = readl(clkevt_base + TIMER_CTRL) & ~TIMER_CTRL_ENABLE;
- BUG_ON(mode == CLOCK_EVT_MODE_ONESHOT);
+ /* Disable timer */
+ writel(ctrl, clkevt_base + TIMER_CTRL);
- if (mode == CLOCK_EVT_MODE_PERIODIC) {
- writel(ctrl, clkevt_base + TIMER_CTRL);
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ /* Enable the timer and start the periodic tick */
writel(timer_reload, clkevt_base + TIMER_LOAD);
ctrl |= TIMER_CTRL_PERIODIC | TIMER_CTRL_ENABLE;
+ writel(ctrl, clkevt_base + TIMER_CTRL);
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ /* Leave the timer disabled, .set_next_event will enable it */
+ ctrl &= ~TIMER_CTRL_PERIODIC;
+ writel(ctrl, clkevt_base + TIMER_CTRL);
+ break;
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ case CLOCK_EVT_MODE_RESUME:
+ default:
+ /* Just leave in disabled state */
+ break;
}
- writel(ctrl, clkevt_base + TIMER_CTRL);
}
static int clkevt_set_next_event(unsigned long next, struct clock_event_device *evt)
@@ -396,12 +402,10 @@ static int clkevt_set_next_event(unsigned long next, struct clock_event_device *
static struct clock_event_device integrator_clockevent = {
.name = "timer1",
- .shift = 34,
- .features = CLOCK_EVT_FEAT_PERIODIC,
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
.set_mode = clkevt_set_mode,
.set_next_event = clkevt_set_next_event,
.rating = 300,
- .cpumask = cpu_all_mask,
};
static struct irqaction integrator_timer_irq = {
@@ -411,29 +415,27 @@ static struct irqaction integrator_timer_irq = {
.dev_id = &integrator_clockevent,
};
-static void integrator_clockevent_init(u32 khz)
+static void integrator_clockevent_init(unsigned long inrate)
{
- struct clock_event_device *evt = &integrator_clockevent;
+ unsigned long rate = inrate;
unsigned int ctrl = 0;
- if (khz * 1000 > 0x100000 * HZ) {
- khz /= 256;
+ /* Calculate and program a divisor */
+ if (rate > 0x100000 * HZ) {
+ rate /= 256;
ctrl |= TIMER_CTRL_DIV256;
- } else if (khz * 1000 > 0x10000 * HZ) {
- khz /= 16;
+ } else if (rate > 0x10000 * HZ) {
+ rate /= 16;
ctrl |= TIMER_CTRL_DIV16;
}
-
- timer_reload = khz * 1000 / HZ;
+ timer_reload = rate / HZ;
writel(ctrl, clkevt_base + TIMER_CTRL);
- evt->irq = IRQ_TIMERINT1;
- evt->mult = div_sc(khz, NSEC_PER_MSEC, evt->shift);
- evt->max_delta_ns = clockevent_delta2ns(0xffff, evt);
- evt->min_delta_ns = clockevent_delta2ns(0xf, evt);
-
setup_irq(IRQ_TIMERINT1, &integrator_timer_irq);
- clockevents_register_device(evt);
+ clockevents_config_and_register(&integrator_clockevent,
+ rate,
+ 1,
+ 0xffffU);
}
/*
@@ -441,14 +443,20 @@ static void integrator_clockevent_init(u32 khz)
*/
static void __init ap_init_timer(void)
{
- u32 khz = TICKS_PER_uSEC * 1000;
+ struct clk *clk;
+ unsigned long rate;
+
+ clk = clk_get_sys("ap_timer", NULL);
+ BUG_ON(IS_ERR(clk));
+ clk_enable(clk);
+ rate = clk_get_rate(clk);
writel(0, TIMER0_VA_BASE + TIMER_CTRL);
writel(0, TIMER1_VA_BASE + TIMER_CTRL);
writel(0, TIMER2_VA_BASE + TIMER_CTRL);
- integrator_clocksource_init(khz);
- integrator_clockevent_init(khz);
+ integrator_clocksource_init(rate);
+ integrator_clockevent_init(rate);
}
static struct sys_timer ap_timer = {
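Note: the Integrator/AP timer rework above drops the hand-computed shift/mult and delta fields and lets clockevents_config_and_register() derive them from the tick rate, which is now taken from the clk framework via the "ap_timer" lookup added in core.c. A minimal sketch of the resulting registration path, assuming the 16-bit down-counter of this timer block:

	struct clk *clk;
	unsigned long rate;

	clk = clk_get_sys("ap_timer", NULL);	/* matches the clk_lookup entry above */
	BUG_ON(IS_ERR(clk));
	clk_enable(clk);
	rate = clk_get_rate(clk);

	/* The core computes mult/shift and min/max deltas from the rate */
	clockevents_config_and_register(&integrator_clockevent, rate, 1, 0xffffU);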
diff --git a/arch/arm/mach-integrator/pci_v3.c b/arch/arm/mach-integrator/pci_v3.c
index dd56bfb351e..11b86e5b71c 100644
--- a/arch/arm/mach-integrator/pci_v3.c
+++ b/arch/arm/mach-integrator/pci_v3.c
@@ -27,7 +27,6 @@
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/io.h>
-#include <video/vga.h>
#include <mach/hardware.h>
#include <mach/platform.h>
@@ -505,7 +504,6 @@ void __init pci_v3_preinit(void)
pcibios_min_io = 0x6000;
pcibios_min_mem = 0x00100000;
- vga_base = PCI_MEMORY_VADDR;
/*
* Hook in our fault handler for PCI errors
diff --git a/arch/arm/mach-iop13xx/Makefile.boot b/arch/arm/mach-iop13xx/Makefile.boot
index 0b0e19fdfe6..3a8c38c3189 100644
--- a/arch/arm/mach-iop13xx/Makefile.boot
+++ b/arch/arm/mach-iop13xx/Makefile.boot
@@ -1,3 +1,3 @@
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000100
initrd_phys-y := 0x00800000
diff --git a/arch/arm/mach-iop32x/Makefile.boot b/arch/arm/mach-iop32x/Makefile.boot
index 47000dccd61..0a833b11e38 100644
--- a/arch/arm/mach-iop32x/Makefile.boot
+++ b/arch/arm/mach-iop32x/Makefile.boot
@@ -1,3 +1,3 @@
- zreladdr-y := 0xa0008000
+ zreladdr-y += 0xa0008000
params_phys-y := 0xa0000100
initrd_phys-y := 0xa0800000
diff --git a/arch/arm/mach-iop33x/Makefile.boot b/arch/arm/mach-iop33x/Makefile.boot
index 67039c3e0c4..760a0efe758 100644
--- a/arch/arm/mach-iop33x/Makefile.boot
+++ b/arch/arm/mach-iop33x/Makefile.boot
@@ -1,3 +1,3 @@
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000100
initrd_phys-y := 0x00800000
diff --git a/arch/arm/mach-ixp2000/Makefile.boot b/arch/arm/mach-ixp2000/Makefile.boot
index d84c5807a43..9c7af91d93d 100644
--- a/arch/arm/mach-ixp2000/Makefile.boot
+++ b/arch/arm/mach-ixp2000/Makefile.boot
@@ -1,3 +1,3 @@
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000100
diff --git a/arch/arm/mach-ixp23xx/Makefile.boot b/arch/arm/mach-ixp23xx/Makefile.boot
index d5561ad15ba..44fb4a717c3 100644
--- a/arch/arm/mach-ixp23xx/Makefile.boot
+++ b/arch/arm/mach-ixp23xx/Makefile.boot
@@ -1,2 +1,2 @@
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000100
diff --git a/arch/arm/mach-ixp4xx/Makefile.boot b/arch/arm/mach-ixp4xx/Makefile.boot
index d84c5807a43..9c7af91d93d 100644
--- a/arch/arm/mach-ixp4xx/Makefile.boot
+++ b/arch/arm/mach-ixp4xx/Makefile.boot
@@ -1,3 +1,3 @@
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000100
diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c
index 2131832ee6b..85245e48099 100644
--- a/arch/arm/mach-ixp4xx/common-pci.c
+++ b/arch/arm/mach-ixp4xx/common-pci.c
@@ -397,7 +397,8 @@ void __init ixp4xx_pci_preinit(void)
local_write_config(PCI_BASE_ADDRESS_0, 4, PHYS_OFFSET);
local_write_config(PCI_BASE_ADDRESS_1, 4, PHYS_OFFSET + SZ_16M);
local_write_config(PCI_BASE_ADDRESS_2, 4, PHYS_OFFSET + SZ_32M);
- local_write_config(PCI_BASE_ADDRESS_3, 4, PHYS_OFFSET + SZ_48M);
+ local_write_config(PCI_BASE_ADDRESS_3, 4,
+ PHYS_OFFSET + SZ_32M + SZ_16M);
/*
* Enable CSR window at 64 MiB to allow PCI masters
diff --git a/arch/arm/mach-ixp4xx/include/mach/io.h b/arch/arm/mach-ixp4xx/include/mach/io.h
index 57b5410c31f..ffb9d6afb89 100644
--- a/arch/arm/mach-ixp4xx/include/mach/io.h
+++ b/arch/arm/mach-ixp4xx/include/mach/io.h
@@ -17,8 +17,6 @@
#include <mach/hardware.h>
-#define IO_SPACE_LIMIT 0x0000ffff
-
extern int (*ixp4xx_pci_read)(u32 addr, u32 cmd, u32* data);
extern int ixp4xx_pci_write(u32 addr, u32 cmd, u32 data);
diff --git a/arch/arm/mach-kirkwood/Makefile.boot b/arch/arm/mach-kirkwood/Makefile.boot
index 67039c3e0c4..760a0efe758 100644
--- a/arch/arm/mach-kirkwood/Makefile.boot
+++ b/arch/arm/mach-kirkwood/Makefile.boot
@@ -1,3 +1,3 @@
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000100
initrd_phys-y := 0x00800000
diff --git a/arch/arm/mach-ks8695/Makefile.boot b/arch/arm/mach-ks8695/Makefile.boot
index 48eb2cb3ac7..c9b0bebcf23 100644
--- a/arch/arm/mach-ks8695/Makefile.boot
+++ b/arch/arm/mach-ks8695/Makefile.boot
@@ -3,6 +3,6 @@
# PARAMS_PHYS must be within 4MB of ZRELADDR
# INITRD_PHYS must be in RAM
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000100
initrd_phys-y := 0x00800000
diff --git a/arch/arm/mach-lpc32xx/Makefile.boot b/arch/arm/mach-lpc32xx/Makefile.boot
index b796b41ebf8..2cfe0ee635c 100644
--- a/arch/arm/mach-lpc32xx/Makefile.boot
+++ b/arch/arm/mach-lpc32xx/Makefile.boot
@@ -1,4 +1,4 @@
- zreladdr-y := 0x80008000
+ zreladdr-y += 0x80008000
params_phys-y := 0x80000100
initrd_phys-y := 0x82000000
diff --git a/arch/arm/mach-mmp/Makefile.boot b/arch/arm/mach-mmp/Makefile.boot
index 574a4aa8321..5edf03e2bee 100644
--- a/arch/arm/mach-mmp/Makefile.boot
+++ b/arch/arm/mach-mmp/Makefile.boot
@@ -1 +1 @@
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
diff --git a/arch/arm/mach-mmp/aspenite.c b/arch/arm/mach-mmp/aspenite.c
index 06b5fa853c9..49c5d6d843d 100644
--- a/arch/arm/mach-mmp/aspenite.c
+++ b/arch/arm/mach-mmp/aspenite.c
@@ -160,7 +160,7 @@ static struct mtd_partition aspenite_nand_partitions[] = {
}, {
.name = "filesystem",
.offset = MTDPART_OFS_APPEND,
- .size = SZ_48M,
+ .size = SZ_32M + SZ_16M,
.mask_flags = 0,
}
};
diff --git a/arch/arm/mach-mmp/ttc_dkb.c b/arch/arm/mach-mmp/ttc_dkb.c
index 6bd37a27e5f..176515a7698 100644
--- a/arch/arm/mach-mmp/ttc_dkb.c
+++ b/arch/arm/mach-mmp/ttc_dkb.c
@@ -93,7 +93,7 @@ static struct mtd_partition ttc_dkb_onenand_partitions[] = {
}, {
.name = "filesystem",
.offset = MTDPART_OFS_APPEND,
- .size = SZ_48M,
+ .size = SZ_32M + SZ_16M,
.mask_flags = 0,
}
};
diff --git a/arch/arm/mach-msm/Makefile.boot b/arch/arm/mach-msm/Makefile.boot
index 24dfbf8c07c..9b803a578b4 100644
--- a/arch/arm/mach-msm/Makefile.boot
+++ b/arch/arm/mach-msm/Makefile.boot
@@ -1,3 +1,3 @@
- zreladdr-y := 0x10008000
+ zreladdr-y += 0x10008000
params_phys-y := 0x10000100
initrd_phys-y := 0x10800000
diff --git a/arch/arm/mach-msm/board-halibut.c b/arch/arm/mach-msm/board-halibut.c
index 18a3c97bc86..f81ef1f9d46 100644
--- a/arch/arm/mach-msm/board-halibut.c
+++ b/arch/arm/mach-msm/board-halibut.c
@@ -78,8 +78,8 @@ static void __init halibut_init(void)
platform_add_devices(devices, ARRAY_SIZE(devices));
}
-static void __init halibut_fixup(struct machine_desc *desc, struct tag *tags,
- char **cmdline, struct meminfo *mi)
+static void __init halibut_fixup(struct tag *tags, char **cmdline,
+ struct meminfo *mi)
{
mi->nr_banks=1;
mi->bank[0].start = PHYS_OFFSET;
diff --git a/arch/arm/mach-msm/board-mahimahi.c b/arch/arm/mach-msm/board-mahimahi.c
index 7a9a03eb189..1df15aa3c66 100644
--- a/arch/arm/mach-msm/board-mahimahi.c
+++ b/arch/arm/mach-msm/board-mahimahi.c
@@ -53,8 +53,8 @@ static void __init mahimahi_init(void)
platform_add_devices(devices, ARRAY_SIZE(devices));
}
-static void __init mahimahi_fixup(struct machine_desc *desc, struct tag *tags,
- char **cmdline, struct meminfo *mi)
+static void __init mahimahi_fixup(struct tag *tags, char **cmdline,
+ struct meminfo *mi)
{
mi->nr_banks = 2;
mi->bank[0].start = PHYS_OFFSET;
diff --git a/arch/arm/mach-msm/board-msm7x30.c b/arch/arm/mach-msm/board-msm7x30.c
index b7a84966b71..d1e4cc83b1e 100644
--- a/arch/arm/mach-msm/board-msm7x30.c
+++ b/arch/arm/mach-msm/board-msm7x30.c
@@ -24,6 +24,7 @@
#include <linux/smsc911x.h>
#include <linux/usb/msm_hsusb.h>
#include <linux/clkdev.h>
+#include <linux/memblock.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -42,6 +43,21 @@
extern struct sys_timer msm_timer;
+static void __init msm7x30_fixup(struct machine_desc *desc, struct tag *tag,
+ char **cmdline, struct meminfo *mi)
+{
+ for (; tag->hdr.size; tag = tag_next(tag))
+ if (tag->hdr.tag == ATAG_MEM && tag->u.mem.start == 0x200000) {
+ tag->u.mem.start = 0;
+ tag->u.mem.size += SZ_2M;
+ }
+}
+
+static void __init msm7x30_reserve(void)
+{
+ memblock_remove(0x0, SZ_2M);
+}
+
static int hsusb_phy_init_seq[] = {
0x30, 0x32, /* Enable and set Pre-Emphasis Depth to 20% */
0x02, 0x36, /* Disable CDR Auto Reset feature */
@@ -107,6 +123,8 @@ static void __init msm7x30_map_io(void)
MACHINE_START(MSM7X30_SURF, "QCT MSM7X30 SURF")
.boot_params = PLAT_PHYS_OFFSET + 0x100,
+ .fixup = msm7x30_fixup,
+ .reserve = msm7x30_reserve,
.map_io = msm7x30_map_io,
.init_irq = msm7x30_init_irq,
.init_machine = msm7x30_init,
@@ -115,6 +133,8 @@ MACHINE_END
MACHINE_START(MSM7X30_FFA, "QCT MSM7X30 FFA")
.boot_params = PLAT_PHYS_OFFSET + 0x100,
+ .fixup = msm7x30_fixup,
+ .reserve = msm7x30_reserve,
.map_io = msm7x30_map_io,
.init_irq = msm7x30_init_irq,
.init_machine = msm7x30_init,
@@ -123,6 +143,8 @@ MACHINE_END
MACHINE_START(MSM7X30_FLUID, "QCT MSM7X30 FLUID")
.boot_params = PLAT_PHYS_OFFSET + 0x100,
+ .fixup = msm7x30_fixup,
+ .reserve = msm7x30_reserve,
.map_io = msm7x30_map_io,
.init_irq = msm7x30_init_irq,
.init_machine = msm7x30_init,
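Note: these MSM boards pair an ATAG fixup with a memblock reservation so that PLAT_PHYS_OFFSET can move down to a natural boundary (see the mach/memory.h hunk below) while the first 2MB, which the bootloader did not report as usable, stays out of the allocator. A minimal sketch of the pattern with placeholder names; the 0x200000 start is the value handled in the MSM7x30 hunk above:

static void __init example_fixup(struct machine_desc *desc, struct tag *tag,
				 char **cmdline, struct meminfo *mi)
{
	for (; tag->hdr.size; tag = tag_next(tag))
		if (tag->hdr.tag == ATAG_MEM && tag->u.mem.start == 0x200000) {
			tag->u.mem.start = 0;		/* grow the bank down to 0 */
			tag->u.mem.size += SZ_2M;
		}
}

static void __init example_reserve(void)
{
	memblock_remove(0x0, SZ_2M);	/* keep the unreported first 2MB unused */
}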
diff --git a/arch/arm/mach-msm/board-msm8960.c b/arch/arm/mach-msm/board-msm8960.c
index 35c7ceeb3f2..b04468e7d00 100644
--- a/arch/arm/mach-msm/board-msm8960.c
+++ b/arch/arm/mach-msm/board-msm8960.c
@@ -20,16 +20,34 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clkdev.h>
+#include <linux/memblock.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/hardware/gic.h>
+#include <asm/setup.h>
#include <mach/board.h>
#include <mach/msm_iomap.h>
#include "devices.h"
+static void __init msm8960_fixup(struct machine_desc *desc, struct tag *tag,
+ char **cmdline, struct meminfo *mi)
+{
+ for (; tag->hdr.size; tag = tag_next(tag))
+ if (tag->hdr.tag == ATAG_MEM &&
+ tag->u.mem.start == 0x40200000) {
+ tag->u.mem.start = 0x40000000;
+ tag->u.mem.size += SZ_2M;
+ }
+}
+
+static void __init msm8960_reserve(void)
+{
+ memblock_remove(0x40000000, SZ_2M);
+}
+
static void __init msm8960_map_io(void)
{
msm_map_msm8960_io();
@@ -76,6 +94,8 @@ static void __init msm8960_rumi3_init(void)
}
MACHINE_START(MSM8960_SIM, "QCT MSM8960 SIMULATOR")
+ .fixup = msm8960_fixup,
+ .reserve = msm8960_reserve,
.map_io = msm8960_map_io,
.init_irq = msm8960_init_irq,
.timer = &msm_timer,
@@ -83,6 +103,8 @@ MACHINE_START(MSM8960_SIM, "QCT MSM8960 SIMULATOR")
MACHINE_END
MACHINE_START(MSM8960_RUMI3, "QCT MSM8960 RUMI3")
+ .fixup = msm8960_fixup,
+ .reserve = msm8960_reserve,
.map_io = msm8960_map_io,
.init_irq = msm8960_init_irq,
.timer = &msm_timer,
diff --git a/arch/arm/mach-msm/board-msm8x60.c b/arch/arm/mach-msm/board-msm8x60.c
index 1163b6fd05d..9221f54778b 100644
--- a/arch/arm/mach-msm/board-msm8x60.c
+++ b/arch/arm/mach-msm/board-msm8x60.c
@@ -20,14 +20,31 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/memblock.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/hardware/gic.h>
+#include <asm/setup.h>
#include <mach/board.h>
#include <mach/msm_iomap.h>
+static void __init msm8x60_fixup(struct machine_desc *desc, struct tag *tag,
+ char **cmdline, struct meminfo *mi)
+{
+ for (; tag->hdr.size; tag = tag_next(tag))
+ if (tag->hdr.tag == ATAG_MEM &&
+ tag->u.mem.start == 0x40200000) {
+ tag->u.mem.start = 0x40000000;
+ tag->u.mem.size += SZ_2M;
+ }
+}
+
+static void __init msm8x60_reserve(void)
+{
+ memblock_remove(0x40000000, SZ_2M);
+}
static void __init msm8x60_map_io(void)
{
@@ -65,6 +82,8 @@ static void __init msm8x60_init(void)
}
MACHINE_START(MSM8X60_RUMI3, "QCT MSM8X60 RUMI3")
+ .fixup = msm8x60_fixup,
+ .reserve = msm8x60_reserve,
.map_io = msm8x60_map_io,
.init_irq = msm8x60_init_irq,
.init_machine = msm8x60_init,
@@ -72,6 +91,8 @@ MACHINE_START(MSM8X60_RUMI3, "QCT MSM8X60 RUMI3")
MACHINE_END
MACHINE_START(MSM8X60_SURF, "QCT MSM8X60 SURF")
+ .fixup = msm8x60_fixup,
+ .reserve = msm8x60_reserve,
.map_io = msm8x60_map_io,
.init_irq = msm8x60_init_irq,
.init_machine = msm8x60_init,
@@ -79,6 +100,8 @@ MACHINE_START(MSM8X60_SURF, "QCT MSM8X60 SURF")
MACHINE_END
MACHINE_START(MSM8X60_SIM, "QCT MSM8X60 SIMULATOR")
+ .fixup = msm8x60_fixup,
+ .reserve = msm8x60_reserve,
.map_io = msm8x60_map_io,
.init_irq = msm8x60_init_irq,
.init_machine = msm8x60_init,
@@ -86,6 +109,8 @@ MACHINE_START(MSM8X60_SIM, "QCT MSM8X60 SIMULATOR")
MACHINE_END
MACHINE_START(MSM8X60_FFA, "QCT MSM8X60 FFA")
+ .fixup = msm8x60_fixup,
+ .reserve = msm8x60_reserve,
.map_io = msm8x60_map_io,
.init_irq = msm8x60_init_irq,
.init_machine = msm8x60_init,
diff --git a/arch/arm/mach-msm/board-sapphire.c b/arch/arm/mach-msm/board-sapphire.c
index 68f930f07d7..c6e043c896a 100644
--- a/arch/arm/mach-msm/board-sapphire.c
+++ b/arch/arm/mach-msm/board-sapphire.c
@@ -77,8 +77,8 @@ static struct map_desc sapphire_io_desc[] __initdata = {
}
};
-static void __init sapphire_fixup(struct machine_desc *desc, struct tag *tags,
- char **cmdline, struct meminfo *mi)
+static void __init sapphire_fixup(struct tag *tags, char **cmdline,
+ struct meminfo *mi)
{
int smi_sz = parse_tag_smi((const struct tag *)tags);
diff --git a/arch/arm/mach-msm/board-trout.c b/arch/arm/mach-msm/board-trout.c
index 814386772c6..7acd2021ada 100644
--- a/arch/arm/mach-msm/board-trout.c
+++ b/arch/arm/mach-msm/board-trout.c
@@ -48,8 +48,8 @@ static void __init trout_init_irq(void)
msm_init_irq();
}
-static void __init trout_fixup(struct machine_desc *desc, struct tag *tags,
- char **cmdline, struct meminfo *mi)
+static void __init trout_fixup(struct tag *tags, char **cmdline,
+ struct meminfo *mi)
{
mi->nr_banks = 1;
mi->bank[0].start = PHYS_OFFSET;
diff --git a/arch/arm/mach-msm/include/mach/memory.h b/arch/arm/mach-msm/include/mach/memory.h
index f2f8d299ba9..58d5e7eec43 100644
--- a/arch/arm/mach-msm/include/mach/memory.h
+++ b/arch/arm/mach-msm/include/mach/memory.h
@@ -22,11 +22,11 @@
#elif defined(CONFIG_ARCH_QSD8X50)
#define PLAT_PHYS_OFFSET UL(0x20000000)
#elif defined(CONFIG_ARCH_MSM7X30)
-#define PLAT_PHYS_OFFSET UL(0x00200000)
+#define PLAT_PHYS_OFFSET UL(0x00000000)
#elif defined(CONFIG_ARCH_MSM8X60)
-#define PLAT_PHYS_OFFSET UL(0x40200000)
+#define PLAT_PHYS_OFFSET UL(0x40000000)
#elif defined(CONFIG_ARCH_MSM8960)
-#define PLAT_PHYS_OFFSET UL(0x40200000)
+#define PLAT_PHYS_OFFSET UL(0x40000000)
#else
#define PLAT_PHYS_OFFSET UL(0x10000000)
#endif
diff --git a/arch/arm/mach-msm/platsmp.c b/arch/arm/mach-msm/platsmp.c
index 1a1af9e5625..72765952091 100644
--- a/arch/arm/mach-msm/platsmp.c
+++ b/arch/arm/mach-msm/platsmp.c
@@ -156,6 +156,12 @@ void __init smp_init_cpus(void)
{
unsigned int i, ncores = get_core_count();
+ if (ncores > nr_cpu_ids) {
+ pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+ ncores, nr_cpu_ids);
+ ncores = nr_cpu_ids;
+ }
+
for (i = 0; i < ncores; i++)
set_cpu_possible(i, true);
diff --git a/arch/arm/mach-mv78xx0/Makefile.boot b/arch/arm/mach-mv78xx0/Makefile.boot
index 67039c3e0c4..760a0efe758 100644
--- a/arch/arm/mach-mv78xx0/Makefile.boot
+++ b/arch/arm/mach-mv78xx0/Makefile.boot
@@ -1,3 +1,3 @@
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000100
initrd_phys-y := 0x00800000
diff --git a/arch/arm/mach-mx5/Makefile.boot b/arch/arm/mach-mx5/Makefile.boot
index e928be1b675..ca207ca305e 100644
--- a/arch/arm/mach-mx5/Makefile.boot
+++ b/arch/arm/mach-mx5/Makefile.boot
@@ -1,9 +1,9 @@
- zreladdr-$(CONFIG_ARCH_MX50) := 0x70008000
+ zreladdr-$(CONFIG_ARCH_MX50) += 0x70008000
params_phys-$(CONFIG_ARCH_MX50) := 0x70000100
initrd_phys-$(CONFIG_ARCH_MX50) := 0x70800000
- zreladdr-$(CONFIG_ARCH_MX51) := 0x90008000
+ zreladdr-$(CONFIG_ARCH_MX51) += 0x90008000
params_phys-$(CONFIG_ARCH_MX51) := 0x90000100
initrd_phys-$(CONFIG_ARCH_MX51) := 0x90800000
- zreladdr-$(CONFIG_ARCH_MX53) := 0x70008000
+ zreladdr-$(CONFIG_ARCH_MX53) += 0x70008000
params_phys-$(CONFIG_ARCH_MX53) := 0x70000100
initrd_phys-$(CONFIG_ARCH_MX53) := 0x70800000
diff --git a/arch/arm/mach-mxs/Makefile.boot b/arch/arm/mach-mxs/Makefile.boot
index eb541e0291d..07b11fe6453 100644
--- a/arch/arm/mach-mxs/Makefile.boot
+++ b/arch/arm/mach-mxs/Makefile.boot
@@ -1 +1 @@
-zreladdr-y := 0x40008000
+zreladdr-y += 0x40008000
diff --git a/arch/arm/mach-netx/Makefile.boot b/arch/arm/mach-netx/Makefile.boot
index b81cf6aff0a..534a4d27055 100644
--- a/arch/arm/mach-netx/Makefile.boot
+++ b/arch/arm/mach-netx/Makefile.boot
@@ -1,2 +1,2 @@
- zreladdr-y := 0x80008000
+ zreladdr-y += 0x80008000
diff --git a/arch/arm/mach-nomadik/Makefile.boot b/arch/arm/mach-nomadik/Makefile.boot
index c7e75acfe6c..ff0a4b5b0a8 100644
--- a/arch/arm/mach-nomadik/Makefile.boot
+++ b/arch/arm/mach-nomadik/Makefile.boot
@@ -1,4 +1,4 @@
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000100
initrd_phys-y := 0x00800000
diff --git a/arch/arm/mach-nuc93x/Makefile.boot b/arch/arm/mach-nuc93x/Makefile.boot
index a057b546b6e..6c3d421c2d1 100644
--- a/arch/arm/mach-nuc93x/Makefile.boot
+++ b/arch/arm/mach-nuc93x/Makefile.boot
@@ -1,3 +1,3 @@
-zreladdr-y := 0x00008000
+zreladdr-y += 0x00008000
params_phys-y := 0x00000100
diff --git a/arch/arm/mach-omap1/Makefile.boot b/arch/arm/mach-omap1/Makefile.boot
index 292d56c5a88..13bda8dbd60 100644
--- a/arch/arm/mach-omap1/Makefile.boot
+++ b/arch/arm/mach-omap1/Makefile.boot
@@ -1,3 +1,3 @@
- zreladdr-y := 0x10008000
+ zreladdr-y += 0x10008000
params_phys-y := 0x10000100
initrd_phys-y := 0x10800000
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 57b66d590c5..89bfb49389f 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -36,6 +36,7 @@ config ARCH_OMAP3
select ARM_L1_CACHE_SHIFT_6 if !ARCH_OMAP4
select ARCH_HAS_OPP
select PM_OPP if PM
+ select ARM_CPU_SUSPEND if PM
config ARCH_OMAP4
bool "TI OMAP4"
@@ -50,6 +51,7 @@ config ARCH_OMAP4
select ARCH_HAS_OPP
select PM_OPP if PM
select USB_ARCH_HAS_EHCI
+ select ARM_CPU_SUSPEND if PM
comment "OMAP Core Type"
depends on ARCH_OMAP2
diff --git a/arch/arm/mach-omap2/Makefile.boot b/arch/arm/mach-omap2/Makefile.boot
index 565aff7f37a..b03e562acc6 100644
--- a/arch/arm/mach-omap2/Makefile.boot
+++ b/arch/arm/mach-omap2/Makefile.boot
@@ -1,3 +1,3 @@
- zreladdr-y := 0x80008000
+ zreladdr-y += 0x80008000
params_phys-y := 0x80000100
initrd_phys-y := 0x80800000
diff --git a/arch/arm/mach-omap2/board-2430sdp.c b/arch/arm/mach-omap2/board-2430sdp.c
index 2028464cf5b..f79b7d2a8ed 100644
--- a/arch/arm/mach-omap2/board-2430sdp.c
+++ b/arch/arm/mach-omap2/board-2430sdp.c
@@ -193,7 +193,8 @@ static int __init omap2430_i2c_init(void)
{
omap_register_i2c_bus(1, 100, sdp2430_i2c1_boardinfo,
ARRAY_SIZE(sdp2430_i2c1_boardinfo));
- omap2_pmic_init("twl4030", &sdp2430_twldata);
+ omap_pmic_init(2, 100, "twl4030", INT_24XX_SYS_NIRQ,
+ &sdp2430_twldata);
return 0;
}
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
index a9b45c76e1d..097a42d81e5 100644
--- a/arch/arm/mach-omap2/hsmmc.c
+++ b/arch/arm/mach-omap2/hsmmc.c
@@ -137,8 +137,7 @@ static void omap4_hsmmc1_before_set_reg(struct device *dev, int slot,
*/
reg = omap4_ctrl_pad_readl(control_pbias_offset);
reg &= ~(OMAP4_MMC1_PBIASLITE_PWRDNZ_MASK |
- OMAP4_MMC1_PWRDNZ_MASK |
- OMAP4_USBC1_ICUSB_PWRDNZ_MASK);
+ OMAP4_MMC1_PWRDNZ_MASK);
omap4_ctrl_pad_writel(reg, control_pbias_offset);
}
@@ -156,8 +155,7 @@ static void omap4_hsmmc1_after_set_reg(struct device *dev, int slot,
else
reg |= OMAP4_MMC1_PBIASLITE_VMODE_MASK;
reg |= (OMAP4_MMC1_PBIASLITE_PWRDNZ_MASK |
- OMAP4_MMC1_PWRDNZ_MASK |
- OMAP4_USBC1_ICUSB_PWRDNZ_MASK);
+ OMAP4_MMC1_PWRDNZ_MASK);
omap4_ctrl_pad_writel(reg, control_pbias_offset);
timeout = jiffies + msecs_to_jiffies(5);
@@ -171,16 +169,14 @@ static void omap4_hsmmc1_after_set_reg(struct device *dev, int slot,
if (reg & OMAP4_MMC1_PBIASLITE_VMODE_ERROR_MASK) {
pr_err("Pbias Voltage is not same as LDO\n");
/* Caution : On VMODE_ERROR Power Down MMC IO */
- reg &= ~(OMAP4_MMC1_PWRDNZ_MASK |
- OMAP4_USBC1_ICUSB_PWRDNZ_MASK);
+ reg &= ~(OMAP4_MMC1_PWRDNZ_MASK);
omap4_ctrl_pad_writel(reg, control_pbias_offset);
}
} else {
reg = omap4_ctrl_pad_readl(control_pbias_offset);
reg |= (OMAP4_MMC1_PBIASLITE_PWRDNZ_MASK |
OMAP4_MMC1_PWRDNZ_MASK |
- OMAP4_MMC1_PBIASLITE_VMODE_MASK |
- OMAP4_USBC1_ICUSB_PWRDNZ_MASK);
+ OMAP4_MMC1_PBIASLITE_VMODE_MASK);
omap4_ctrl_pad_writel(reg, control_pbias_offset);
}
}
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index ce65e9329c7..889464dc7b2 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -109,12 +109,10 @@ void __init smp_init_cpus(void)
ncores = scu_get_core_count(scu_base);
/* sanity check */
- if (ncores > NR_CPUS) {
- printk(KERN_WARNING
- "OMAP4: no. of cores (%d) greater than configured "
- "maximum of %d - clipping\n",
- ncores, NR_CPUS);
- ncores = NR_CPUS;
+ if (ncores > nr_cpu_ids) {
+ pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+ ncores, nr_cpu_ids);
+ ncores = nr_cpu_ids;
}
for (i = 0; i < ncores; i++)
diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c
index a65145b02a5..19e4dac62a8 100644
--- a/arch/arm/mach-omap2/usb-musb.c
+++ b/arch/arm/mach-omap2/usb-musb.c
@@ -137,9 +137,6 @@ void __init usb_musb_init(struct omap_musb_board_data *musb_board_data)
musb_plat.mode = board_data->mode;
musb_plat.extvbus = board_data->extvbus;
- if (cpu_is_omap44xx())
- omap4430_phy_init(dev);
-
if (cpu_is_omap3517() || cpu_is_omap3505()) {
oh_name = "am35x_otg_hs";
name = "musb-am35x";
diff --git a/arch/arm/mach-orion5x/Makefile.boot b/arch/arm/mach-orion5x/Makefile.boot
index 67039c3e0c4..760a0efe758 100644
--- a/arch/arm/mach-orion5x/Makefile.boot
+++ b/arch/arm/mach-orion5x/Makefile.boot
@@ -1,3 +1,3 @@
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000100
initrd_phys-y := 0x00800000
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
index 0ab531d047f..22ace0bf2f9 100644
--- a/arch/arm/mach-orion5x/common.c
+++ b/arch/arm/mach-orion5x/common.c
@@ -308,8 +308,8 @@ void __init orion5x_init(void)
* Many orion-based systems have buggy bootloader implementations.
* This is a common fixup for bogus memory tags.
*/
-void __init tag_fixup_mem32(struct machine_desc *mdesc, struct tag *t,
- char **from, struct meminfo *meminfo)
+void __init tag_fixup_mem32(struct tag *t, char **from,
+ struct meminfo *meminfo)
{
for (; t->hdr.size; t = tag_next(t))
if (t->hdr.tag == ATAG_MEM &&
diff --git a/arch/arm/mach-orion5x/common.h b/arch/arm/mach-orion5x/common.h
index 3e5499dda49..909489f4d23 100644
--- a/arch/arm/mach-orion5x/common.h
+++ b/arch/arm/mach-orion5x/common.h
@@ -53,11 +53,9 @@ int orion5x_pci_sys_setup(int nr, struct pci_sys_data *sys);
struct pci_bus *orion5x_pci_sys_scan_bus(int nr, struct pci_sys_data *sys);
int orion5x_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
-struct machine_desc;
struct meminfo;
struct tag;
-extern void __init tag_fixup_mem32(struct machine_desc *, struct tag *,
- char **, struct meminfo *);
+extern void __init tag_fixup_mem32(struct tag *, char **, struct meminfo *);
#endif
diff --git a/arch/arm/mach-pnx4008/Makefile.boot b/arch/arm/mach-pnx4008/Makefile.boot
index 44c7117e20d..9fa19baa7f2 100644
--- a/arch/arm/mach-pnx4008/Makefile.boot
+++ b/arch/arm/mach-pnx4008/Makefile.boot
@@ -1,4 +1,4 @@
- zreladdr-y := 0x80008000
+ zreladdr-y += 0x80008000
params_phys-y := 0x80000100
initrd_phys-y := 0x80800000
diff --git a/arch/arm/mach-prima2/Makefile.boot b/arch/arm/mach-prima2/Makefile.boot
index d023db3ae4f..c77a4883a4e 100644
--- a/arch/arm/mach-prima2/Makefile.boot
+++ b/arch/arm/mach-prima2/Makefile.boot
@@ -1,3 +1,3 @@
-zreladdr-y := 0x00008000
+zreladdr-y += 0x00008000
params_phys-y := 0x00000100
initrd_phys-y := 0x00800000
diff --git a/arch/arm/mach-pxa/Makefile.boot b/arch/arm/mach-pxa/Makefile.boot
index 1ead67178ec..2c1ae92f210 100644
--- a/arch/arm/mach-pxa/Makefile.boot
+++ b/arch/arm/mach-pxa/Makefile.boot
@@ -1,2 +1,2 @@
- zreladdr-y := 0xa0008000
+ zreladdr-y += 0xa0008000
diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c
index b6a51340270..d940e8a7227 100644
--- a/arch/arm/mach-pxa/cm-x300.c
+++ b/arch/arm/mach-pxa/cm-x300.c
@@ -839,8 +839,8 @@ static void __init cm_x300_init(void)
cm_x300_init_bl();
}
-static void __init cm_x300_fixup(struct machine_desc *mdesc, struct tag *tags,
- char **cmdline, struct meminfo *mi)
+static void __init cm_x300_fixup(struct tag *tags, char **cmdline,
+ struct meminfo *mi)
{
/* Make sure that mi->bank[0].start = PHYS_ADDR */
for (; tags->hdr.size; tags = tag_next(tags))
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c
index 185a37cad25..3e9483b0605 100644
--- a/arch/arm/mach-pxa/corgi.c
+++ b/arch/arm/mach-pxa/corgi.c
@@ -705,8 +705,8 @@ static void __init corgi_init(void)
platform_add_devices(devices, ARRAY_SIZE(devices));
}
-static void __init fixup_corgi(struct machine_desc *desc,
- struct tag *tags, char **cmdline, struct meminfo *mi)
+static void __init fixup_corgi(struct tag *tags, char **cmdline,
+ struct meminfo *mi)
{
sharpsl_save_param();
mi->nr_banks=1;
diff --git a/arch/arm/mach-pxa/eseries.c b/arch/arm/mach-pxa/eseries.c
index b4599ec9d61..e4a1f4dc89f 100644
--- a/arch/arm/mach-pxa/eseries.c
+++ b/arch/arm/mach-pxa/eseries.c
@@ -41,8 +41,7 @@
#include "clock.h"
/* Only e800 has 128MB RAM */
-void __init eseries_fixup(struct machine_desc *desc,
- struct tag *tags, char **cmdline, struct meminfo *mi)
+void __init eseries_fixup(struct tag *tags, char **cmdline, struct meminfo *mi)
{
mi->nr_banks=1;
mi->bank[0].start = 0xa0000000;
diff --git a/arch/arm/mach-pxa/eseries.h b/arch/arm/mach-pxa/eseries.h
index 5930f5e2a12..be921965e91 100644
--- a/arch/arm/mach-pxa/eseries.h
+++ b/arch/arm/mach-pxa/eseries.h
@@ -1,5 +1,4 @@
-void __init eseries_fixup(struct machine_desc *desc,
- struct tag *tags, char **cmdline, struct meminfo *mi);
+void __init eseries_fixup(struct tag *tags, char **cmdline, struct meminfo *mi);
extern struct pxa2xx_udc_mach_info e7xx_udc_mach_info;
extern struct pxaficp_platform_data e7xx_ficp_platform_data;
diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c
index b09e848eb6c..ca607571782 100644
--- a/arch/arm/mach-pxa/irq.c
+++ b/arch/arm/mach-pxa/irq.c
@@ -19,6 +19,8 @@
#include <linux/io.h>
#include <linux/irq.h>
+#include <asm/exception.h>
+
#include <mach/hardware.h>
#include <mach/irqs.h>
#include <mach/gpio.h>
diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c
index a113ea9ab4a..948ce3e729f 100644
--- a/arch/arm/mach-pxa/poodle.c
+++ b/arch/arm/mach-pxa/poodle.c
@@ -454,8 +454,8 @@ static void __init poodle_init(void)
poodle_init_spi();
}
-static void __init fixup_poodle(struct machine_desc *desc,
- struct tag *tags, char **cmdline, struct meminfo *mi)
+static void __init fixup_poodle(struct tag *tags, char **cmdline,
+ struct meminfo *mi)
{
sharpsl_save_param();
mi->nr_banks=1;
diff --git a/arch/arm/mach-pxa/saar.c b/arch/arm/mach-pxa/saar.c
index df4356e8aca..72001ec6e7b 100644
--- a/arch/arm/mach-pxa/saar.c
+++ b/arch/arm/mach-pxa/saar.c
@@ -540,7 +540,7 @@ static struct mtd_partition saar_onenand_partitions[] = {
}, {
.name = "filesystem",
.offset = MTDPART_OFS_APPEND,
- .size = SZ_48M,
+ .size = SZ_32M + SZ_16M,
.mask_flags = 0,
}
};
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index 438c7b5e451..d8dec9113aa 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -970,8 +970,8 @@ static void __init spitz_init(void)
spitz_i2c_init();
}
-static void __init spitz_fixup(struct machine_desc *desc,
- struct tag *tags, char **cmdline, struct meminfo *mi)
+static void __init spitz_fixup(struct tag *tags, char **cmdline,
+ struct meminfo *mi)
{
sharpsl_save_param();
mi->nr_banks = 1;
diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c
index 9f69a268269..402b0c96613 100644
--- a/arch/arm/mach-pxa/tosa.c
+++ b/arch/arm/mach-pxa/tosa.c
@@ -960,8 +960,8 @@ static void __init tosa_init(void)
platform_add_devices(devices, ARRAY_SIZE(devices));
}
-static void __init fixup_tosa(struct machine_desc *desc,
- struct tag *tags, char **cmdline, struct meminfo *mi)
+static void __init fixup_tosa(struct tag *tags, char **cmdline,
+ struct meminfo *mi)
{
sharpsl_save_param();
mi->nr_banks=1;
diff --git a/arch/arm/mach-realview/Makefile.boot b/arch/arm/mach-realview/Makefile.boot
index d97e003d3df..d2c3d788f68 100644
--- a/arch/arm/mach-realview/Makefile.boot
+++ b/arch/arm/mach-realview/Makefile.boot
@@ -1,9 +1,9 @@
ifeq ($(CONFIG_REALVIEW_HIGH_PHYS_OFFSET),y)
- zreladdr-y := 0x70008000
+ zreladdr-y += 0x70008000
params_phys-y := 0x70000100
initrd_phys-y := 0x70800000
else
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000100
initrd_phys-y := 0x00800000
endif
diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c
index 5c23450d2d1..d5ed5d4f77d 100644
--- a/arch/arm/mach-realview/core.c
+++ b/arch/arm/mach-realview/core.c
@@ -517,8 +517,7 @@ void __init realview_timer_init(unsigned int timer_irq)
/*
* Setup the memory banks.
*/
-void realview_fixup(struct machine_desc *mdesc, struct tag *tags, char **from,
- struct meminfo *meminfo)
+void realview_fixup(struct tag *tags, char **from, struct meminfo *meminfo)
{
/*
* Most RealView platforms have 512MB contiguous RAM at 0x70000000.
diff --git a/arch/arm/mach-realview/core.h b/arch/arm/mach-realview/core.h
index 5c83d1e87a0..47259c89a75 100644
--- a/arch/arm/mach-realview/core.h
+++ b/arch/arm/mach-realview/core.h
@@ -63,8 +63,8 @@ extern int realview_flash_register(struct resource *res, u32 num);
extern int realview_eth_register(const char *name, struct resource *res);
extern int realview_usb_register(struct resource *res);
extern void realview_init_early(void);
-extern void realview_fixup(struct machine_desc *mdesc, struct tag *tags,
- char **from, struct meminfo *meminfo);
+extern void realview_fixup(struct tag *tags, char **from,
+ struct meminfo *meminfo);
extern void (*realview_reset)(char);
#endif
diff --git a/arch/arm/mach-realview/include/mach/board-pb1176.h b/arch/arm/mach-realview/include/mach/board-pb1176.h
index 002ab5d8c11..2a15fef9473 100644
--- a/arch/arm/mach-realview/include/mach/board-pb1176.h
+++ b/arch/arm/mach-realview/include/mach/board-pb1176.h
@@ -70,6 +70,7 @@
#define REALVIEW_DC1176_GIC_CPU_BASE 0x10120000 /* GIC CPU interface, on devchip */
#define REALVIEW_DC1176_GIC_DIST_BASE 0x10121000 /* GIC distributor, on devchip */
+#define REALVIEW_DC1176_ROM_BASE 0x10200000 /* 16KiB NVRAM pseudo-ROM, on devchip */
#define REALVIEW_PB1176_GIC_CPU_BASE 0x10040000 /* GIC CPU interface, on FPGA */
#define REALVIEW_PB1176_GIC_DIST_BASE 0x10041000 /* GIC distributor, on FPGA */
#define REALVIEW_PB1176_L220_BASE 0x10110000 /* L220 registers */
diff --git a/arch/arm/mach-realview/platsmp.c b/arch/arm/mach-realview/platsmp.c
index 4ae943bafa9..e83c654a58d 100644
--- a/arch/arm/mach-realview/platsmp.c
+++ b/arch/arm/mach-realview/platsmp.c
@@ -52,12 +52,10 @@ void __init smp_init_cpus(void)
ncores = scu_base ? scu_get_core_count(scu_base) : 1;
/* sanity check */
- if (ncores > NR_CPUS) {
- printk(KERN_WARNING
- "Realview: no. of cores (%d) greater than configured "
- "maximum of %d - clipping\n",
- ncores, NR_CPUS);
- ncores = NR_CPUS;
+ if (ncores > nr_cpu_ids) {
+ pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+ ncores, nr_cpu_ids);
+ ncores = nr_cpu_ids;
}
for (i = 0; i < ncores; i++)
diff --git a/arch/arm/mach-realview/realview_pb1176.c b/arch/arm/mach-realview/realview_pb1176.c
index ad5671acb66..865d440fcf5 100644
--- a/arch/arm/mach-realview/realview_pb1176.c
+++ b/arch/arm/mach-realview/realview_pb1176.c
@@ -26,6 +26,8 @@
#include <linux/amba/pl061.h>
#include <linux/amba/mmci.h>
#include <linux/amba/pl022.h>
+#include <linux/mtd/physmap.h>
+#include <linux/mtd/partitions.h>
#include <linux/io.h>
#include <mach/hardware.h>
@@ -204,22 +206,48 @@ static struct amba_device *amba_devs[] __initdata = {
* RealView PB1176 platform devices
*/
static struct resource realview_pb1176_flash_resources[] = {
- [0] = {
+ {
.start = REALVIEW_PB1176_FLASH_BASE,
.end = REALVIEW_PB1176_FLASH_BASE + REALVIEW_PB1176_FLASH_SIZE - 1,
.flags = IORESOURCE_MEM,
},
- [1] = {
+#ifdef CONFIG_REALVIEW_PB1176_SECURE_FLASH
+ {
.start = REALVIEW_PB1176_SEC_FLASH_BASE,
.end = REALVIEW_PB1176_SEC_FLASH_BASE + REALVIEW_PB1176_SEC_FLASH_SIZE - 1,
.flags = IORESOURCE_MEM,
},
-};
-#ifdef CONFIG_REALVIEW_PB1176_SECURE_FLASH
-#define PB1176_FLASH_BLOCKS 2
-#else
-#define PB1176_FLASH_BLOCKS 1
#endif
+};
+
+static struct physmap_flash_data pb1176_rom_pdata = {
+ .probe_type = "map_rom",
+ .width = 4,
+ .nr_parts = 0,
+};
+
+static struct resource pb1176_rom_resources[] = {
+ /*
+ * This exposes the PB1176 DevChip ROM as an MTD ROM mapping.
+ * The reference manual states that this is actually a pseudo-ROM
+ * programmed in NVRAM.
+ */
+ {
+ .start = REALVIEW_DC1176_ROM_BASE,
+ .end = REALVIEW_DC1176_ROM_BASE + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ }
+};
+
+static struct platform_device pb1176_rom_device = {
+ .name = "physmap-flash",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(pb1176_rom_resources),
+ .resource = pb1176_rom_resources,
+ .dev = {
+ .platform_data = &pb1176_rom_pdata,
+ },
+};
static struct resource realview_pb1176_smsc911x_resources[] = {
[0] = {
@@ -316,8 +344,7 @@ static void realview_pb1176_reset(char mode)
__raw_writel(REALVIEW_PB1176_SYS_SOFT_RESET, reset_ctrl);
}
-static void realview_pb1176_fixup(struct machine_desc *mdesc,
- struct tag *tags, char **from,
+static void realview_pb1176_fixup(struct tag *tags, char **from,
struct meminfo *meminfo)
{
/*
@@ -338,7 +365,8 @@ static void __init realview_pb1176_init(void)
#endif
realview_flash_register(realview_pb1176_flash_resources,
- PB1176_FLASH_BLOCKS);
+ ARRAY_SIZE(realview_pb1176_flash_resources));
+ platform_device_register(&pb1176_rom_device);
realview_eth_register(NULL, realview_pb1176_smsc911x_resources);
platform_device_register(&realview_i2c_device);
realview_usb_register(realview_pb1176_isp1761_resources);
diff --git a/arch/arm/mach-realview/realview_pbx.c b/arch/arm/mach-realview/realview_pbx.c
index 363b0ab5615..3e1eb2eb813 100644
--- a/arch/arm/mach-realview/realview_pbx.c
+++ b/arch/arm/mach-realview/realview_pbx.c
@@ -319,8 +319,8 @@ static struct sys_timer realview_pbx_timer = {
.init = realview_pbx_timer_init,
};
-static void realview_pbx_fixup(struct machine_desc *mdesc, struct tag *tags,
- char **from, struct meminfo *meminfo)
+static void realview_pbx_fixup(struct tag *tags, char **from,
+ struct meminfo *meminfo)
{
#ifdef CONFIG_SPARSEMEM
/*
@@ -335,7 +335,7 @@ static void realview_pbx_fixup(struct machine_desc *mdesc, struct tag *tags,
meminfo->bank[2].size = SZ_256M;
meminfo->nr_banks = 3;
#else
- realview_fixup(mdesc, tags, from, meminfo);
+ realview_fixup(tags, from, meminfo);
#endif
}
diff --git a/arch/arm/mach-rpc/Makefile.boot b/arch/arm/mach-rpc/Makefile.boot
index 9c9e7685ec7..ae2df0d7d03 100644
--- a/arch/arm/mach-rpc/Makefile.boot
+++ b/arch/arm/mach-rpc/Makefile.boot
@@ -1,4 +1,4 @@
- zreladdr-y := 0x10008000
+ zreladdr-y += 0x10008000
params_phys-y := 0x10000100
initrd_phys-y := 0x18000000
diff --git a/arch/arm/mach-rpc/include/mach/hardware.h b/arch/arm/mach-rpc/include/mach/hardware.h
index dde6b3c0e29..050d63c74cc 100644
--- a/arch/arm/mach-rpc/include/mach/hardware.h
+++ b/arch/arm/mach-rpc/include/mach/hardware.h
@@ -36,7 +36,7 @@
#define EASI_SIZE 0x08000000 /* EASI I/O */
#define EASI_START 0x08000000
-#define EASI_BASE 0xe5000000
+#define EASI_BASE IOMEM(0xe5000000)
#define IO_START 0x03000000 /* I/O */
#define IO_SIZE 0x01000000
@@ -51,21 +51,20 @@
/*
* IO Addresses
*/
-#define VIDC_BASE IOMEM(0xe0400000)
-#define EXPMASK_BASE 0xe0360000
-#define IOMD_BASE IOMEM(0xe0200000)
-#define IOC_BASE IOMEM(0xe0200000)
-#define PCIO_BASE IOMEM(0xe0010000)
-#define FLOPPYDMA_BASE IOMEM(0xe002a000)
+#define ECARD_EASI_BASE (EASI_BASE)
+#define VIDC_BASE (IO_BASE + 0x00400000)
+#define EXPMASK_BASE (IO_BASE + 0x00360000)
+#define ECARD_IOC4_BASE (IO_BASE + 0x00270000)
+#define ECARD_IOC_BASE (IO_BASE + 0x00240000)
+#define IOMD_BASE (IO_BASE + 0x00200000)
+#define IOC_BASE (IO_BASE + 0x00200000)
+#define ECARD_MEMC8_BASE (IO_BASE + 0x0002b000)
+#define FLOPPYDMA_BASE (IO_BASE + 0x0002a000)
+#define PCIO_BASE (IO_BASE + 0x00010000)
+#define ECARD_MEMC_BASE (IO_BASE + 0x00000000)
#define vidc_writel(val) __raw_writel(val, VIDC_BASE)
-#define IO_EC_EASI_BASE 0x81400000
-#define IO_EC_IOC4_BASE 0x8009c000
-#define IO_EC_IOC_BASE 0x80090000
-#define IO_EC_MEMC8_BASE 0x8000ac00
-#define IO_EC_MEMC_BASE 0x80000000
-
#define NETSLOT_BASE 0x0302b000
#define NETSLOT_SIZE 0x00001000
diff --git a/arch/arm/mach-rpc/include/mach/io.h b/arch/arm/mach-rpc/include/mach/io.h
index 20da7f486e5..695f4ed2e11 100644
--- a/arch/arm/mach-rpc/include/mach/io.h
+++ b/arch/arm/mach-rpc/include/mach/io.h
@@ -15,195 +15,18 @@
#include <mach/hardware.h>
-#define IO_SPACE_LIMIT 0xffffffff
+#define IO_SPACE_LIMIT 0xffff
/*
- * We use two different types of addressing - PC style addresses, and ARM
- * addresses. PC style accesses the PC hardware with the normal PC IO
- * addresses, eg 0x3f8 for serial#1. ARM addresses are 0x80000000+
- * and are translated to the start of IO. Note that all addresses are
- * shifted left!
- */
-#define __PORT_PCIO(x) (!((x) & 0x80000000))
-
-/*
- * Dynamic IO functions.
- */
-static inline void __outb (unsigned int value, unsigned int port)
-{
- unsigned long temp;
- __asm__ __volatile__(
- "tst %2, #0x80000000\n\t"
- "mov %0, %4\n\t"
- "addeq %0, %0, %3\n\t"
- "strb %1, [%0, %2, lsl #2] @ outb"
- : "=&r" (temp)
- : "r" (value), "r" (port), "Ir" (PCIO_BASE - IO_BASE), "Ir" (IO_BASE)
- : "cc");
-}
-
-static inline void __outw (unsigned int value, unsigned int port)
-{
- unsigned long temp;
- __asm__ __volatile__(
- "tst %2, #0x80000000\n\t"
- "mov %0, %4\n\t"
- "addeq %0, %0, %3\n\t"
- "str %1, [%0, %2, lsl #2] @ outw"
- : "=&r" (temp)
- : "r" (value|value<<16), "r" (port), "Ir" (PCIO_BASE - IO_BASE), "Ir" (IO_BASE)
- : "cc");
-}
-
-static inline void __outl (unsigned int value, unsigned int port)
-{
- unsigned long temp;
- __asm__ __volatile__(
- "tst %2, #0x80000000\n\t"
- "mov %0, %4\n\t"
- "addeq %0, %0, %3\n\t"
- "str %1, [%0, %2, lsl #2] @ outl"
- : "=&r" (temp)
- : "r" (value), "r" (port), "Ir" (PCIO_BASE - IO_BASE), "Ir" (IO_BASE)
- : "cc");
-}
-
-#define DECLARE_DYN_IN(sz,fnsuffix,instr) \
-static inline unsigned sz __in##fnsuffix (unsigned int port) \
-{ \
- unsigned long temp, value; \
- __asm__ __volatile__( \
- "tst %2, #0x80000000\n\t" \
- "mov %0, %4\n\t" \
- "addeq %0, %0, %3\n\t" \
- "ldr" instr " %1, [%0, %2, lsl #2] @ in" #fnsuffix \
- : "=&r" (temp), "=r" (value) \
- : "r" (port), "Ir" (PCIO_BASE - IO_BASE), "Ir" (IO_BASE) \
- : "cc"); \
- return (unsigned sz)value; \
-}
-
-static inline void __iomem *__deprecated __ioaddr(unsigned int port)
-{
- void __iomem *ret;
- if (__PORT_PCIO(port))
- ret = PCIO_BASE;
- else
- ret = IO_BASE;
- return ret + (port << 2);
-}
-
-#define DECLARE_IO(sz,fnsuffix,instr) \
- DECLARE_DYN_IN(sz,fnsuffix,instr)
-
-DECLARE_IO(char,b,"b")
-DECLARE_IO(short,w,"")
-DECLARE_IO(int,l,"")
-
-#undef DECLARE_IO
-#undef DECLARE_DYN_IN
-
-/*
- * Constant address IO functions
+ * We need PC style IO addressing for:
+ * - floppy (at 0x3f2,0x3f4,0x3f5,0x3f7)
+ * - parport (at 0x278-0x27a, 0x27b-0x27f, 0x778-0x77a)
+ * - 8250 serial (only for compile)
*
- * These have to be macros for the 'J' constraint to work -
- * +/-4096 immediate operand.
+ * These peripherals are found in an area of MMIO which looks very much
+ * like an ISA bus, but with registers at the low byte of each word.
*/
-#define __outbc(value,port) \
-({ \
- if (__PORT_PCIO((port))) \
- __asm__ __volatile__( \
- "strb %0, [%1, %2] @ outbc" \
- : : "r" (value), "r" (PCIO_BASE), "Jr" ((port) << 2)); \
- else \
- __asm__ __volatile__( \
- "strb %0, [%1, %2] @ outbc" \
- : : "r" (value), "r" (IO_BASE), "r" ((port) << 2)); \
-})
-
-#define __inbc(port) \
-({ \
- unsigned char result; \
- if (__PORT_PCIO((port))) \
- __asm__ __volatile__( \
- "ldrb %0, [%1, %2] @ inbc" \
- : "=r" (result) : "r" (PCIO_BASE), "Jr" ((port) << 2)); \
- else \
- __asm__ __volatile__( \
- "ldrb %0, [%1, %2] @ inbc" \
- : "=r" (result) : "r" (IO_BASE), "r" ((port) << 2)); \
- result; \
-})
-
-#define __outwc(value,port) \
-({ \
- unsigned long __v = value; \
- if (__PORT_PCIO((port))) \
- __asm__ __volatile__( \
- "str %0, [%1, %2] @ outwc" \
- : : "r" (__v|__v<<16), "r" (PCIO_BASE), "Jr" ((port) << 2)); \
- else \
- __asm__ __volatile__( \
- "str %0, [%1, %2] @ outwc" \
- : : "r" (__v|__v<<16), "r" (IO_BASE), "r" ((port) << 2)); \
-})
-
-#define __inwc(port) \
-({ \
- unsigned short result; \
- if (__PORT_PCIO((port))) \
- __asm__ __volatile__( \
- "ldr %0, [%1, %2] @ inwc" \
- : "=r" (result) : "r" (PCIO_BASE), "Jr" ((port) << 2)); \
- else \
- __asm__ __volatile__( \
- "ldr %0, [%1, %2] @ inwc" \
- : "=r" (result) : "r" (IO_BASE), "r" ((port) << 2)); \
- result & 0xffff; \
-})
-
-#define __outlc(value,port) \
-({ \
- unsigned long __v = value; \
- if (__PORT_PCIO((port))) \
- __asm__ __volatile__( \
- "str %0, [%1, %2] @ outlc" \
- : : "r" (__v), "r" (PCIO_BASE), "Jr" ((port) << 2)); \
- else \
- __asm__ __volatile__( \
- "str %0, [%1, %2] @ outlc" \
- : : "r" (__v), "r" (IO_BASE), "r" ((port) << 2)); \
-})
-
-#define __inlc(port) \
-({ \
- unsigned long result; \
- if (__PORT_PCIO((port))) \
- __asm__ __volatile__( \
- "ldr %0, [%1, %2] @ inlc" \
- : "=r" (result) : "r" (PCIO_BASE), "Jr" ((port) << 2)); \
- else \
- __asm__ __volatile__( \
- "ldr %0, [%1, %2] @ inlc" \
- : "=r" (result) : "r" (IO_BASE), "r" ((port) << 2)); \
- result; \
-})
-
-#define inb(p) (__builtin_constant_p((p)) ? __inbc(p) : __inb(p))
-#define inw(p) (__builtin_constant_p((p)) ? __inwc(p) : __inw(p))
-#define inl(p) (__builtin_constant_p((p)) ? __inlc(p) : __inl(p))
-#define outb(v,p) (__builtin_constant_p((p)) ? __outbc(v,p) : __outb(v,p))
-#define outw(v,p) (__builtin_constant_p((p)) ? __outwc(v,p) : __outw(v,p))
-#define outl(v,p) (__builtin_constant_p((p)) ? __outlc(v,p) : __outl(v,p))
-
-/* the following macro is deprecated */
-#define ioaddr(port) ((unsigned long)__ioaddr((port)))
-
-#define insb(p,d,l) __raw_readsb(__ioaddr(p),d,l)
-#define insw(p,d,l) __raw_readsw(__ioaddr(p),d,l)
-
-#define outsb(p,d,l) __raw_writesb(__ioaddr(p),d,l)
-#define outsw(p,d,l) __raw_writesw(__ioaddr(p),d,l)
+#define __io(a) (PCIO_BASE + ((a) << 2))
/*
* 1:1 mapping for ioremapped regions.
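Note: with the hand-rolled in/out accessors removed, port I/O on RiscPC goes through the generic ARM helpers, which turn a port number into an MMIO address via the machine's __io() macro. Each legacy register occupies its own 32-bit word here, hence the shift by two. A small illustration of the address math, using a hypothetical variable name; only __io() and PCIO_BASE come from the hunk above:

	/* The 8250 register at port 0x3f8 ends up at PCIO_BASE + (0x3f8 << 2) */
	void __iomem *example_addr = __io(0x3f8);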
diff --git a/arch/arm/mach-rpc/riscpc.c b/arch/arm/mach-rpc/riscpc.c
index 580b3c73d2c..1e0e60d0462 100644
--- a/arch/arm/mach-rpc/riscpc.c
+++ b/arch/arm/mach-rpc/riscpc.c
@@ -74,7 +74,7 @@ static struct map_desc rpc_io_desc[] __initdata = {
.length = IO_SIZE ,
.type = MT_DEVICE
}, { /* EASI space */
- .virtual = EASI_BASE,
+ .virtual = (unsigned long)EASI_BASE,
.pfn = __phys_to_pfn(EASI_START),
.length = EASI_SIZE,
.type = MT_DEVICE
diff --git a/arch/arm/mach-s3c2410/Makefile.boot b/arch/arm/mach-s3c2410/Makefile.boot
index 58c1dd7f8e1..4457605ba04 100644
--- a/arch/arm/mach-s3c2410/Makefile.boot
+++ b/arch/arm/mach-s3c2410/Makefile.boot
@@ -1,7 +1,7 @@
ifeq ($(CONFIG_PM_H1940),y)
- zreladdr-y := 0x30108000
+ zreladdr-y += 0x30108000
params_phys-y := 0x30100100
else
- zreladdr-y := 0x30008000
+ zreladdr-y += 0x30008000
params_phys-y := 0x30000100
endif
diff --git a/arch/arm/mach-s3c2410/include/mach/io.h b/arch/arm/mach-s3c2410/include/mach/io.h
index 9813dbf2ae4..118749f37c4 100644
--- a/arch/arm/mach-s3c2410/include/mach/io.h
+++ b/arch/arm/mach-s3c2410/include/mach/io.h
@@ -199,8 +199,6 @@ DECLARE_IO(int,l,"")
#define outw(v,p) (__builtin_constant_p((p)) ? __outwc(v,p) : __outw(v,p))
#define outl(v,p) (__builtin_constant_p((p)) ? __outlc(v,p) : __outl(v,p))
#define __ioaddr(p) (__builtin_constant_p((p)) ? __ioaddr(p) : __ioaddrc(p))
-/* the following macro is deprecated */
-#define ioaddr(port) __ioaddr((port))
#define insb(p,d,l) __raw_readsb(__ioaddr(p),d,l)
#define insw(p,d,l) __raw_readsw(__ioaddr(p),d,l)
diff --git a/arch/arm/mach-s3c2410/s3c2410.c b/arch/arm/mach-s3c2410/s3c2410.c
index f1d3bd8f6f1..343a540d86a 100644
--- a/arch/arm/mach-s3c2410/s3c2410.c
+++ b/arch/arm/mach-s3c2410/s3c2410.c
@@ -170,7 +170,9 @@ int __init s3c2410_init(void)
{
printk("S3C2410: Initialising architecture\n");
+#ifdef CONFIG_PM
register_syscore_ops(&s3c2410_pm_syscore_ops);
+#endif
register_syscore_ops(&s3c24xx_irq_syscore_ops);
return sysdev_register(&s3c2410_sysdev);
diff --git a/arch/arm/mach-s3c2412/mach-smdk2413.c b/arch/arm/mach-s3c2412/mach-smdk2413.c
index 834cfb61bcf..3391713e0c9 100644
--- a/arch/arm/mach-s3c2412/mach-smdk2413.c
+++ b/arch/arm/mach-s3c2412/mach-smdk2413.c
@@ -92,8 +92,7 @@ static struct platform_device *smdk2413_devices[] __initdata = {
&s3c_device_usbgadget,
};
-static void __init smdk2413_fixup(struct machine_desc *desc,
- struct tag *tags, char **cmdline,
+static void __init smdk2413_fixup(struct tag *tags, char **cmdline,
struct meminfo *mi)
{
if (tags != phys_to_virt(S3C2410_SDRAM_PA + 0x100)) {
diff --git a/arch/arm/mach-s3c2412/mach-vstms.c b/arch/arm/mach-s3c2412/mach-vstms.c
index 83544ebe20a..b6ed4573553 100644
--- a/arch/arm/mach-s3c2412/mach-vstms.c
+++ b/arch/arm/mach-s3c2412/mach-vstms.c
@@ -129,9 +129,8 @@ static struct platform_device *vstms_devices[] __initdata = {
&s3c_device_nand,
};
-static void __init vstms_fixup(struct machine_desc *desc,
- struct tag *tags, char **cmdline,
- struct meminfo *mi)
+static void __init vstms_fixup(struct tag *tags, char **cmdline,
+ struct meminfo *mi)
{
if (tags != phys_to_virt(S3C2410_SDRAM_PA + 0x100)) {
mi->nr_banks=1;
diff --git a/arch/arm/mach-s3c2412/s3c2412.c b/arch/arm/mach-s3c2412/s3c2412.c
index ef0958d3e5c..57a1e01e4e5 100644
--- a/arch/arm/mach-s3c2412/s3c2412.c
+++ b/arch/arm/mach-s3c2412/s3c2412.c
@@ -245,7 +245,9 @@ int __init s3c2412_init(void)
{
printk("S3C2412: Initialising architecture\n");
+#ifdef CONFIG_PM
register_syscore_ops(&s3c2412_pm_syscore_ops);
+#endif
register_syscore_ops(&s3c24xx_irq_syscore_ops);
return sysdev_register(&s3c2412_sysdev);
diff --git a/arch/arm/mach-s3c2416/s3c2416.c b/arch/arm/mach-s3c2416/s3c2416.c
index 494ce913dc9..20b3fdfb305 100644
--- a/arch/arm/mach-s3c2416/s3c2416.c
+++ b/arch/arm/mach-s3c2416/s3c2416.c
@@ -97,7 +97,9 @@ int __init s3c2416_init(void)
s3c_fb_setname("s3c2443-fb");
+#ifdef CONFIG_PM
register_syscore_ops(&s3c2416_pm_syscore_ops);
+#endif
register_syscore_ops(&s3c24xx_irq_syscore_ops);
return sysdev_register(&s3c2416_sysdev);
diff --git a/arch/arm/mach-s3c2440/s3c2440.c b/arch/arm/mach-s3c2440/s3c2440.c
index ce99ff72838..2270d336021 100644
--- a/arch/arm/mach-s3c2440/s3c2440.c
+++ b/arch/arm/mach-s3c2440/s3c2440.c
@@ -55,7 +55,9 @@ int __init s3c2440_init(void)
/* register suspend/resume handlers */
+#ifdef CONFIG_PM
register_syscore_ops(&s3c2410_pm_syscore_ops);
+#endif
register_syscore_ops(&s3c244x_pm_syscore_ops);
register_syscore_ops(&s3c24xx_irq_syscore_ops);
diff --git a/arch/arm/mach-s3c2440/s3c2442.c b/arch/arm/mach-s3c2440/s3c2442.c
index 9ad99f8016a..6f2b65e6e06 100644
--- a/arch/arm/mach-s3c2440/s3c2442.c
+++ b/arch/arm/mach-s3c2440/s3c2442.c
@@ -169,7 +169,9 @@ int __init s3c2442_init(void)
{
printk("S3C2442: Initialising architecture\n");
+#ifdef CONFIG_PM
register_syscore_ops(&s3c2410_pm_syscore_ops);
+#endif
register_syscore_ops(&s3c244x_pm_syscore_ops);
register_syscore_ops(&s3c24xx_irq_syscore_ops);
diff --git a/arch/arm/mach-s3c2443/clock.c b/arch/arm/mach-s3c2443/clock.c
index a1a7176675b..38058af4897 100644
--- a/arch/arm/mach-s3c2443/clock.c
+++ b/arch/arm/mach-s3c2443/clock.c
@@ -128,7 +128,7 @@ static int s3c2443_armclk_setrate(struct clk *clk, unsigned long rate)
unsigned long clkcon0;
clkcon0 = __raw_readl(S3C2443_CLKDIV0);
- clkcon0 &= S3C2443_CLKDIV0_ARMDIV_MASK;
+ clkcon0 &= ~S3C2443_CLKDIV0_ARMDIV_MASK;
clkcon0 |= val << S3C2443_CLKDIV0_ARMDIV_SHIFT;
__raw_writel(clkcon0, S3C2443_CLKDIV0);
}
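The clock.c hunk above fixes an inverted mask in a read-modify-write sequence: AND-ing with S3C2443_CLKDIV0_ARMDIV_MASK kept only the ARMDIV field, while the intent is to clear that field before OR-ing in the new divider. A small stand-alone sketch of the corrected pattern (the field layout here is made up for illustration):

    #include <stdint.h>

    #define FIELD_SHIFT 9
    #define FIELD_MASK  (0xfu << FIELD_SHIFT)

    /* Replace one field of a register image without disturbing the rest. */
    static uint32_t update_field(uint32_t reg, uint32_t val)
    {
        reg &= ~FIELD_MASK;                        /* clear the field first     */
        reg |= (val << FIELD_SHIFT) & FIELD_MASK;  /* then insert the new value */
        return reg;
    }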
diff --git a/arch/arm/mach-s3c64xx/Makefile.boot b/arch/arm/mach-s3c64xx/Makefile.boot
index ba41fdc0a58..c642333af3e 100644
--- a/arch/arm/mach-s3c64xx/Makefile.boot
+++ b/arch/arm/mach-s3c64xx/Makefile.boot
@@ -1,2 +1,2 @@
- zreladdr-y := 0x50008000
+ zreladdr-y += 0x50008000
params_phys-y := 0x50000100
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c
index ecbea92bf83..a9f3183e029 100644
--- a/arch/arm/mach-s3c64xx/mach-smdk6410.c
+++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c
@@ -262,45 +262,6 @@ static struct samsung_keypad_platdata smdk6410_keypad_data __initdata = {
.cols = 8,
};
-static int smdk6410_backlight_init(struct device *dev)
-{
- int ret;
-
- ret = gpio_request(S3C64XX_GPF(15), "Backlight");
- if (ret) {
- printk(KERN_ERR "failed to request GPF for PWM-OUT1\n");
- return ret;
- }
-
- /* Configure GPIO pin with S3C64XX_GPF15_PWM_TOUT1 */
- s3c_gpio_cfgpin(S3C64XX_GPF(15), S3C_GPIO_SFN(2));
-
- return 0;
-}
-
-static void smdk6410_backlight_exit(struct device *dev)
-{
- s3c_gpio_cfgpin(S3C64XX_GPF(15), S3C_GPIO_OUTPUT);
- gpio_free(S3C64XX_GPF(15));
-}
-
-static struct platform_pwm_backlight_data smdk6410_backlight_data = {
- .pwm_id = 1,
- .max_brightness = 255,
- .dft_brightness = 255,
- .pwm_period_ns = 78770,
- .init = smdk6410_backlight_init,
- .exit = smdk6410_backlight_exit,
-};
-
-static struct platform_device smdk6410_backlight_device = {
- .name = "pwm-backlight",
- .dev = {
- .parent = &s3c_device_timer[1].dev,
- .platform_data = &smdk6410_backlight_data,
- },
-};
-
static struct map_desc smdk6410_iodesc[] = {};
static struct platform_device *smdk6410_devices[] __initdata = {
diff --git a/arch/arm/mach-s5p64x0/Makefile.boot b/arch/arm/mach-s5p64x0/Makefile.boot
index ff90aa13bd6..79ece4055b0 100644
--- a/arch/arm/mach-s5p64x0/Makefile.boot
+++ b/arch/arm/mach-s5p64x0/Makefile.boot
@@ -1,2 +1,2 @@
- zreladdr-y := 0x20008000
+ zreladdr-y += 0x20008000
params_phys-y := 0x20000100
diff --git a/arch/arm/mach-s5pc100/Makefile.boot b/arch/arm/mach-s5pc100/Makefile.boot
index ff90aa13bd6..79ece4055b0 100644
--- a/arch/arm/mach-s5pc100/Makefile.boot
+++ b/arch/arm/mach-s5pc100/Makefile.boot
@@ -1,2 +1,2 @@
- zreladdr-y := 0x20008000
+ zreladdr-y += 0x20008000
params_phys-y := 0x20000100
diff --git a/arch/arm/mach-s5pv210/Makefile.boot b/arch/arm/mach-s5pv210/Makefile.boot
index ff90aa13bd6..79ece4055b0 100644
--- a/arch/arm/mach-s5pv210/Makefile.boot
+++ b/arch/arm/mach-s5pv210/Makefile.boot
@@ -1,2 +1,2 @@
- zreladdr-y := 0x20008000
+ zreladdr-y += 0x20008000
params_phys-y := 0x20000100
diff --git a/arch/arm/mach-s5pv210/clock.c b/arch/arm/mach-s5pv210/clock.c
index 52a8e607bcc..f5f8fa89679 100644
--- a/arch/arm/mach-s5pv210/clock.c
+++ b/arch/arm/mach-s5pv210/clock.c
@@ -815,8 +815,7 @@ static struct clksrc_clk clksrcs[] = {
.reg_div = { .reg = S5P_CLK_DIV3, .shift = 20, .size = 4 },
}, {
.clk = {
- .name = "sclk_cam",
- .devname = "s5pv210-fimc.0",
+ .name = "sclk_cam0",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 3),
},
@@ -825,8 +824,7 @@ static struct clksrc_clk clksrcs[] = {
.reg_div = { .reg = S5P_CLK_DIV1, .shift = 12, .size = 4 },
}, {
.clk = {
- .name = "sclk_cam",
- .devname = "s5pv210-fimc.1",
+ .name = "sclk_cam1",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 4),
},
diff --git a/arch/arm/mach-sa1100/Makefile b/arch/arm/mach-sa1100/Makefile
index 41252d22e65..00631787e80 100644
--- a/arch/arm/mach-sa1100/Makefile
+++ b/arch/arm/mach-sa1100/Makefile
@@ -45,7 +45,6 @@ obj-$(CONFIG_SA1100_PLEB) += pleb.o
obj-$(CONFIG_SA1100_SHANNON) += shannon.o
obj-$(CONFIG_SA1100_SIMPAD) += simpad.o
-led-$(CONFIG_SA1100_SIMPAD) += leds-simpad.o
# LEDs support
obj-$(CONFIG_LEDS) += $(led-y)
diff --git a/arch/arm/mach-sa1100/Makefile.boot b/arch/arm/mach-sa1100/Makefile.boot
index a56ad0417cf..5a616f6e561 100644
--- a/arch/arm/mach-sa1100/Makefile.boot
+++ b/arch/arm/mach-sa1100/Makefile.boot
@@ -1,6 +1,7 @@
- zreladdr-y := 0xc0008000
ifeq ($(CONFIG_ARCH_SA1100),y)
- zreladdr-$(CONFIG_SA1111) := 0xc0208000
+ zreladdr-$(CONFIG_SA1111) += 0xc0208000
+else
+ zreladdr-y += 0xc0008000
endif
params_phys-y := 0xc0000100
initrd_phys-y := 0xc0800000
diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c
index 26257df19b6..6290ce28b88 100644
--- a/arch/arm/mach-sa1100/assabet.c
+++ b/arch/arm/mach-sa1100/assabet.c
@@ -301,8 +301,7 @@ static void __init get_assabet_scr(void)
}
static void __init
-fixup_assabet(struct machine_desc *desc, struct tag *tags,
- char **cmdline, struct meminfo *mi)
+fixup_assabet(struct tag *tags, char **cmdline, struct meminfo *mi)
{
/* This must be done before any call to machine_has_neponset() */
map_sa1100_gpio_regs();
diff --git a/arch/arm/mach-sa1100/include/mach/io.h b/arch/arm/mach-sa1100/include/mach/io.h
index d8b43f3dcd2..dfc27ff0834 100644
--- a/arch/arm/mach-sa1100/include/mach/io.h
+++ b/arch/arm/mach-sa1100/include/mach/io.h
@@ -10,11 +10,9 @@
#ifndef __ASM_ARM_ARCH_IO_H
#define __ASM_ARM_ARCH_IO_H
-#define IO_SPACE_LIMIT 0xffffffff
-
/*
- * We don't actually have real ISA nor PCI buses, but there is so many
- * drivers out there that might just work if we fake them...
+ * __io() is required to be an equivalent mapping to __mem_pci() for
+ * SOC_COMMON to work.
*/
#define __io(a) __typesafe_io(a)
#define __mem_pci(a) (a)
diff --git a/arch/arm/mach-sa1100/include/mach/simpad.h b/arch/arm/mach-sa1100/include/mach/simpad.h
index 9296c4513ce..db28118103e 100644
--- a/arch/arm/mach-sa1100/include/mach/simpad.h
+++ b/arch/arm/mach-sa1100/include/mach/simpad.h
@@ -48,32 +48,80 @@
#define GPIO_SMART_CARD GPIO_GPIO10
#define IRQ_GPIO_SMARD_CARD IRQ_GPIO10
-// CS3 Latch is write only, a shadow is necessary
+/*--- ucb1x00 GPIO ---*/
+#define SIMPAD_UCB1X00_GPIO_BASE (GPIO_MAX + 1)
+#define SIMPAD_UCB1X00_GPIO_PROG1 (SIMPAD_UCB1X00_GPIO_BASE)
+#define SIMPAD_UCB1X00_GPIO_PROG2 (SIMPAD_UCB1X00_GPIO_BASE + 1)
+#define SIMPAD_UCB1X00_GPIO_UP (SIMPAD_UCB1X00_GPIO_BASE + 2)
+#define SIMPAD_UCB1X00_GPIO_DOWN (SIMPAD_UCB1X00_GPIO_BASE + 3)
+#define SIMPAD_UCB1X00_GPIO_LEFT (SIMPAD_UCB1X00_GPIO_BASE + 4)
+#define SIMPAD_UCB1X00_GPIO_RIGHT (SIMPAD_UCB1X00_GPIO_BASE + 5)
+#define SIMPAD_UCB1X00_GPIO_6 (SIMPAD_UCB1X00_GPIO_BASE + 6)
+#define SIMPAD_UCB1X00_GPIO_7 (SIMPAD_UCB1X00_GPIO_BASE + 7)
+#define SIMPAD_UCB1X00_GPIO_HEADSET (SIMPAD_UCB1X00_GPIO_BASE + 8)
+#define SIMPAD_UCB1X00_GPIO_SPEAKER (SIMPAD_UCB1X00_GPIO_BASE + 9)
+
+/*--- CS3 Latch ---*/
+#define SIMPAD_CS3_GPIO_BASE (GPIO_MAX + 11)
+#define SIMPAD_CS3_VCC_5V_EN (SIMPAD_CS3_GPIO_BASE)
+#define SIMPAD_CS3_VCC_3V_EN (SIMPAD_CS3_GPIO_BASE + 1)
+#define SIMPAD_CS3_EN1 (SIMPAD_CS3_GPIO_BASE + 2)
+#define SIMPAD_CS3_EN0 (SIMPAD_CS3_GPIO_BASE + 3)
+#define SIMPAD_CS3_DISPLAY_ON (SIMPAD_CS3_GPIO_BASE + 4)
+#define SIMPAD_CS3_PCMCIA_BUFF_DIS (SIMPAD_CS3_GPIO_BASE + 5)
+#define SIMPAD_CS3_MQ_RESET (SIMPAD_CS3_GPIO_BASE + 6)
+#define SIMPAD_CS3_PCMCIA_RESET (SIMPAD_CS3_GPIO_BASE + 7)
+#define SIMPAD_CS3_DECT_POWER_ON (SIMPAD_CS3_GPIO_BASE + 8)
+#define SIMPAD_CS3_IRDA_SD (SIMPAD_CS3_GPIO_BASE + 9)
+#define SIMPAD_CS3_RS232_ON (SIMPAD_CS3_GPIO_BASE + 10)
+#define SIMPAD_CS3_SD_MEDIAQ (SIMPAD_CS3_GPIO_BASE + 11)
+#define SIMPAD_CS3_LED2_ON (SIMPAD_CS3_GPIO_BASE + 12)
+#define SIMPAD_CS3_IRDA_MODE (SIMPAD_CS3_GPIO_BASE + 13)
+#define SIMPAD_CS3_ENABLE_5V (SIMPAD_CS3_GPIO_BASE + 14)
+#define SIMPAD_CS3_RESET_SIMCARD (SIMPAD_CS3_GPIO_BASE + 15)
+
+#define SIMPAD_CS3_PCMCIA_BVD1 (SIMPAD_CS3_GPIO_BASE + 16)
+#define SIMPAD_CS3_PCMCIA_BVD2 (SIMPAD_CS3_GPIO_BASE + 17)
+#define SIMPAD_CS3_PCMCIA_VS1 (SIMPAD_CS3_GPIO_BASE + 18)
+#define SIMPAD_CS3_PCMCIA_VS2 (SIMPAD_CS3_GPIO_BASE + 19)
+#define SIMPAD_CS3_LOCK_IND (SIMPAD_CS3_GPIO_BASE + 20)
+#define SIMPAD_CS3_CHARGING_STATE (SIMPAD_CS3_GPIO_BASE + 21)
+#define SIMPAD_CS3_PCMCIA_SHORT (SIMPAD_CS3_GPIO_BASE + 22)
+#define SIMPAD_CS3_GPIO_23 (SIMPAD_CS3_GPIO_BASE + 23)
-#define CS3BUSTYPE unsigned volatile long
#define CS3_BASE 0xf1000000
-#define VCC_5V_EN 0x0001 // For 5V PCMCIA
-#define VCC_3V_EN 0x0002 // FOR 3.3V PCMCIA
-#define EN1 0x0004 // This is only for EPROM's
-#define EN0 0x0008 // Both should be enable for 3.3V or 5V
-#define DISPLAY_ON 0x0010
-#define PCMCIA_BUFF_DIS 0x0020
-#define MQ_RESET 0x0040
-#define PCMCIA_RESET 0x0080
-#define DECT_POWER_ON 0x0100
-#define IRDA_SD 0x0200 // Shutdown for powersave
-#define RS232_ON 0x0400
-#define SD_MEDIAQ 0x0800 // Shutdown for powersave
-#define LED2_ON 0x1000
-#define IRDA_MODE 0x2000 // Fast/Slow IrDA mode
-#define ENABLE_5V 0x4000 // Enable 5V circuit
-#define RESET_SIMCARD 0x8000
-
-#define RS232_ENABLE 0x0440
-#define PCMCIAMASK 0x402f
-
-
+long simpad_get_cs3_ro(void);
+long simpad_get_cs3_shadow(void);
+void simpad_set_cs3_bit(int value);
+void simpad_clear_cs3_bit(int value);
+
+#define VCC_5V_EN 0x0001 /* For 5V PCMCIA */
+#define VCC_3V_EN 0x0002 /* FOR 3.3V PCMCIA */
+#define EN1 0x0004 /* This is only for EPROM's */
+#define EN0 0x0008 /* Both should be enable for 3.3V or 5V */
+#define DISPLAY_ON 0x0010
+#define PCMCIA_BUFF_DIS 0x0020
+#define MQ_RESET 0x0040
+#define PCMCIA_RESET 0x0080
+#define DECT_POWER_ON 0x0100
+#define IRDA_SD 0x0200 /* Shutdown for powersave */
+#define RS232_ON 0x0400
+#define SD_MEDIAQ 0x0800 /* Shutdown for powersave */
+#define LED2_ON 0x1000
+#define IRDA_MODE 0x2000 /* Fast/Slow IrDA mode */
+#define ENABLE_5V 0x4000 /* Enable 5V circuit */
+#define RESET_SIMCARD 0x8000
+
+#define PCMCIA_BVD1 0x01
+#define PCMCIA_BVD2 0x02
+#define PCMCIA_VS1 0x04
+#define PCMCIA_VS2 0x08
+#define LOCK_IND 0x10
+#define CHARGING_STATE 0x20
+#define PCMCIA_SHORT 0x40
+
+/*--- Battery ---*/
struct simpad_battery {
unsigned char ac_status; /* line connected yes/no */
unsigned char status; /* battery loading yes/no */
diff --git a/arch/arm/mach-sa1100/leds-simpad.c b/arch/arm/mach-sa1100/leds-simpad.c
deleted file mode 100644
index d50f4eeaa12..00000000000
--- a/arch/arm/mach-sa1100/leds-simpad.c
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * linux/arch/arm/mach-sa1100/leds-simpad.c
- *
- * Author: Juergen Messerer <juergen.messerer@siemens.ch>
- */
-#include <linux/init.h>
-
-#include <mach/hardware.h>
-#include <asm/leds.h>
-#include <asm/system.h>
-#include <mach/simpad.h>
-
-#include "leds.h"
-
-
-#define LED_STATE_ENABLED 1
-#define LED_STATE_CLAIMED 2
-
-static unsigned int led_state;
-static unsigned int hw_led_state;
-
-#define LED_GREEN (1)
-#define LED_MASK (1)
-
-extern void set_cs3_bit(int value);
-extern void clear_cs3_bit(int value);
-
-void simpad_leds_event(led_event_t evt)
-{
- switch (evt)
- {
- case led_start:
- hw_led_state = LED_GREEN;
- led_state = LED_STATE_ENABLED;
- break;
-
- case led_stop:
- led_state &= ~LED_STATE_ENABLED;
- break;
-
- case led_claim:
- led_state |= LED_STATE_CLAIMED;
- hw_led_state = LED_GREEN;
- break;
-
- case led_release:
- led_state &= ~LED_STATE_CLAIMED;
- hw_led_state = LED_GREEN;
- break;
-
-#ifdef CONFIG_LEDS_TIMER
- case led_timer:
- if (!(led_state & LED_STATE_CLAIMED))
- hw_led_state ^= LED_GREEN;
- break;
-#endif
-
-#ifdef CONFIG_LEDS_CPU
- case led_idle_start:
- break;
-
- case led_idle_end:
- break;
-#endif
-
- case led_halted:
- break;
-
- case led_green_on:
- if (led_state & LED_STATE_CLAIMED)
- hw_led_state |= LED_GREEN;
- break;
-
- case led_green_off:
- if (led_state & LED_STATE_CLAIMED)
- hw_led_state &= ~LED_GREEN;
- break;
-
- case led_amber_on:
- break;
-
- case led_amber_off:
- break;
-
- case led_red_on:
- break;
-
- case led_red_off:
- break;
-
- default:
- break;
- }
-
- if (led_state & LED_STATE_ENABLED)
- set_cs3_bit(LED2_ON);
- else
- clear_cs3_bit(LED2_ON);
-}
-
diff --git a/arch/arm/mach-sa1100/leds.c b/arch/arm/mach-sa1100/leds.c
index bbfe197fb4d..5fe71a0f105 100644
--- a/arch/arm/mach-sa1100/leds.c
+++ b/arch/arm/mach-sa1100/leds.c
@@ -42,8 +42,6 @@ sa1100_leds_init(void)
leds_event = adsbitsy_leds_event;
if (machine_is_pt_system3())
leds_event = system3_leds_event;
- if (machine_is_simpad())
- leds_event = simpad_leds_event; /* what about machine registry? including led, apm... -zecke */
leds_event(led_start);
return 0;
diff --git a/arch/arm/mach-sa1100/leds.h b/arch/arm/mach-sa1100/leds.h
index 68cc9f773d6..776b6020f55 100644
--- a/arch/arm/mach-sa1100/leds.h
+++ b/arch/arm/mach-sa1100/leds.h
@@ -11,4 +11,3 @@ extern void pfs168_leds_event(led_event_t evt);
extern void graphicsmaster_leds_event(led_event_t evt);
extern void adsbitsy_leds_event(led_event_t evt);
extern void system3_leds_event(led_event_t evt);
-extern void simpad_leds_event(led_event_t evt);
diff --git a/arch/arm/mach-sa1100/simpad.c b/arch/arm/mach-sa1100/simpad.c
index cfb76077bd2..34659f354be 100644
--- a/arch/arm/mach-sa1100/simpad.c
+++ b/arch/arm/mach-sa1100/simpad.c
@@ -13,6 +13,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
+#include <linux/gpio.h>
#include <asm/irq.h>
#include <mach/hardware.h>
@@ -28,35 +29,92 @@
#include <linux/serial_core.h>
#include <linux/ioport.h>
+#include <linux/input.h>
+#include <linux/gpio_keys.h>
+#include <linux/leds.h>
+#include <linux/i2c-gpio.h>
#include "generic.h"
-long cs3_shadow;
+/*
+ * CS3 support
+ */
-long get_cs3_shadow(void)
+static long cs3_shadow;
+static spinlock_t cs3_lock;
+static struct gpio_chip cs3_gpio;
+
+long simpad_get_cs3_ro(void)
+{
+ return readl(CS3_BASE);
+}
+EXPORT_SYMBOL(simpad_get_cs3_ro);
+
+long simpad_get_cs3_shadow(void)
{
return cs3_shadow;
}
+EXPORT_SYMBOL(simpad_get_cs3_shadow);
-void set_cs3(long value)
+static void __simpad_write_cs3(void)
{
- *(CS3BUSTYPE *)(CS3_BASE) = cs3_shadow = value;
+ writel(cs3_shadow, CS3_BASE);
}
-void set_cs3_bit(int value)
+void simpad_set_cs3_bit(int value)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cs3_lock, flags);
cs3_shadow |= value;
- *(CS3BUSTYPE *)(CS3_BASE) = cs3_shadow;
+ __simpad_write_cs3();
+ spin_unlock_irqrestore(&cs3_lock, flags);
}
+EXPORT_SYMBOL(simpad_set_cs3_bit);
-void clear_cs3_bit(int value)
+void simpad_clear_cs3_bit(int value)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cs3_lock, flags);
cs3_shadow &= ~value;
- *(CS3BUSTYPE *)(CS3_BASE) = cs3_shadow;
+ __simpad_write_cs3();
+ spin_unlock_irqrestore(&cs3_lock, flags);
}
+EXPORT_SYMBOL(simpad_clear_cs3_bit);
+
+static void cs3_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ if (offset > 15)
+ return;
+ if (value)
+ simpad_set_cs3_bit(1 << offset);
+ else
+ simpad_clear_cs3_bit(1 << offset);
+};
+
+static int cs3_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ if (offset > 15)
+ return simpad_get_cs3_ro() & (1 << (offset - 16));
+ return simpad_get_cs3_shadow() & (1 << offset);
+};
-EXPORT_SYMBOL(set_cs3_bit);
-EXPORT_SYMBOL(clear_cs3_bit);
+static int cs3_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ if (offset > 15)
+ return 0;
+ return -EINVAL;
+};
+
+static int cs3_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ if (offset > 15)
+ return -EINVAL;
+ cs3_gpio_set(chip, offset, value);
+ return 0;
+};
static struct map_desc simpad_io_desc[] __initdata = {
{ /* MQ200 */
@@ -64,9 +122,9 @@ static struct map_desc simpad_io_desc[] __initdata = {
.pfn = __phys_to_pfn(0x4b800000),
.length = 0x00800000,
.type = MT_DEVICE
- }, { /* Paules CS3, write only */
- .virtual = 0xf1000000,
- .pfn = __phys_to_pfn(0x18000000),
+ }, { /* Simpad CS3 */
+ .virtual = CS3_BASE,
+ .pfn = __phys_to_pfn(SA1100_CS3_PHYS),
.length = 0x00100000,
.type = MT_DEVICE
},
@@ -78,12 +136,12 @@ static void simpad_uart_pm(struct uart_port *port, u_int state, u_int oldstate)
if (port->mapbase == (u_int)&Ser1UTCR0) {
if (state)
{
- clear_cs3_bit(RS232_ON);
- clear_cs3_bit(DECT_POWER_ON);
+ simpad_clear_cs3_bit(RS232_ON);
+ simpad_clear_cs3_bit(DECT_POWER_ON);
}else
{
- set_cs3_bit(RS232_ON);
- set_cs3_bit(DECT_POWER_ON);
+ simpad_set_cs3_bit(RS232_ON);
+ simpad_set_cs3_bit(DECT_POWER_ON);
}
}
}
@@ -132,6 +190,7 @@ static struct resource simpad_flash_resources [] = {
static struct mcp_plat_data simpad_mcp_data = {
.mccr0 = MCCR0_ADM,
.sclk_rate = 11981000,
+ .gpio_base = SIMPAD_UCB1X00_GPIO_BASE,
};
@@ -142,9 +201,10 @@ static void __init simpad_map_io(void)
iotable_init(simpad_io_desc, ARRAY_SIZE(simpad_io_desc));
- set_cs3_bit (EN1 | EN0 | LED2_ON | DISPLAY_ON | RS232_ON |
- ENABLE_5V | RESET_SIMCARD | DECT_POWER_ON);
-
+ /* Initialize CS3 */
+ cs3_shadow = (EN1 | EN0 | LED2_ON | DISPLAY_ON |
+ RS232_ON | ENABLE_5V | RESET_SIMCARD | DECT_POWER_ON);
+ __simpad_write_cs3(); /* Spinlocks not yet initialized */
sa1100_register_uart_fns(&simpad_port_fns);
sa1100_register_uart(0, 3); /* serial interface */
@@ -170,13 +230,14 @@ static void __init simpad_map_io(void)
static void simpad_power_off(void)
{
- local_irq_disable(); // was cli
- set_cs3(0x800); /* only SD_MEDIAQ */
+ local_irq_disable();
+ cs3_shadow = SD_MEDIAQ;
+ __simpad_write_cs3(); /* Bypass spinlock here */
/* disable internal oscillator, float CS lines */
PCFR = (PCFR_OPDE | PCFR_FP | PCFR_FS);
- /* enable wake-up on GPIO0 (Assabet...) */
- PWER = GFER = GRER = 1;
+ /* enable wake-up on GPIO0 */
+ PWER = GFER = GRER = PWER_GPIO0;
/*
* set scratchpad to zero, just in case it is used as a
* restart address by the bootloader.
@@ -192,6 +253,91 @@ static void simpad_power_off(void)
}
+/*
+ * gpio_keys
+*/
+
+static struct gpio_keys_button simpad_button_table[] = {
+ { KEY_POWER, IRQ_GPIO_POWER_BUTTON, 1, "power button" },
+};
+
+static struct gpio_keys_platform_data simpad_keys_data = {
+ .buttons = simpad_button_table,
+ .nbuttons = ARRAY_SIZE(simpad_button_table),
+};
+
+static struct platform_device simpad_keys = {
+ .name = "gpio-keys",
+ .dev = {
+ .platform_data = &simpad_keys_data,
+ },
+};
+
+static struct gpio_keys_button simpad_polled_button_table[] = {
+ { KEY_PROG1, SIMPAD_UCB1X00_GPIO_PROG1, 1, "prog1 button" },
+ { KEY_PROG2, SIMPAD_UCB1X00_GPIO_PROG2, 1, "prog2 button" },
+ { KEY_UP, SIMPAD_UCB1X00_GPIO_UP, 1, "up button" },
+ { KEY_DOWN, SIMPAD_UCB1X00_GPIO_DOWN, 1, "down button" },
+ { KEY_LEFT, SIMPAD_UCB1X00_GPIO_LEFT, 1, "left button" },
+ { KEY_RIGHT, SIMPAD_UCB1X00_GPIO_RIGHT, 1, "right button" },
+};
+
+static struct gpio_keys_platform_data simpad_polled_keys_data = {
+ .buttons = simpad_polled_button_table,
+ .nbuttons = ARRAY_SIZE(simpad_polled_button_table),
+ .poll_interval = 50,
+};
+
+static struct platform_device simpad_polled_keys = {
+ .name = "gpio-keys-polled",
+ .dev = {
+ .platform_data = &simpad_polled_keys_data,
+ },
+};
+
+/*
+ * GPIO LEDs
+ */
+
+static struct gpio_led simpad_leds[] = {
+ {
+ .name = "simpad:power",
+ .gpio = SIMPAD_CS3_LED2_ON,
+ .active_low = 0,
+ .default_trigger = "default-on",
+ },
+};
+
+static struct gpio_led_platform_data simpad_led_data = {
+ .num_leds = ARRAY_SIZE(simpad_leds),
+ .leds = simpad_leds,
+};
+
+static struct platform_device simpad_gpio_leds = {
+ .name = "leds-gpio",
+ .id = 0,
+ .dev = {
+ .platform_data = &simpad_led_data,
+ },
+};
+
+/*
+ * i2c
+ */
+static struct i2c_gpio_platform_data simpad_i2c_data = {
+ .sda_pin = GPIO_GPIO21,
+ .scl_pin = GPIO_GPIO25,
+ .udelay = 10,
+ .timeout = HZ,
+};
+
+static struct platform_device simpad_i2c = {
+ .name = "i2c-gpio",
+ .id = 0,
+ .dev = {
+ .platform_data = &simpad_i2c_data,
+ },
+};
/*
* MediaQ Video Device
@@ -202,7 +348,11 @@ static struct platform_device simpad_mq200fb = {
};
static struct platform_device *devices[] __initdata = {
- &simpad_mq200fb
+ &simpad_keys,
+ &simpad_polled_keys,
+ &simpad_mq200fb,
+ &simpad_gpio_leds,
+ &simpad_i2c,
};
@@ -211,6 +361,19 @@ static int __init simpad_init(void)
{
int ret;
+ spin_lock_init(&cs3_lock);
+
+ cs3_gpio.label = "simpad_cs3";
+ cs3_gpio.base = SIMPAD_CS3_GPIO_BASE;
+ cs3_gpio.ngpio = 24;
+ cs3_gpio.set = cs3_gpio_set;
+ cs3_gpio.get = cs3_gpio_get;
+ cs3_gpio.direction_input = cs3_gpio_direction_input;
+ cs3_gpio.direction_output = cs3_gpio_direction_output;
+ ret = gpiochip_add(&cs3_gpio);
+ if (ret)
+ printk(KERN_WARNING "simpad: Unable to register cs3 GPIO device");
+
pm_power_off = simpad_power_off;
sa11x0_register_mtd(&simpad_flash_data, simpad_flash_resources,
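The simpad.c conversion above wraps the write-only CS3 latch in a gpio_chip: set/clear go through a spinlock-protected shadow word for offsets 0-15, offsets 16-23 are read back from a separate read-only register, and only the lower offsets can be driven as outputs. A rough user-space model of the shadow-latch idea, assuming a hypothetical hw_write_latch() for the MMIO write:

    #include <stdint.h>
    #include <pthread.h>

    extern void hw_write_latch(uint16_t value);   /* hypothetical MMIO write */

    static uint16_t latch_shadow;                 /* last value written out  */
    static pthread_mutex_t latch_lock = PTHREAD_MUTEX_INITIALIZER;

    static void latch_set_bit(unsigned int bit, int on)
    {
        pthread_mutex_lock(&latch_lock);
        if (on)
            latch_shadow |= 1u << bit;
        else
            latch_shadow &= ~(1u << bit);
        hw_write_latch(latch_shadow);             /* hardware cannot be read back */
        pthread_mutex_unlock(&latch_lock);
    }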
diff --git a/arch/arm/mach-shark/Makefile.boot b/arch/arm/mach-shark/Makefile.boot
index 4320f8b9277..e40e24e4ca3 100644
--- a/arch/arm/mach-shark/Makefile.boot
+++ b/arch/arm/mach-shark/Makefile.boot
@@ -1,2 +1,2 @@
- zreladdr-y := 0x08008000
+ zreladdr-y += 0x08008000
diff --git a/arch/arm/mach-shmobile/Makefile.boot b/arch/arm/mach-shmobile/Makefile.boot
index 1c08ee9de86..498efd99338 100644
--- a/arch/arm/mach-shmobile/Makefile.boot
+++ b/arch/arm/mach-shmobile/Makefile.boot
@@ -1,7 +1,7 @@
__ZRELADDR := $(shell /bin/bash -c 'printf "0x%08x" \
$$[$(CONFIG_MEMORY_START) + 0x8000]')
- zreladdr-y := $(__ZRELADDR)
+ zreladdr-y += $(__ZRELADDR)
# Unsupported legacy stuff
#
diff --git a/arch/arm/mach-shmobile/platsmp.c b/arch/arm/mach-shmobile/platsmp.c
index 66f980625a3..e4e485fa253 100644
--- a/arch/arm/mach-shmobile/platsmp.c
+++ b/arch/arm/mach-shmobile/platsmp.c
@@ -56,6 +56,12 @@ void __init smp_init_cpus(void)
unsigned int ncores = shmobile_smp_get_core_count();
unsigned int i;
+ if (ncores > nr_cpu_ids) {
+ pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+ ncores, nr_cpu_ids);
+ ncores = nr_cpu_ids;
+ }
+
for (i = 0; i < ncores; i++)
set_cpu_possible(i, true);
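This smp_init_cpus() change, repeated below for Tegra, ux500 and Versatile Express, clamps the hardware-reported core count to nr_cpu_ids rather than NR_CPUS, since the runtime limit can be smaller than the compile-time one. A stand-alone model of the check (nr_cpu_ids is passed in here instead of being read from the kernel):

    #include <stdio.h>

    static unsigned int clamp_cores(unsigned int ncores, unsigned int nr_cpu_ids)
    {
        if (ncores > nr_cpu_ids) {
            fprintf(stderr, "SMP: %u cores greater than maximum (%u), clipping\n",
                    ncores, nr_cpu_ids);
            ncores = nr_cpu_ids;
        }
        return ncores;
    }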
diff --git a/arch/arm/mach-spear3xx/Makefile.boot b/arch/arm/mach-spear3xx/Makefile.boot
index 7a1f3c0eadb..4674a4c221d 100644
--- a/arch/arm/mach-spear3xx/Makefile.boot
+++ b/arch/arm/mach-spear3xx/Makefile.boot
@@ -1,3 +1,3 @@
-zreladdr-y := 0x00008000
+zreladdr-y += 0x00008000
params_phys-y := 0x00000100
initrd_phys-y := 0x00800000
diff --git a/arch/arm/mach-spear6xx/Makefile.boot b/arch/arm/mach-spear6xx/Makefile.boot
index 7a1f3c0eadb..4674a4c221d 100644
--- a/arch/arm/mach-spear6xx/Makefile.boot
+++ b/arch/arm/mach-spear6xx/Makefile.boot
@@ -1,3 +1,3 @@
-zreladdr-y := 0x00008000
+zreladdr-y += 0x00008000
params_phys-y := 0x00000100
initrd_phys-y := 0x00800000
diff --git a/arch/arm/mach-tcc8k/Makefile.boot b/arch/arm/mach-tcc8k/Makefile.boot
index f135c9deae1..5e02d4156b0 100644
--- a/arch/arm/mach-tcc8k/Makefile.boot
+++ b/arch/arm/mach-tcc8k/Makefile.boot
@@ -1,3 +1,3 @@
- zreladdr-y := 0x20008000
+ zreladdr-y += 0x20008000
params_phys-y := 0x20000100
initrd_phys-y := 0x20800000
diff --git a/arch/arm/mach-tegra/Makefile.boot b/arch/arm/mach-tegra/Makefile.boot
index 428ad122be0..5e870d29eca 100644
--- a/arch/arm/mach-tegra/Makefile.boot
+++ b/arch/arm/mach-tegra/Makefile.boot
@@ -1,4 +1,4 @@
-zreladdr-$(CONFIG_ARCH_TEGRA_2x_SOC) := 0x00008000
+zreladdr-$(CONFIG_ARCH_TEGRA_2x_SOC) += 0x00008000
params_phys-$(CONFIG_ARCH_TEGRA_2x_SOC) := 0x00000100
initrd_phys-$(CONFIG_ARCH_TEGRA_2x_SOC) := 0x00800000
diff --git a/arch/arm/mach-tegra/board-harmony.c b/arch/arm/mach-tegra/board-harmony.c
index 846cd7d69e3..c78ce41cca1 100644
--- a/arch/arm/mach-tegra/board-harmony.c
+++ b/arch/arm/mach-tegra/board-harmony.c
@@ -123,8 +123,8 @@ static struct platform_device *harmony_devices[] __initdata = {
&harmony_audio_device,
};
-static void __init tegra_harmony_fixup(struct machine_desc *desc,
- struct tag *tags, char **cmdline, struct meminfo *mi)
+static void __init tegra_harmony_fixup(struct tag *tags, char **cmdline,
+ struct meminfo *mi)
{
mi->nr_banks = 2;
mi->bank[0].start = PHYS_OFFSET;
diff --git a/arch/arm/mach-tegra/board-paz00.c b/arch/arm/mach-tegra/board-paz00.c
index ea2f79c9879..5e6bc771964 100644
--- a/arch/arm/mach-tegra/board-paz00.c
+++ b/arch/arm/mach-tegra/board-paz00.c
@@ -84,8 +84,8 @@ static void paz00_usb_init(void)
platform_device_register(&tegra_ehci3_device);
}
-static void __init tegra_paz00_fixup(struct machine_desc *desc,
- struct tag *tags, char **cmdline, struct meminfo *mi)
+static void __init tegra_paz00_fixup(struct tag *tags, char **cmdline,
+ struct meminfo *mi)
{
mi->nr_banks = 1;
mi->bank[0].start = PHYS_OFFSET;
diff --git a/arch/arm/mach-tegra/board-trimslice.c b/arch/arm/mach-tegra/board-trimslice.c
index 89a6d2adc1d..652c3404d0e 100644
--- a/arch/arm/mach-tegra/board-trimslice.c
+++ b/arch/arm/mach-tegra/board-trimslice.c
@@ -126,8 +126,8 @@ static void trimslice_usb_init(void)
platform_device_register(&tegra_ehci1_device);
}
-static void __init tegra_trimslice_fixup(struct machine_desc *desc,
- struct tag *tags, char **cmdline, struct meminfo *mi)
+static void __init tegra_trimslice_fixup(struct tag *tags, char **cmdline,
+ struct meminfo *mi)
{
mi->nr_banks = 2;
mi->bank[0].start = PHYS_OFFSET;
diff --git a/arch/arm/mach-tegra/cpu-tegra.c b/arch/arm/mach-tegra/cpu-tegra.c
index 0e1016a827a..0e0fd4d889b 100644
--- a/arch/arm/mach-tegra/cpu-tegra.c
+++ b/arch/arm/mach-tegra/cpu-tegra.c
@@ -32,7 +32,6 @@
#include <asm/system.h>
-#include <mach/hardware.h>
#include <mach/clk.h>
/* Frequency table index must be sequential starting at 0 */
diff --git a/arch/arm/mach-tegra/platsmp.c b/arch/arm/mach-tegra/platsmp.c
index 0886cbccdde..7d2b5d03c1d 100644
--- a/arch/arm/mach-tegra/platsmp.c
+++ b/arch/arm/mach-tegra/platsmp.c
@@ -114,10 +114,10 @@ void __init smp_init_cpus(void)
{
unsigned int i, ncores = scu_get_core_count(scu_base);
- if (ncores > NR_CPUS) {
- printk(KERN_ERR "Tegra: no. of cores (%u) greater than configured (%u), clipping\n",
- ncores, NR_CPUS);
- ncores = NR_CPUS;
+ if (ncores > nr_cpu_ids) {
+ pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+ ncores, nr_cpu_ids);
+ ncores = nr_cpu_ids;
}
for (i = 0; i < ncores; i++)
diff --git a/arch/arm/mach-u300/Makefile.boot b/arch/arm/mach-u300/Makefile.boot
index 6fbfc6ea2d3..69357affbd7 100644
--- a/arch/arm/mach-u300/Makefile.boot
+++ b/arch/arm/mach-u300/Makefile.boot
@@ -4,10 +4,10 @@
# INITRD_PHYS must be in RAM
ifdef CONFIG_MACH_U300_SINGLE_RAM
- zreladdr-y := 0x28E08000
+ zreladdr-y += 0x28E08000
params_phys-y := 0x28E00100
else
- zreladdr-y := 0x48008000
+ zreladdr-y += 0x48008000
params_phys-y := 0x48000100
endif
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index 4210cb434db..a3e0c8692f0 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -6,6 +6,7 @@ config UX500_SOC_COMMON
select ARM_GIC
select HAS_MTU
select ARM_ERRATA_753970
+ select ARM_ERRATA_754322
menu "Ux500 SoC"
diff --git a/arch/arm/mach-ux500/Makefile.boot b/arch/arm/mach-ux500/Makefile.boot
index c7e75acfe6c..ff0a4b5b0a8 100644
--- a/arch/arm/mach-ux500/Makefile.boot
+++ b/arch/arm/mach-ux500/Makefile.boot
@@ -1,4 +1,4 @@
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000100
initrd_phys-y := 0x00800000
diff --git a/arch/arm/mach-ux500/platsmp.c b/arch/arm/mach-ux500/platsmp.c
index a33df5f4c27..eb5199102cf 100644
--- a/arch/arm/mach-ux500/platsmp.c
+++ b/arch/arm/mach-ux500/platsmp.c
@@ -156,12 +156,10 @@ void __init smp_init_cpus(void)
ncores = scu_base ? scu_get_core_count(scu_base) : 1;
/* sanity check */
- if (ncores > NR_CPUS) {
- printk(KERN_WARNING
- "U8500: no. of cores (%d) greater than configured "
- "maximum of %d - clipping\n",
- ncores, NR_CPUS);
- ncores = NR_CPUS;
+ if (ncores > nr_cpu_ids) {
+ pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+ ncores, nr_cpu_ids);
+ ncores = nr_cpu_ids;
}
for (i = 0; i < ncores; i++)
diff --git a/arch/arm/mach-versatile/Makefile.boot b/arch/arm/mach-versatile/Makefile.boot
index c7e75acfe6c..ff0a4b5b0a8 100644
--- a/arch/arm/mach-versatile/Makefile.boot
+++ b/arch/arm/mach-versatile/Makefile.boot
@@ -1,4 +1,4 @@
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000100
initrd_phys-y := 0x00800000
diff --git a/arch/arm/mach-vexpress/Makefile.boot b/arch/arm/mach-vexpress/Makefile.boot
index 07c2d9c457e..8630b3d10a4 100644
--- a/arch/arm/mach-vexpress/Makefile.boot
+++ b/arch/arm/mach-vexpress/Makefile.boot
@@ -1,3 +1,3 @@
- zreladdr-y := 0x60008000
+ zreladdr-y += 0x60008000
params_phys-y := 0x60000100
initrd_phys-y := 0x60800000
diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c
index bfd32f52c2d..2b1e836a76e 100644
--- a/arch/arm/mach-vexpress/ct-ca9x4.c
+++ b/arch/arm/mach-vexpress/ct-ca9x4.c
@@ -221,6 +221,12 @@ static void ct_ca9x4_init_cpu_map(void)
{
int i, ncores = scu_get_core_count(MMIO_P2V(A9_MPCORE_SCU));
+ if (ncores > nr_cpu_ids) {
+ pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+ ncores, nr_cpu_ids);
+ ncores = nr_cpu_ids;
+ }
+
for (i = 0; i < ncores; ++i)
set_cpu_possible(i, true);
diff --git a/arch/arm/mach-vexpress/hotplug.c b/arch/arm/mach-vexpress/hotplug.c
index ea4cbfb90a6..3668cf91d2d 100644
--- a/arch/arm/mach-vexpress/hotplug.c
+++ b/arch/arm/mach-vexpress/hotplug.c
@@ -13,6 +13,7 @@
#include <linux/smp.h>
#include <asm/cacheflush.h>
+#include <asm/system.h>
extern volatile int pen_release;
@@ -62,13 +63,7 @@ static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
* code will have already disabled interrupts
*/
for (;;) {
- /*
- * here's the WFI
- */
- asm(".word 0xe320f003\n"
- :
- :
- : "memory", "cc");
+ wfi();
if (pen_release == cpu) {
/*
diff --git a/arch/arm/mach-vexpress/include/mach/io.h b/arch/arm/mach-vexpress/include/mach/io.h
index 748bb524ee7..13522d86685 100644
--- a/arch/arm/mach-vexpress/include/mach/io.h
+++ b/arch/arm/mach-vexpress/include/mach/io.h
@@ -20,8 +20,6 @@
#ifndef __ASM_ARM_ARCH_IO_H
#define __ASM_ARM_ARCH_IO_H
-#define IO_SPACE_LIMIT 0xffffffff
-
#define __io(a) __typesafe_io(a)
#define __mem_pci(a) (a)
diff --git a/arch/arm/mach-vt8500/Makefile.boot b/arch/arm/mach-vt8500/Makefile.boot
index a8acc4e2490..b79c41cdfdf 100644
--- a/arch/arm/mach-vt8500/Makefile.boot
+++ b/arch/arm/mach-vt8500/Makefile.boot
@@ -1,3 +1,3 @@
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000100
initrd_phys-y := 0x01000000
diff --git a/arch/arm/mach-vt8500/include/mach/io.h b/arch/arm/mach-vt8500/include/mach/io.h
index 9077239f78c..46181eecf27 100644
--- a/arch/arm/mach-vt8500/include/mach/io.h
+++ b/arch/arm/mach-vt8500/include/mach/io.h
@@ -20,8 +20,6 @@
#ifndef __ASM_ARM_ARCH_IO_H
#define __ASM_ARM_ARCH_IO_H
-#define IO_SPACE_LIMIT 0xffff
-
#define __io(a) __typesafe_io((a) + 0xf0000000)
#define __mem_pci(a) (a)
diff --git a/arch/arm/mach-w90x900/Makefile.boot b/arch/arm/mach-w90x900/Makefile.boot
index a057b546b6e..6c3d421c2d1 100644
--- a/arch/arm/mach-w90x900/Makefile.boot
+++ b/arch/arm/mach-w90x900/Makefile.boot
@@ -1,3 +1,3 @@
-zreladdr-y := 0x00008000
+zreladdr-y += 0x00008000
params_phys-y := 0x00000100
diff --git a/arch/arm/mach-zynq/Makefile.boot b/arch/arm/mach-zynq/Makefile.boot
index 67039c3e0c4..760a0efe758 100644
--- a/arch/arm/mach-zynq/Makefile.boot
+++ b/arch/arm/mach-zynq/Makefile.boot
@@ -1,3 +1,3 @@
- zreladdr-y := 0x00008000
+ zreladdr-y += 0x00008000
params_phys-y := 0x00000100
initrd_phys-y := 0x00800000
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index cfbcf8b9559..c335c76e0d8 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -86,16 +86,6 @@ core_param(alignment, ai_usermode, int, 0600);
#define UM_FIXUP (1 << 1)
#define UM_SIGNAL (1 << 2)
-#ifdef CONFIG_PROC_FS
-static const char *usermode_action[] = {
- "ignored",
- "warn",
- "fixup",
- "fixup+warn",
- "signal",
- "signal+warn"
-};
-
/* Return true if and only if the ARMv6 unaligned access model is in use. */
static bool cpu_is_v6_unaligned(void)
{
@@ -123,6 +113,16 @@ static int safe_usermode(int new_usermode, bool warn)
return new_usermode;
}
+#ifdef CONFIG_PROC_FS
+static const char *usermode_action[] = {
+ "ignored",
+ "warn",
+ "fixup",
+ "fixup+warn",
+ "signal",
+ "signal+warn"
+};
+
static int alignment_proc_show(struct seq_file *m, void *v)
{
seq_printf(m, "User:\t\t%lu\n", ai_user);
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 9ecfdb51195..3f9b9980478 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -16,9 +16,12 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
@@ -30,11 +33,19 @@ static DEFINE_SPINLOCK(l2x0_lock);
static uint32_t l2x0_way_mask; /* Bitmask of active ways */
static uint32_t l2x0_size;
+struct l2x0_regs l2x0_saved_regs;
+
+struct l2x0_of_data {
+ void (*setup)(const struct device_node *, __u32 *, __u32 *);
+ void (*save)(void);
+ void (*resume)(void);
+};
+
static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
/* wait for cache operation by line or way to complete */
while (readl_relaxed(reg) & mask)
- ;
+ cpu_relax();
}
#ifdef CONFIG_CACHE_PL310
@@ -277,7 +288,7 @@ static void l2x0_disable(void)
spin_unlock_irqrestore(&l2x0_lock, flags);
}
-static void __init l2x0_unlock(__u32 cache_id)
+static void l2x0_unlock(__u32 cache_id)
{
int lockregs;
int i;
@@ -353,6 +364,8 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
/* l2x0 controller is disabled */
writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
+ l2x0_saved_regs.aux_ctrl = aux;
+
l2x0_inv_all();
/* enable L2X0 */
@@ -372,3 +385,202 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
ways, cache_id, aux, l2x0_size);
}
+
+#ifdef CONFIG_OF
+static void __init l2x0_of_setup(const struct device_node *np,
+ __u32 *aux_val, __u32 *aux_mask)
+{
+ u32 data[2] = { 0, 0 };
+ u32 tag = 0;
+ u32 dirty = 0;
+ u32 val = 0, mask = 0;
+
+ of_property_read_u32(np, "arm,tag-latency", &tag);
+ if (tag) {
+ mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
+ val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
+ }
+
+ of_property_read_u32_array(np, "arm,data-latency",
+ data, ARRAY_SIZE(data));
+ if (data[0] && data[1]) {
+ mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
+ L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
+ val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
+ ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
+ }
+
+ of_property_read_u32(np, "arm,dirty-latency", &dirty);
+ if (dirty) {
+ mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
+ val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
+ }
+
+ *aux_val &= ~mask;
+ *aux_val |= val;
+ *aux_mask &= ~mask;
+}
+
+static void __init pl310_of_setup(const struct device_node *np,
+ __u32 *aux_val, __u32 *aux_mask)
+{
+ u32 data[3] = { 0, 0, 0 };
+ u32 tag[3] = { 0, 0, 0 };
+ u32 filter[2] = { 0, 0 };
+
+ of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
+ if (tag[0] && tag[1] && tag[2])
+ writel_relaxed(
+ ((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
+ ((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
+ ((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
+ l2x0_base + L2X0_TAG_LATENCY_CTRL);
+
+ of_property_read_u32_array(np, "arm,data-latency",
+ data, ARRAY_SIZE(data));
+ if (data[0] && data[1] && data[2])
+ writel_relaxed(
+ ((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
+ ((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
+ ((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
+ l2x0_base + L2X0_DATA_LATENCY_CTRL);
+
+ of_property_read_u32_array(np, "arm,filter-ranges",
+ filter, ARRAY_SIZE(filter));
+ if (filter[1]) {
+ writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
+ l2x0_base + L2X0_ADDR_FILTER_END);
+ writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
+ l2x0_base + L2X0_ADDR_FILTER_START);
+ }
+}
+
+static void __init pl310_save(void)
+{
+ u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
+ L2X0_CACHE_ID_RTL_MASK;
+
+ l2x0_saved_regs.tag_latency = readl_relaxed(l2x0_base +
+ L2X0_TAG_LATENCY_CTRL);
+ l2x0_saved_regs.data_latency = readl_relaxed(l2x0_base +
+ L2X0_DATA_LATENCY_CTRL);
+ l2x0_saved_regs.filter_end = readl_relaxed(l2x0_base +
+ L2X0_ADDR_FILTER_END);
+ l2x0_saved_regs.filter_start = readl_relaxed(l2x0_base +
+ L2X0_ADDR_FILTER_START);
+
+ if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
+ /*
+ * From r2p0, there is Prefetch offset/control register
+ */
+ l2x0_saved_regs.prefetch_ctrl = readl_relaxed(l2x0_base +
+ L2X0_PREFETCH_CTRL);
+ /*
+ * From r3p0, there is Power control register
+ */
+ if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
+ l2x0_saved_regs.pwr_ctrl = readl_relaxed(l2x0_base +
+ L2X0_POWER_CTRL);
+ }
+}
+
+static void l2x0_resume(void)
+{
+ if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
+ /* restore aux ctrl and enable l2 */
+ l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));
+
+ writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base +
+ L2X0_AUX_CTRL);
+
+ l2x0_inv_all();
+
+ writel_relaxed(1, l2x0_base + L2X0_CTRL);
+ }
+}
+
+static void pl310_resume(void)
+{
+ u32 l2x0_revision;
+
+ if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
+ /* restore pl310 setup */
+ writel_relaxed(l2x0_saved_regs.tag_latency,
+ l2x0_base + L2X0_TAG_LATENCY_CTRL);
+ writel_relaxed(l2x0_saved_regs.data_latency,
+ l2x0_base + L2X0_DATA_LATENCY_CTRL);
+ writel_relaxed(l2x0_saved_regs.filter_end,
+ l2x0_base + L2X0_ADDR_FILTER_END);
+ writel_relaxed(l2x0_saved_regs.filter_start,
+ l2x0_base + L2X0_ADDR_FILTER_START);
+
+ l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
+ L2X0_CACHE_ID_RTL_MASK;
+
+ if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
+ writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
+ l2x0_base + L2X0_PREFETCH_CTRL);
+ if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
+ writel_relaxed(l2x0_saved_regs.pwr_ctrl,
+ l2x0_base + L2X0_POWER_CTRL);
+ }
+ }
+
+ l2x0_resume();
+}
+
+static const struct l2x0_of_data pl310_data = {
+ pl310_of_setup,
+ pl310_save,
+ pl310_resume,
+};
+
+static const struct l2x0_of_data l2x0_data = {
+ l2x0_of_setup,
+ NULL,
+ l2x0_resume,
+};
+
+static const struct of_device_id l2x0_ids[] __initconst = {
+ { .compatible = "arm,pl310-cache", .data = (void *)&pl310_data },
+ { .compatible = "arm,l220-cache", .data = (void *)&l2x0_data },
+ { .compatible = "arm,l210-cache", .data = (void *)&l2x0_data },
+ {}
+};
+
+int __init l2x0_of_init(__u32 aux_val, __u32 aux_mask)
+{
+ struct device_node *np;
+ struct l2x0_of_data *data;
+ struct resource res;
+
+ np = of_find_matching_node(NULL, l2x0_ids);
+ if (!np)
+ return -ENODEV;
+
+ if (of_address_to_resource(np, 0, &res))
+ return -ENODEV;
+
+ l2x0_base = ioremap(res.start, resource_size(&res));
+ if (!l2x0_base)
+ return -ENOMEM;
+
+ l2x0_saved_regs.phy_base = res.start;
+
+ data = of_match_node(l2x0_ids, np)->data;
+
+ /* L2 configuration can only be changed if the cache is disabled */
+ if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
+ if (data->setup)
+ data->setup(np, &aux_val, &aux_mask);
+ }
+
+ if (data->save)
+ data->save();
+
+ l2x0_init(l2x0_base, aux_val, aux_mask);
+
+ outer_cache.resume = data->resume;
+ return 0;
+}
+#endif
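The new OF setup helpers above fold the devicetree latencies into the (aux_val, aux_mask) pair that l2x0_init() later applies: aux_mask selects which bits of the hardware AUX_CTRL value are preserved and aux_val supplies the replacement bits. A pure-arithmetic model of that final combination step, as the surrounding code suggests:

    #include <stdint.h>

    /* aux = (hw_aux & aux_mask) | aux_val -- fields cleared from the mask are
     * taken from aux_val, everything else keeps its hardware reset value.    */
    static uint32_t combine_aux(uint32_t hw_aux, uint32_t aux_val, uint32_t aux_mask)
    {
        return (hw_aux & aux_mask) | aux_val;
    }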
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 3b24bfa3b82..07c4bc8ea0a 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -174,6 +174,10 @@ ENTRY(v7_coherent_user_range)
dcache_line_size r2, r3
sub r3, r2, #1
bic r12, r0, r3
+#ifdef CONFIG_ARM_ERRATA_764369
+ ALT_SMP(W(dsb))
+ ALT_UP(W(nop))
+#endif
1:
USER( mcr p15, 0, r12, c7, c11, 1 ) @ clean D line to the point of unification
add r12, r12, r2
@@ -223,6 +227,10 @@ ENTRY(v7_flush_kern_dcache_area)
add r1, r0, r1
sub r3, r2, #1
bic r0, r0, r3
+#ifdef CONFIG_ARM_ERRATA_764369
+ ALT_SMP(W(dsb))
+ ALT_UP(W(nop))
+#endif
1:
mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line / unified line
add r0, r0, r2
@@ -247,6 +255,10 @@ v7_dma_inv_range:
sub r3, r2, #1
tst r0, r3
bic r0, r0, r3
+#ifdef CONFIG_ARM_ERRATA_764369
+ ALT_SMP(W(dsb))
+ ALT_UP(W(nop))
+#endif
mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line
tst r1, r3
@@ -270,6 +282,10 @@ v7_dma_clean_range:
dcache_line_size r2, r3
sub r3, r2, #1
bic r0, r0, r3
+#ifdef CONFIG_ARM_ERRATA_764369
+ ALT_SMP(W(dsb))
+ ALT_UP(W(nop))
+#endif
1:
mcr p15, 0, r0, c7, c10, 1 @ clean D / U line
add r0, r0, r2
@@ -288,6 +304,10 @@ ENTRY(v7_dma_flush_range)
dcache_line_size r2, r3
sub r3, r2, #1
bic r0, r0, r3
+#ifdef CONFIG_ARM_ERRATA_764369
+ ALT_SMP(W(dsb))
+ ALT_UP(W(nop))
+#endif
1:
mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line
add r0, r0, r2
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 0a0a1e7c20d..235eb775fc7 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -123,8 +123,8 @@ static void __dma_free_buffer(struct page *page, size_t size)
#endif
#define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
-#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
-#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
+#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PMD_SHIFT)
+#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PMD_SHIFT)
/*
* These are the page tables (2MB each) covering uncached, DMA consistent allocations
@@ -183,7 +183,7 @@ static int __init consistent_init(void)
}
consistent_pte[i++] = pte;
- base += (1 << PGDIR_SHIFT);
+ base += PMD_SIZE;
} while (base < CONSISTENT_END);
return ret;
@@ -324,6 +324,8 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
if (addr)
*handle = pfn_to_dma(dev, page_to_pfn(page));
+ else
+ __dma_free_buffer(page, size);
return addr;
}
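The dma-mapping.c hunk above plugs a leak on the error path: if remapping the freshly allocated buffer fails, the buffer itself must be released before returning. A minimal sketch of the allocate-then-map-or-roll-back shape (map_buffer() is a hypothetical stand-in for the remap step):

    #include <stdlib.h>

    extern void *map_buffer(void *buf, size_t size);   /* hypothetical */

    static void *alloc_and_map(size_t size)
    {
        void *buf = malloc(size);
        void *addr;

        if (!buf)
            return NULL;

        addr = map_buffer(buf, size);
        if (!addr)
            free(buf);          /* roll back so the buffer is not leaked */
        return addr;
    }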
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 3b5ea68acbb..aa33949fef6 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -20,6 +20,7 @@
#include <linux/highmem.h>
#include <linux/perf_event.h>
+#include <asm/exception.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index cc7e2d8be9a..f8037ba338a 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -496,6 +496,13 @@ static void __init free_unused_memmap(struct meminfo *mi)
*/
bank_start = min(bank_start,
ALIGN(prev_bank_end, PAGES_PER_SECTION));
+#else
+ /*
+ * Align down here since the VM subsystem insists that the
+ * memmap entries are valid from the bank start aligned to
+ * MAX_ORDER_NR_PAGES.
+ */
+ bank_start = round_down(bank_start, MAX_ORDER_NR_PAGES);
#endif
/*
* If we had a previous bank, and there is a space
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index ab506272b2d..bdb248c4f55 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -289,6 +289,27 @@ __arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
}
EXPORT_SYMBOL(__arm_ioremap);
+/*
+ * Remap an arbitrary physical address space into the kernel virtual
+ * address space as memory. Needed when the kernel wants to execute
+ * code in external memory. This is needed for reprogramming source
+ * clocks that would affect normal memory for example. Please see
+ * CONFIG_GENERIC_ALLOCATOR for allocating external memory.
+ */
+void __iomem *
+__arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
+{
+ unsigned int mtype;
+
+ if (cached)
+ mtype = MT_MEMORY;
+ else
+ mtype = MT_MEMORY_NONCACHED;
+
+ return __arm_ioremap_caller(phys_addr, size, mtype,
+ __builtin_return_address(0));
+}
+
void __iounmap(volatile void __iomem *io_addr)
{
void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 010566799c8..ad7cce3bc43 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -12,8 +12,8 @@ static inline pmd_t *pmd_off_k(unsigned long virt)
struct mem_type {
pteval_t prot_pte;
- unsigned int prot_l1;
- unsigned int prot_sect;
+ pmdval_t prot_l1;
+ pmdval_t prot_sect;
unsigned int domain;
};
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 594d677b92c..226f1804be1 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -60,7 +60,7 @@ EXPORT_SYMBOL(pgprot_kernel);
struct cachepolicy {
const char policy[16];
unsigned int cr_mask;
- unsigned int pmd;
+ pmdval_t pmd;
pteval_t pte;
};
@@ -288,7 +288,7 @@ static void __init build_mem_type_table(void)
{
struct cachepolicy *cp;
unsigned int cr = get_cr();
- unsigned int user_pgprot, kern_pgprot, vecs_pgprot;
+ pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
int cpu_arch = cpu_architecture();
int i;
@@ -863,14 +863,14 @@ static inline void prepare_page_table(void)
/*
* Clear out all the mappings below the kernel image.
*/
- for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE)
+ for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)
pmd_clear(pmd_off_k(addr));
#ifdef CONFIG_XIP_KERNEL
/* The XIP kernel is mapped in the module area -- skip over it */
- addr = ((unsigned long)_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
+ addr = ((unsigned long)_etext + PMD_SIZE - 1) & PMD_MASK;
#endif
- for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
+ for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)
pmd_clear(pmd_off_k(addr));
/*
@@ -885,10 +885,12 @@ static inline void prepare_page_table(void)
* memory bank, up to the end of the vmalloc region.
*/
for (addr = __phys_to_virt(end);
- addr < VMALLOC_END; addr += PGDIR_SIZE)
+ addr < VMALLOC_END; addr += PMD_SIZE)
pmd_clear(pmd_off_k(addr));
}
+#define SWAPPER_PG_DIR_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
+
/*
* Reserve the special regions of memory
*/
@@ -898,7 +900,7 @@ void __init arm_mm_memblock_reserve(void)
* Reserve the page tables. These are already in use,
* and can only be in node 0.
*/
- memblock_reserve(__pa(swapper_pg_dir), PTRS_PER_PGD * sizeof(pgd_t));
+ memblock_reserve(__pa(swapper_pg_dir), SWAPPER_PG_DIR_SIZE);
#ifdef CONFIG_SA1111
/*
@@ -926,7 +928,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
*/
vectors_page = early_alloc(PAGE_SIZE);
- for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
+ for (addr = VMALLOC_END; addr; addr += PMD_SIZE)
pmd_clear(pmd_off_k(addr));
/*
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 9049c0764db..9591c8e9fb8 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -218,7 +218,7 @@ ENDPROC(cpu_v7_set_pte_ext)
/* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */
.globl cpu_v7_suspend_size
.equ cpu_v7_suspend_size, 4 * 9
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_v7_do_suspend)
stmfd sp!, {r4 - r11, lr}
mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig
index bb8f4a6b3e3..5b605a9eb09 100644
--- a/arch/arm/plat-omap/Kconfig
+++ b/arch/arm/plat-omap/Kconfig
@@ -14,6 +14,7 @@ config ARCH_OMAP1
select CLKDEV_LOOKUP
select CLKSRC_MMIO
select GENERIC_IRQ_CHIP
+ select HAVE_IDE
help
"Systems based on omap7xx, omap15xx or omap16xx"
diff --git a/arch/arm/plat-s5p/irq-gpioint.c b/arch/arm/plat-s5p/irq-gpioint.c
index f71078ef6bb..c65eb791d1b 100644
--- a/arch/arm/plat-s5p/irq-gpioint.c
+++ b/arch/arm/plat-s5p/irq-gpioint.c
@@ -114,17 +114,18 @@ static __init int s5p_gpioint_add(struct s3c_gpio_chip *chip)
{
static int used_gpioint_groups = 0;
int group = chip->group;
- struct s5p_gpioint_bank *bank = NULL;
+ struct s5p_gpioint_bank *b, *bank = NULL;
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
if (used_gpioint_groups >= S5P_GPIOINT_GROUP_COUNT)
return -ENOMEM;
- list_for_each_entry(bank, &banks, list) {
- if (group >= bank->start &&
- group < bank->start + bank->nr_groups)
+ list_for_each_entry(b, &banks, list) {
+ if (group >= b->start && group < b->start + b->nr_groups) {
+ bank = b;
break;
+ }
}
if (!bank)
return -EINVAL;
@@ -162,9 +163,9 @@ static __init int s5p_gpioint_add(struct s3c_gpio_chip *chip)
ct->chip.irq_mask = irq_gc_mask_set_bit;
ct->chip.irq_unmask = irq_gc_mask_clr_bit;
ct->chip.irq_set_type = s5p_gpioint_set_type,
- ct->regs.ack = PEND_OFFSET + REG_OFFSET(chip->group);
- ct->regs.mask = MASK_OFFSET + REG_OFFSET(chip->group);
- ct->regs.type = CON_OFFSET + REG_OFFSET(chip->group);
+ ct->regs.ack = PEND_OFFSET + REG_OFFSET(group - bank->start);
+ ct->regs.mask = MASK_OFFSET + REG_OFFSET(group - bank->start);
+ ct->regs.type = CON_OFFSET + REG_OFFSET(group - bank->start);
irq_setup_generic_chip(gc, IRQ_MSK(chip->chip.ngpio),
IRQ_GC_INIT_MASK_CACHE,
IRQ_NOREQUEST | IRQ_NOPROBE, 0);
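The irq-gpioint.c fix above addresses a common list_for_each_entry() pitfall: after the loop the cursor points at the container of the list head, never NULL, so testing the cursor for "not found" is wrong. The patch walks with a scratch pointer and assigns the result only on a real match. The same idea on a plain singly linked list (types are illustrative):

    #include <stddef.h>

    struct bank { int start, nr_groups; struct bank *next; };

    /* Only 'found' may be trusted after the loop; the cursor is just a cursor. */
    static struct bank *find_bank(struct bank *head, int group)
    {
        struct bank *found = NULL;

        for (struct bank *b = head; b; b = b->next) {
            if (group >= b->start && group < b->start + b->nr_groups) {
                found = b;
                break;
            }
        }
        return found;    /* NULL means no bank covers 'group' */
    }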
diff --git a/arch/arm/plat-samsung/clock.c b/arch/arm/plat-samsung/clock.c
index 302c42670bd..3b4451979d1 100644
--- a/arch/arm/plat-samsung/clock.c
+++ b/arch/arm/plat-samsung/clock.c
@@ -64,6 +64,17 @@ static LIST_HEAD(clocks);
*/
DEFINE_SPINLOCK(clocks_lock);
+/* Global watchdog clock used by arch_wtd_reset() callback */
+struct clk *s3c2410_wdtclk;
+static int __init s3c_wdt_reset_init(void)
+{
+ s3c2410_wdtclk = clk_get(NULL, "watchdog");
+ if (IS_ERR(s3c2410_wdtclk))
+ printk(KERN_WARNING "%s: warning: cannot get watchdog clock\n", __func__);
+ return 0;
+}
+arch_initcall(s3c_wdt_reset_init);
+
/* enable and disable calls for use with the clk struct */
static int clk_null_enable(struct clk *clk, int enable)
diff --git a/arch/arm/plat-samsung/include/plat/clock.h b/arch/arm/plat-samsung/include/plat/clock.h
index 87d5b38a86f..73c66d4d10f 100644
--- a/arch/arm/plat-samsung/include/plat/clock.h
+++ b/arch/arm/plat-samsung/include/plat/clock.h
@@ -9,6 +9,9 @@
* published by the Free Software Foundation.
*/
+#ifndef __ASM_PLAT_CLOCK_H
+#define __ASM_PLAT_CLOCK_H __FILE__
+
#include <linux/spinlock.h>
#include <linux/clkdev.h>
@@ -121,3 +124,8 @@ extern int s3c64xx_sclk_ctrl(struct clk *clk, int enable);
extern void s3c_pwmclk_init(void);
+/* Global watchdog clock used by arch_wtd_reset() callback */
+
+extern struct clk *s3c2410_wdtclk;
+
+#endif /* __ASM_PLAT_CLOCK_H */
diff --git a/arch/arm/plat-samsung/include/plat/watchdog-reset.h b/arch/arm/plat-samsung/include/plat/watchdog-reset.h
index 54b762acb5a..40dbb2b0ae2 100644
--- a/arch/arm/plat-samsung/include/plat/watchdog-reset.h
+++ b/arch/arm/plat-samsung/include/plat/watchdog-reset.h
@@ -10,6 +10,7 @@
* published by the Free Software Foundation.
*/
+#include <plat/clock.h>
#include <plat/regs-watchdog.h>
#include <mach/map.h>
@@ -19,17 +20,12 @@
static inline void arch_wdt_reset(void)
{
- struct clk *wdtclk;
-
printk("arch_reset: attempting watchdog reset\n");
__raw_writel(0, S3C2410_WTCON); /* disable watchdog, to be safe */
- wdtclk = clk_get(NULL, "watchdog");
- if (!IS_ERR(wdtclk)) {
- clk_enable(wdtclk);
- } else
- printk(KERN_WARNING "%s: warning: cannot get watchdog clock\n", __func__);
+ if (s3c2410_wdtclk)
+ clk_enable(s3c2410_wdtclk);
/* put initial values into count and data */
__raw_writel(0x80, S3C2410_WTCNT);
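The plat-samsung change above resolves the watchdog clock once from an arch_initcall and caches it in s3c2410_wdtclk, so the reset path no longer calls clk_get() while the machine is going down. A minimal model of the look-up-once, use-the-cached-handle-later pattern (resolve_resource() and use_resource() are hypothetical stand-ins):

    extern void *resolve_resource(const char *name);   /* hypothetical lookup */
    extern void use_resource(void *handle);            /* hypothetical use    */

    static void *wdt_clk;       /* cached handle; stays NULL if lookup failed */

    static void wdt_init(void)
    {
        wdt_clk = resolve_resource("watchdog");
    }

    static void wdt_reset(void)
    {
        /* emergency path: no lookups, only the cached handle is touched */
        if (wdt_clk)
            use_resource(wdt_clk);
    }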
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index 62cc8f98117..5bdeef96984 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -12,10 +12,9 @@
#
# http://www.arm.linux.org.uk/developer/machines/?action=new
#
-# XXX: This is a cut-down version of the file; it contains only machines that
-# XXX: are in mainline or have been submitted to the machine database within
-# XXX: the last 12 months. If your entry is missing please email rmk at
-# XXX: <linux@arm.linux.org.uk>
+# This is a cut-down version of the file; it contains only machines that
+# are merged into mainline or have been edited in the machine database
+# within the last 12 months. References to machine_is_NAME() do not count!
#
# Last update: Sat May 7 08:48:24 2011
#
@@ -65,6 +64,7 @@ h7201 ARCH_H7201 H7201 161
h7202 ARCH_H7202 H7202 162
iq80321 ARCH_IQ80321 IQ80321 169
ks8695 ARCH_KS8695 KS8695 180
+karo ARCH_KARO KARO 190
smdk2410 ARCH_SMDK2410 SMDK2410 193
ceiva ARCH_CEIVA CEIVA 200
voiceblue MACH_VOICEBLUE VOICEBLUE 218
@@ -188,6 +188,7 @@ omap_2430sdp MACH_OMAP_2430SDP OMAP_2430SDP 900
davinci_evm MACH_DAVINCI_EVM DAVINCI_EVM 901
palmz72 MACH_PALMZ72 PALMZ72 904
nxdb500 MACH_NXDB500 NXDB500 905
+apf9328 MACH_APF9328 APF9328 906
palmt5 MACH_PALMT5 PALMT5 917
palmtc MACH_PALMTC PALMTC 918
omap_apollon MACH_OMAP_APOLLON OMAP_APOLLON 919
@@ -271,10 +272,12 @@ pcm038 MACH_PCM038 PCM038 1551
ts_x09 MACH_TS209 TS209 1565
at91cap9adk MACH_AT91CAP9ADK AT91CAP9ADK 1566
mx31moboard MACH_MX31MOBOARD MX31MOBOARD 1574
+vision_ep9307 MACH_VISION_EP9307 VISION_EP9307 1578
terastation_pro2 MACH_TERASTATION_PRO2 TERASTATION_PRO2 1584
linkstation_pro MACH_LINKSTATION_PRO LINKSTATION_PRO 1585
e350 MACH_E350 E350 1596
ts409 MACH_TS409 TS409 1601
+rsi_ews MACH_RSI_EWS RSI_EWS 1609
cm_x300 MACH_CM_X300 CM_X300 1616
at91sam9g20ek MACH_AT91SAM9G20EK AT91SAM9G20EK 1624
smdk6410 MACH_SMDK6410 SMDK6410 1626
@@ -331,6 +334,7 @@ smdkc100 MACH_SMDKC100 SMDKC100 1826
tavorevb MACH_TAVOREVB TAVOREVB 1827
saar MACH_SAAR SAAR 1828
at91sam9m10g45ek MACH_AT91SAM9M10G45EK AT91SAM9M10G45EK 1830
+usb_a9g20 MACH_USB_A9G20 USB_A9G20 1841
mxlads MACH_MXLADS MXLADS 1851
linkstation_mini MACH_LINKSTATION_MINI LINKSTATION_MINI 1858
afeb9260 MACH_AFEB9260 AFEB9260 1859
@@ -369,6 +373,7 @@ pcm043 MACH_PCM043 PCM043 2072
sheevaplug MACH_SHEEVAPLUG SHEEVAPLUG 2097
avengers_lite MACH_AVENGERS_LITE AVENGERS_LITE 2104
mx51_babbage MACH_MX51_BABBAGE MX51_BABBAGE 2125
+tx37 MACH_TX37 TX37 2127
rd78x00_masa MACH_RD78X00_MASA RD78X00_MASA 2135
dm355_leopard MACH_DM355_LEOPARD DM355_LEOPARD 2138
ts219 MACH_TS219 TS219 2139
@@ -379,6 +384,7 @@ omap_4430sdp MACH_OMAP_4430SDP OMAP_4430SDP 2160
magx_zn5 MACH_MAGX_ZN5 MAGX_ZN5 2162
btmavb101 MACH_BTMAVB101 BTMAVB101 2172
btmawb101 MACH_BTMAWB101 BTMAWB101 2173
+tx25 MACH_TX25 TX25 2177
omap3_torpedo MACH_OMAP3_TORPEDO OMAP3_TORPEDO 2178
anw6410 MACH_ANW6410 ANW6410 2183
imx27_visstrim_m10 MACH_IMX27_VISSTRIM_M10 IMX27_VISSTRIM_M10 2187
@@ -423,6 +429,7 @@ raumfeld_rc MACH_RAUMFELD_RC RAUMFELD_RC 2413
raumfeld_connector MACH_RAUMFELD_CONNECTOR RAUMFELD_CONNECTOR 2414
raumfeld_speaker MACH_RAUMFELD_SPEAKER RAUMFELD_SPEAKER 2415
tnetv107x MACH_TNETV107X TNETV107X 2418
+mx51_m2id MACH_MX51_M2ID MX51_M2ID 2428
smdkv210 MACH_SMDKV210 SMDKV210 2456
omap_zoom3 MACH_OMAP_ZOOM3 OMAP_ZOOM3 2464
omap_3630sdp MACH_OMAP_3630SDP OMAP_3630SDP 2465
@@ -433,14 +440,17 @@ omapl138_hawkboard MACH_OMAPL138_HAWKBOARD OMAPL138_HAWKBOARD 2495
ts41x MACH_TS41X TS41X 2502
phy3250 MACH_PHY3250 PHY3250 2511
mini6410 MACH_MINI6410 MINI6410 2520
+tx51 MACH_TX51 TX51 2529
mx28evk MACH_MX28EVK MX28EVK 2531
smartq5 MACH_SMARTQ5 SMARTQ5 2534
davinci_dm6467tevm MACH_DAVINCI_DM6467TEVM DAVINCI_DM6467TEVM 2548
mxt_td60 MACH_MXT_TD60 MXT_TD60 2550
riot_bei2 MACH_RIOT_BEI2 RIOT_BEI2 2576
riot_x37 MACH_RIOT_X37 RIOT_X37 2578
+pca101 MACH_PCA101 PCA101 2595
capc7117 MACH_CAPC7117 CAPC7117 2612
icontrol MACH_ICONTROL ICONTROL 2624
+gplugd MACH_GPLUGD GPLUGD 2625
qsd8x50a_st1_5 MACH_QSD8X50A_ST1_5 QSD8X50A_ST1_5 2627
mx23evk MACH_MX23EVK MX23EVK 2629
ap4evb MACH_AP4EVB AP4EVB 2630
@@ -1113,3 +1123,5 @@ blissc MACH_BLISSC BLISSC 3491
thales_adc MACH_THALES_ADC THALES_ADC 3492
ubisys_p9d_evp MACH_UBISYS_P9D_EVP UBISYS_P9D_EVP 3493
atdgp318 MACH_ATDGP318 ATDGP318 3494
+smdk4212 MACH_SMDK4212 SMDK4212 3638
+smdk4412 MACH_SMDK4412 SMDK4412 3765
diff --git a/arch/arm/vfp/Makefile b/arch/arm/vfp/Makefile
index 6de73aab019..a81404c09d5 100644
--- a/arch/arm/vfp/Makefile
+++ b/arch/arm/vfp/Makefile
@@ -7,7 +7,7 @@
# ccflags-y := -DDEBUG
# asflags-y := -DDEBUG
-KBUILD_AFLAGS :=$(KBUILD_AFLAGS:-msoft-float=-Wa,-mfpu=softvfp+vfp)
+KBUILD_AFLAGS :=$(KBUILD_AFLAGS:-msoft-float=-Wa,-mfpu=softvfp+vfp -mfloat-abi=soft)
LDFLAGS +=--no-warn-mismatch
obj-y += vfp.o
diff --git a/arch/m68k/kernel/vmlinux.lds_no.S b/arch/m68k/kernel/vmlinux.lds_no.S
index 7dc4087a954..4e238934083 100644
--- a/arch/m68k/kernel/vmlinux.lds_no.S
+++ b/arch/m68k/kernel/vmlinux.lds_no.S
@@ -77,7 +77,6 @@ SECTIONS {
*(.rodata) *(.rodata.*)
*(__vermagic) /* Kernel version magic */
- *(__markers_strings)
*(.rodata1)
*(.rodata.str1.1)
diff --git a/arch/m68k/mac/macints.c b/arch/m68k/mac/macints.c
index 900d899f332..f92190c159b 100644
--- a/arch/m68k/mac/macints.c
+++ b/arch/m68k/mac/macints.c
@@ -370,7 +370,7 @@ int mac_irq_pending(unsigned int irq)
break;
case 4:
if (psc_present)
- psc_irq_pending(irq);
+ return psc_irq_pending(irq);
break;
}
return 0;
diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c
index e023fc6b37e..eb915551de6 100644
--- a/arch/m68k/mac/misc.c
+++ b/arch/m68k/mac/misc.c
@@ -304,35 +304,41 @@ static void via_write_pram(int offset, __u8 data)
static long via_read_time(void)
{
union {
- __u8 cdata[4];
- long idata;
+ __u8 cdata[4];
+ long idata;
} result, last_result;
- int ct;
+ int count = 1;
+
+ via_pram_command(0x81, &last_result.cdata[3]);
+ via_pram_command(0x85, &last_result.cdata[2]);
+ via_pram_command(0x89, &last_result.cdata[1]);
+ via_pram_command(0x8D, &last_result.cdata[0]);
/*
* The NetBSD guys say to loop until you get the same reading
* twice in a row.
*/
- ct = 0;
- do {
- if (++ct > 10) {
- printk("via_read_time: couldn't get valid time, "
- "last read = 0x%08lx and 0x%08lx\n",
- last_result.idata, result.idata);
- break;
- }
-
- last_result.idata = result.idata;
- result.idata = 0;
-
+ while (1) {
via_pram_command(0x81, &result.cdata[3]);
via_pram_command(0x85, &result.cdata[2]);
via_pram_command(0x89, &result.cdata[1]);
via_pram_command(0x8D, &result.cdata[0]);
- } while (result.idata != last_result.idata);
- return result.idata - RTC_OFFSET;
+ if (result.idata == last_result.idata)
+ return result.idata - RTC_OFFSET;
+
+ if (++count > 10)
+ break;
+
+ last_result.idata = result.idata;
+ }
+
+ pr_err("via_read_time: failed to read a stable value; "
+ "got 0x%08lx then 0x%08lx\n",
+ last_result.idata, result.idata);
+
+ return 0;
}
/*
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 177cdaf8356..b122adc8bdb 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -24,6 +24,7 @@ config MIPS
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
select HAVE_ARCH_JUMP_LABEL
+ select IRQ_FORCED_THREADING
menu "Machine selection"
@@ -722,6 +723,7 @@ config CAVIUM_OCTEON_SIMULATOR
select SYS_SUPPORTS_HIGHMEM
select SYS_SUPPORTS_HOTPLUG_CPU
select SYS_HAS_CPU_CAVIUM_OCTEON
+ select HOLES_IN_ZONE
help
The Octeon simulator is software performance model of the Cavium
Octeon Processor. It supports simulating Octeon processors on x86
@@ -744,6 +746,7 @@ config CAVIUM_OCTEON_REFERENCE_BOARD
select ZONE_DMA32
select USB_ARCH_HAS_OHCI
select USB_ARCH_HAS_EHCI
+ select HOLES_IN_ZONE
help
This option supports all of the Octeon reference boards from Cavium
Networks. It builds a kernel that dynamically determines the Octeon
@@ -973,6 +976,9 @@ config ISA_DMA_API
config GENERIC_GPIO
bool
+config HOLES_IN_ZONE
+ bool
+
#
# Endianess selection. Sufficiently obscure so many users don't know what to
# answer,so we try hard to limit the available choices. Also the use of a
diff --git a/arch/mips/alchemy/common/platform.c b/arch/mips/alchemy/common/platform.c
index 3b2c18b1434..f72c48d4804 100644
--- a/arch/mips/alchemy/common/platform.c
+++ b/arch/mips/alchemy/common/platform.c
@@ -492,7 +492,7 @@ static void __init alchemy_setup_macs(int ctype)
memcpy(au1xxx_eth0_platform_data.mac, ethaddr, 6);
ret = platform_device_register(&au1xxx_eth0_device);
- if (!ret)
+ if (ret)
printk(KERN_INFO "Alchemy: failed to register MAC0\n");
diff --git a/arch/mips/alchemy/common/power.c b/arch/mips/alchemy/common/power.c
index 647e518c90b..b86324a4260 100644
--- a/arch/mips/alchemy/common/power.c
+++ b/arch/mips/alchemy/common/power.c
@@ -158,15 +158,21 @@ static void restore_core_regs(void)
void au_sleep(void)
{
- int cpuid = alchemy_get_cputype();
- if (cpuid != ALCHEMY_CPU_UNKNOWN) {
- save_core_regs();
- if (cpuid <= ALCHEMY_CPU_AU1500)
- alchemy_sleep_au1000();
- else if (cpuid <= ALCHEMY_CPU_AU1200)
- alchemy_sleep_au1550();
- restore_core_regs();
+ save_core_regs();
+
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1000:
+ case ALCHEMY_CPU_AU1500:
+ case ALCHEMY_CPU_AU1100:
+ alchemy_sleep_au1000();
+ break;
+ case ALCHEMY_CPU_AU1550:
+ case ALCHEMY_CPU_AU1200:
+ alchemy_sleep_au1550();
+ break;
}
+
+ restore_core_regs();
}
#endif /* CONFIG_PM */
diff --git a/arch/mips/alchemy/devboards/bcsr.c b/arch/mips/alchemy/devboards/bcsr.c
index 596ad00e7f0..463d2c4d944 100644
--- a/arch/mips/alchemy/devboards/bcsr.c
+++ b/arch/mips/alchemy/devboards/bcsr.c
@@ -89,8 +89,12 @@ static void bcsr_csc_handler(unsigned int irq, struct irq_desc *d)
{
unsigned short bisr = __raw_readw(bcsr_virt + BCSR_REG_INTSTAT);
+ disable_irq_nosync(irq);
+
for ( ; bisr; bisr &= bisr - 1)
generic_handle_irq(bcsr_csc_base + __ffs(bisr));
+
+ enable_irq(irq);
}
/* NOTE: both the enable and mask bits must be cleared, otherwise the
diff --git a/arch/mips/alchemy/devboards/db1200/setup.c b/arch/mips/alchemy/devboards/db1200/setup.c
index 1dac4f27d33..4a8980027ec 100644
--- a/arch/mips/alchemy/devboards/db1200/setup.c
+++ b/arch/mips/alchemy/devboards/db1200/setup.c
@@ -23,13 +23,6 @@ void __init board_setup(void)
unsigned long freq0, clksrc, div, pfc;
unsigned short whoami;
- /* Set Config[OD] (disable overlapping bus transaction):
- * This gets rid of a _lot_ of spurious interrupts (especially
- * wrt. IDE); but incurs ~10% performance hit in some
- * cpu-bound applications.
- */
- set_c0_config(1 << 19);
-
bcsr_init(DB1200_BCSR_PHYS_ADDR,
DB1200_BCSR_PHYS_ADDR + DB1200_BCSR_HEXLED_OFS);
diff --git a/arch/mips/ar7/irq.c b/arch/mips/ar7/irq.c
index 03db3daadbd..88c4babfdb5 100644
--- a/arch/mips/ar7/irq.c
+++ b/arch/mips/ar7/irq.c
@@ -98,7 +98,8 @@ static struct irq_chip ar7_sec_irq_type = {
static struct irqaction ar7_cascade_action = {
.handler = no_action,
- .name = "AR7 cascade interrupt"
+ .name = "AR7 cascade interrupt",
+ .flags = IRQF_NO_THREAD,
};
static void __init ar7_irq_init(int base)
diff --git a/arch/mips/bcm63xx/irq.c b/arch/mips/bcm63xx/irq.c
index cea6021cb8d..162e11b4ed7 100644
--- a/arch/mips/bcm63xx/irq.c
+++ b/arch/mips/bcm63xx/irq.c
@@ -222,6 +222,7 @@ static struct irq_chip bcm63xx_external_irq_chip = {
static struct irqaction cpu_ip2_cascade_action = {
.handler = no_action,
.name = "cascade_ip2",
+ .flags = IRQF_NO_THREAD,
};
void __init arch_init_irq(void)
diff --git a/arch/mips/cobalt/irq.c b/arch/mips/cobalt/irq.c
index cb9bf820fe5..965c777d356 100644
--- a/arch/mips/cobalt/irq.c
+++ b/arch/mips/cobalt/irq.c
@@ -48,6 +48,7 @@ asmlinkage void plat_irq_dispatch(void)
static struct irqaction cascade = {
.handler = no_action,
.name = "cascade",
+ .flags = IRQF_NO_THREAD,
};
void __init arch_init_irq(void)
diff --git a/arch/mips/dec/setup.c b/arch/mips/dec/setup.c
index fa45e924be0..f7b7ba6d5c4 100644
--- a/arch/mips/dec/setup.c
+++ b/arch/mips/dec/setup.c
@@ -101,20 +101,24 @@ int cpu_fpu_mask = DEC_CPU_IRQ_MASK(DEC_CPU_INR_FPU);
static struct irqaction ioirq = {
.handler = no_action,
.name = "cascade",
+ .flags = IRQF_NO_THREAD,
};
static struct irqaction fpuirq = {
.handler = no_action,
.name = "fpu",
+ .flags = IRQF_NO_THREAD,
};
static struct irqaction busirq = {
.flags = IRQF_DISABLED,
.name = "bus error",
+ .flags = IRQF_NO_THREAD,
};
static struct irqaction haltirq = {
.handler = dec_intr_halt,
.name = "halt",
+ .flags = IRQF_NO_THREAD,
};
diff --git a/arch/mips/emma/markeins/irq.c b/arch/mips/emma/markeins/irq.c
index 3dbd7a5a6ad..7798887a128 100644
--- a/arch/mips/emma/markeins/irq.c
+++ b/arch/mips/emma/markeins/irq.c
@@ -169,7 +169,7 @@ void emma2rh_gpio_irq_init(void)
static struct irqaction irq_cascade = {
.handler = no_action,
- .flags = 0,
+ .flags = IRQF_NO_THREAD,
.name = "cascade",
.dev_id = NULL,
.next = NULL,
diff --git a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
index 0d5a42b5f47..a58addb98cf 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
@@ -54,7 +54,6 @@
#define cpu_has_mips_r2_exec_hazard 0
#define cpu_has_dsp 0
#define cpu_has_mipsmt 0
-#define cpu_has_userlocal 0
#define cpu_has_vint 0
#define cpu_has_veic 0
#define cpu_hwrena_impl_bits 0xc0000000
diff --git a/arch/mips/include/asm/mach-powertv/dma-coherence.h b/arch/mips/include/asm/mach-powertv/dma-coherence.h
index 62c09408594..35371641575 100644
--- a/arch/mips/include/asm/mach-powertv/dma-coherence.h
+++ b/arch/mips/include/asm/mach-powertv/dma-coherence.h
@@ -13,7 +13,6 @@
#define __ASM_MACH_POWERTV_DMA_COHERENCE_H
#include <linux/sched.h>
-#include <linux/version.h>
#include <linux/device.h>
#include <asm/mach-powertv/asic.h>
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index b4ba2449444..cb41af5f340 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -195,9 +195,9 @@
* to cover the pipeline delay.
*/
.set mips32
- mfc0 v1, CP0_TCSTATUS
+ mfc0 k0, CP0_TCSTATUS
.set mips0
- LONG_S v1, PT_TCSTATUS(sp)
+ LONG_S k0, PT_TCSTATUS(sp)
#endif /* CONFIG_MIPS_MT_SMTC */
LONG_S $4, PT_R4(sp)
LONG_S $5, PT_R5(sp)
diff --git a/arch/mips/jz4740/gpio.c b/arch/mips/jz4740/gpio.c
index 73031f7fc82..4397972949f 100644
--- a/arch/mips/jz4740/gpio.c
+++ b/arch/mips/jz4740/gpio.c
@@ -18,7 +18,7 @@
#include <linux/init.h>
#include <linux/spinlock.h>
-#include <linux/sysdev.h>
+#include <linux/syscore_ops.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/delay.h>
@@ -86,7 +86,6 @@ struct jz_gpio_chip {
spinlock_t lock;
struct gpio_chip gpio_chip;
- struct sys_device sysdev;
};
static struct jz_gpio_chip jz4740_gpio_chips[];
@@ -459,49 +458,47 @@ static struct jz_gpio_chip jz4740_gpio_chips[] = {
JZ4740_GPIO_CHIP(D),
};
-static inline struct jz_gpio_chip *sysdev_to_chip(struct sys_device *dev)
+static void jz4740_gpio_suspend_chip(struct jz_gpio_chip *chip)
{
- return container_of(dev, struct jz_gpio_chip, sysdev);
+ chip->suspend_mask = readl(chip->base + JZ_REG_GPIO_MASK);
+ writel(~(chip->wakeup), chip->base + JZ_REG_GPIO_MASK_SET);
+ writel(chip->wakeup, chip->base + JZ_REG_GPIO_MASK_CLEAR);
}
-static int jz4740_gpio_suspend(struct sys_device *dev, pm_message_t state)
+static int jz4740_gpio_suspend(void)
{
- struct jz_gpio_chip *chip = sysdev_to_chip(dev);
+ int i;
- chip->suspend_mask = readl(chip->base + JZ_REG_GPIO_MASK);
- writel(~(chip->wakeup), chip->base + JZ_REG_GPIO_MASK_SET);
- writel(chip->wakeup, chip->base + JZ_REG_GPIO_MASK_CLEAR);
+ for (i = 0; i < ARRAY_SIZE(jz4740_gpio_chips); i++)
+ jz4740_gpio_suspend_chip(&jz4740_gpio_chips[i]);
return 0;
}
-static int jz4740_gpio_resume(struct sys_device *dev)
+static void jz4740_gpio_resume_chip(struct jz_gpio_chip *chip)
{
- struct jz_gpio_chip *chip = sysdev_to_chip(dev);
uint32_t mask = chip->suspend_mask;
writel(~mask, chip->base + JZ_REG_GPIO_MASK_CLEAR);
writel(mask, chip->base + JZ_REG_GPIO_MASK_SET);
+}
- return 0;
+static void jz4740_gpio_resume(void)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(jz4740_gpio_chips) - 1; i >= 0 ; i--)
+ jz4740_gpio_resume_chip(&jz4740_gpio_chips[i]);
}
-static struct sysdev_class jz4740_gpio_sysdev_class = {
- .name = "gpio",
+static struct syscore_ops jz4740_gpio_syscore_ops = {
.suspend = jz4740_gpio_suspend,
.resume = jz4740_gpio_resume,
};
-static int jz4740_gpio_chip_init(struct jz_gpio_chip *chip, unsigned int id)
+static void jz4740_gpio_chip_init(struct jz_gpio_chip *chip, unsigned int id)
{
- int ret, irq;
-
- chip->sysdev.id = id;
- chip->sysdev.cls = &jz4740_gpio_sysdev_class;
- ret = sysdev_register(&chip->sysdev);
-
- if (ret)
- return ret;
+ int irq;
spin_lock_init(&chip->lock);
@@ -519,22 +516,17 @@ static int jz4740_gpio_chip_init(struct jz_gpio_chip *chip, unsigned int id)
irq_set_chip_and_handler(irq, &jz_gpio_irq_chip,
handle_level_irq);
}
-
- return 0;
}
static int __init jz4740_gpio_init(void)
{
unsigned int i;
- int ret;
-
- ret = sysdev_class_register(&jz4740_gpio_sysdev_class);
- if (ret)
- return ret;
for (i = 0; i < ARRAY_SIZE(jz4740_gpio_chips); ++i)
jz4740_gpio_chip_init(&jz4740_gpio_chips[i], i);
+ register_syscore_ops(&jz4740_gpio_syscore_ops);
+
printk(KERN_INFO "JZ4740 GPIO initialized\n");
return 0;
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index feb8021a305..6a2d758dd8e 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -19,6 +19,26 @@
#include <asm-generic/sections.h>
+#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
+#define MCOUNT_OFFSET_INSNS 5
+#else
+#define MCOUNT_OFFSET_INSNS 4
+#endif
+
+/*
+ * Check if the address is in kernel space
+ *
+ * Clone core_kernel_text() from kernel/extable.c, but doesn't call
+ * init_kernel_text() for Ftrace doesn't trace functions in init sections.
+ */
+static inline int in_kernel_space(unsigned long ip)
+{
+ if (ip >= (unsigned long)_stext &&
+ ip <= (unsigned long)_etext)
+ return 1;
+ return 0;
+}
+
#ifdef CONFIG_DYNAMIC_FTRACE
#define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */
@@ -54,20 +74,6 @@ static inline void ftrace_dyn_arch_init_insns(void)
#endif
}
-/*
- * Check if the address is in kernel space
- *
- * Clone core_kernel_text() from kernel/extable.c, but doesn't call
- * init_kernel_text() for Ftrace doesn't trace functions in init sections.
- */
-static inline int in_kernel_space(unsigned long ip)
-{
- if (ip >= (unsigned long)_stext &&
- ip <= (unsigned long)_etext)
- return 1;
- return 0;
-}
-
static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
int faulted;
@@ -112,11 +118,6 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
* 1: offset = 4 instructions
*/
-#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
-#define MCOUNT_OFFSET_INSNS 5
-#else
-#define MCOUNT_OFFSET_INSNS 4
-#endif
#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
int ftrace_make_nop(struct module *mod,
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index 5c74eb797f0..32b397b646e 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -229,7 +229,7 @@ static void i8259A_shutdown(void)
*/
if (i8259A_auto_eoi >= 0) {
outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
- outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-1 */
+ outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */
}
}
@@ -295,6 +295,7 @@ static void init_8259A(int auto_eoi)
static struct irqaction irq2 = {
.handler = no_action,
.name = "cascade",
+ .flags = IRQF_NO_THREAD,
};
static struct resource pic1_io_resource = {
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 876a75cc376..922a554cd10 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -349,3 +349,10 @@ SYSCALL_DEFINE6(32_fanotify_mark, int, fanotify_fd, unsigned int, flags,
return sys_fanotify_mark(fanotify_fd, flags, merge_64(a3, a4),
dfd, pathname);
}
+
+SYSCALL_DEFINE6(32_futex, u32 __user *, uaddr, int, op, u32, val,
+ struct compat_timespec __user *, utime, u32 __user *, uaddr2,
+ u32, val3)
+{
+ return compat_sys_futex(uaddr, op, val, utime, uaddr2, val3);
+}
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index f9296e894e4..6de1f598346 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -315,7 +315,7 @@ EXPORT(sysn32_call_table)
PTR sys_fremovexattr
PTR sys_tkill
PTR sys_ni_syscall
- PTR compat_sys_futex
+ PTR sys_32_futex
PTR compat_sys_sched_setaffinity /* 6195 */
PTR compat_sys_sched_getaffinity
PTR sys_cacheflush
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 4d7c9827706..1d813169e45 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -441,7 +441,7 @@ sys_call_table:
PTR sys_fremovexattr /* 4235 */
PTR sys_tkill
PTR sys_sendfile64
- PTR compat_sys_futex
+ PTR sys_32_futex
PTR compat_sys_sched_setaffinity
PTR compat_sys_sched_getaffinity /* 4240 */
PTR compat_sys_io_setup
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index dbbe0ce48d8..f8524003676 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -8,6 +8,7 @@
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
*/
#include <linux/cache.h>
+#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/personality.h>
@@ -658,6 +659,8 @@ static void do_signal(struct pt_regs *regs)
asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
__u32 thread_info_flags)
{
+ local_irq_enable();
+
/* deal with pending signal delivery */
if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
do_signal(regs);
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index b7517e3abc8..cbea618af0b 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -14,6 +14,7 @@
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
@@ -364,21 +365,26 @@ static int regs_to_trapnr(struct pt_regs *regs)
return (regs->cp0_cause >> 2) & 0x1f;
}
-static DEFINE_SPINLOCK(die_lock);
+static DEFINE_RAW_SPINLOCK(die_lock);
void __noreturn die(const char *str, struct pt_regs *regs)
{
static int die_counter;
int sig = SIGSEGV;
#ifdef CONFIG_MIPS_MT_SMTC
- unsigned long dvpret = dvpe();
+ unsigned long dvpret;
#endif /* CONFIG_MIPS_MT_SMTC */
+ oops_enter();
+
if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP)
sig = 0;
console_verbose();
- spin_lock_irq(&die_lock);
+ raw_spin_lock_irq(&die_lock);
+#ifdef CONFIG_MIPS_MT_SMTC
+ dvpret = dvpe();
+#endif /* CONFIG_MIPS_MT_SMTC */
bust_spinlocks(1);
#ifdef CONFIG_MIPS_MT_SMTC
mips_mt_regdump(dvpret);
@@ -387,7 +393,9 @@ void __noreturn die(const char *str, struct pt_regs *regs)
printk("%s[#%d]:\n", str, ++die_counter);
show_registers(regs);
add_taint(TAINT_DIE);
- spin_unlock_irq(&die_lock);
+ raw_spin_unlock_irq(&die_lock);
+
+ oops_exit();
if (in_interrupt())
panic("Fatal exception in interrupt");
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 2cd50ad0d5c..3efcb065f78 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -192,7 +192,7 @@ static struct tc *get_tc(int index)
}
spin_unlock(&vpecontrol.tc_list_lock);
- return NULL;
+ return res;
}
/* allocate a vpe and associate it with this minor (or index) */
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index fc89795cafd..f9737bb3c5a 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -123,11 +123,10 @@ void ltq_enable_irq(struct irq_data *d)
static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
{
int i;
- int irq_nr = d->irq - INT_NUM_IRQ0;
ltq_enable_irq(d);
for (i = 0; i < MAX_EIU; i++) {
- if (irq_nr == ltq_eiu_irq[i]) {
+ if (d->irq == ltq_eiu_irq[i]) {
/* low level - we should really handle set_type */
ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) |
(0x6 << (i * 4)), LTQ_EIU_EXIN_C);
@@ -147,11 +146,10 @@ static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
static void ltq_shutdown_eiu_irq(struct irq_data *d)
{
int i;
- int irq_nr = d->irq - INT_NUM_IRQ0;
ltq_disable_irq(d);
for (i = 0; i < MAX_EIU; i++) {
- if (irq_nr == ltq_eiu_irq[i]) {
+ if (d->irq == ltq_eiu_irq[i]) {
/* disable */
ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~(1 << i),
LTQ_EIU_EXIN_INEN);
diff --git a/arch/mips/lantiq/xway/ebu.c b/arch/mips/lantiq/xway/ebu.c
index 66eb52fa50a..033b3184c7a 100644
--- a/arch/mips/lantiq/xway/ebu.c
+++ b/arch/mips/lantiq/xway/ebu.c
@@ -10,7 +10,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/ioport.h>
#include <lantiq_soc.h>
diff --git a/arch/mips/lantiq/xway/pmu.c b/arch/mips/lantiq/xway/pmu.c
index 9d69f01e352..39f0d2641cb 100644
--- a/arch/mips/lantiq/xway/pmu.c
+++ b/arch/mips/lantiq/xway/pmu.c
@@ -8,7 +8,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/ioport.h>
#include <lantiq_soc.h>
diff --git a/arch/mips/lasat/interrupt.c b/arch/mips/lasat/interrupt.c
index de4c165515d..d608b6ef0ed 100644
--- a/arch/mips/lasat/interrupt.c
+++ b/arch/mips/lasat/interrupt.c
@@ -105,6 +105,7 @@ asmlinkage void plat_irq_dispatch(void)
static struct irqaction cascade = {
.handler = no_action,
.name = "cascade",
+ .flags = IRQF_NO_THREAD,
};
void __init arch_init_irq(void)
diff --git a/arch/mips/loongson/fuloong-2e/irq.c b/arch/mips/loongson/fuloong-2e/irq.c
index d61a04222b8..3cf1fef29f0 100644
--- a/arch/mips/loongson/fuloong-2e/irq.c
+++ b/arch/mips/loongson/fuloong-2e/irq.c
@@ -42,6 +42,7 @@ asmlinkage void mach_irq_dispatch(unsigned int pending)
static struct irqaction cascade_irqaction = {
.handler = no_action,
.name = "cascade",
+ .flags = IRQF_NO_THREAD,
};
void __init mach_init_irq(void)
diff --git a/arch/mips/loongson/lemote-2f/irq.c b/arch/mips/loongson/lemote-2f/irq.c
index 081db102bb9..14b081841b6 100644
--- a/arch/mips/loongson/lemote-2f/irq.c
+++ b/arch/mips/loongson/lemote-2f/irq.c
@@ -96,12 +96,13 @@ static irqreturn_t ip6_action(int cpl, void *dev_id)
struct irqaction ip6_irqaction = {
.handler = ip6_action,
.name = "cascade",
- .flags = IRQF_SHARED,
+ .flags = IRQF_SHARED | IRQF_NO_THREAD,
};
struct irqaction cascade_irqaction = {
.handler = no_action,
.name = "cascade",
+ .flags = IRQF_NO_THREAD,
};
void __init mach_init_irq(void)
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index 9ff5d0fac55..302d779d5b0 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -6,6 +6,7 @@
* Copyright (C) 2011 Wind River Systems,
* written by Ralf Baechle <ralf@linux-mips.org>
*/
+#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
@@ -15,12 +16,11 @@
#include <linux/sched.h>
unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
-
EXPORT_SYMBOL(shm_align_mask);
/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
-#define MAX_GAP ((TASK_SIZE)/6*5)
+#define MAX_GAP ((TASK_SIZE)/6*5)
static int mmap_is_legacy(void)
{
@@ -57,13 +57,13 @@ static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
return base - off;
}
-#define COLOUR_ALIGN(addr,pgoff) \
+#define COLOUR_ALIGN(addr, pgoff) \
((((addr) + shm_align_mask) & ~shm_align_mask) + \
(((pgoff) << PAGE_SHIFT) & shm_align_mask))
enum mmap_allocation_direction {UP, DOWN};
-static unsigned long arch_get_unmapped_area_foo(struct file *filp,
+static unsigned long arch_get_unmapped_area_common(struct file *filp,
unsigned long addr0, unsigned long len, unsigned long pgoff,
unsigned long flags, enum mmap_allocation_direction dir)
{
@@ -103,16 +103,16 @@ static unsigned long arch_get_unmapped_area_foo(struct file *filp,
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vma->vm_start))
return addr;
}
if (dir == UP) {
addr = mm->mmap_base;
- if (do_color_align)
- addr = COLOUR_ALIGN(addr, pgoff);
- else
- addr = PAGE_ALIGN(addr);
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ else
+ addr = PAGE_ALIGN(addr);
for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
/* At this point: (!vma || addr < vma->vm_end). */
@@ -131,28 +131,30 @@ static unsigned long arch_get_unmapped_area_foo(struct file *filp,
mm->free_area_cache = mm->mmap_base;
}
- /* either no address requested or can't fit in requested address hole */
+ /*
+ * either no address requested, or the mapping can't fit into
+ * the requested address hole
+ */
addr = mm->free_area_cache;
- if (do_color_align) {
- unsigned long base =
- COLOUR_ALIGN_DOWN(addr - len, pgoff);
-
+ if (do_color_align) {
+ unsigned long base =
+ COLOUR_ALIGN_DOWN(addr - len, pgoff);
addr = base + len;
- }
+ }
/* make sure it can fit in the remaining address space */
if (likely(addr > len)) {
vma = find_vma(mm, addr - len);
if (!vma || addr <= vma->vm_start) {
- /* remember the address as a hint for next time */
- return mm->free_area_cache = addr-len;
+ /* cache the address as a hint for next time */
+ return mm->free_area_cache = addr - len;
}
}
if (unlikely(mm->mmap_base < len))
goto bottomup;
- addr = mm->mmap_base-len;
+ addr = mm->mmap_base - len;
if (do_color_align)
addr = COLOUR_ALIGN_DOWN(addr, pgoff);
@@ -163,8 +165,8 @@ static unsigned long arch_get_unmapped_area_foo(struct file *filp,
* return with success:
*/
vma = find_vma(mm, addr);
- if (likely(!vma || addr+len <= vma->vm_start)) {
- /* remember the address as a hint for next time */
+ if (likely(!vma || addr + len <= vma->vm_start)) {
+ /* cache the address as a hint for next time */
return mm->free_area_cache = addr;
}
@@ -173,7 +175,7 @@ static unsigned long arch_get_unmapped_area_foo(struct file *filp,
mm->cached_hole_size = vma->vm_start - addr;
/* try just below the current vma->vm_start */
- addr = vma->vm_start-len;
+ addr = vma->vm_start - len;
if (do_color_align)
addr = COLOUR_ALIGN_DOWN(addr, pgoff);
} while (likely(len < vma->vm_start));
@@ -201,7 +203,7 @@ bottomup:
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
- return arch_get_unmapped_area_foo(filp,
+ return arch_get_unmapped_area_common(filp,
addr0, len, pgoff, flags, UP);
}
@@ -213,7 +215,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
unsigned long addr0, unsigned long len, unsigned long pgoff,
unsigned long flags)
{
- return arch_get_unmapped_area_foo(filp,
+ return arch_get_unmapped_area_common(filp,
addr0, len, pgoff, flags, DOWN);
}
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index b6e1cff5066..e06370f58ef 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -1759,14 +1759,13 @@ static void __cpuinit build_r3000_tlb_modify_handler(void)
u32 *p = handle_tlbm;
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
- struct work_registers wr;
memset(handle_tlbm, 0, sizeof(handle_tlbm));
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
build_r3000_tlbchange_handler_head(&p, K0, K1);
- build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
+ build_pte_modifiable(&p, &r, K0, K1, -1, label_nopage_tlbm);
uasm_i_nop(&p); /* load delay */
build_make_write(&p, &r, K0, K1);
build_r3000_pte_reload_tlbwi(&p, K0, K1);
@@ -1963,7 +1962,8 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
uasm_i_andi(&p, wr.r3, wr.r3, 2);
uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
}
-
+ if (PM_DEFAULT_MASK == 0)
+ uasm_i_nop(&p);
/*
* We clobbered C0_PAGEMASK, restore it. On the other branch
* it is restored in build_huge_tlb_write_entry.
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index 1d36c511a7a..d53ff91b277 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -350,12 +350,14 @@ unsigned int plat_ipi_resched_int_xlate(unsigned int cpu)
static struct irqaction i8259irq = {
.handler = no_action,
- .name = "XT-PIC cascade"
+ .name = "XT-PIC cascade",
+ .flags = IRQF_NO_THREAD,
};
static struct irqaction corehi_irqaction = {
.handler = no_action,
- .name = "CoreHi"
+ .name = "CoreHi",
+ .flags = IRQF_NO_THREAD,
};
static msc_irqmap_t __initdata msc_irqmap[] = {
diff --git a/arch/mips/netlogic/xlr/Makefile b/arch/mips/netlogic/xlr/Makefile
index 9bd3f731f62..2dca585dd2f 100644
--- a/arch/mips/netlogic/xlr/Makefile
+++ b/arch/mips/netlogic/xlr/Makefile
@@ -2,4 +2,4 @@ obj-y += setup.o platform.o irq.o setup.o time.o
obj-$(CONFIG_SMP) += smp.o smpboot.o
obj-$(CONFIG_EARLY_PRINTK) += xlr_console.o
-EXTRA_CFLAGS += -Werror
+ccflags-y += -Werror
diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c
index 603d7493e96..8656388b34b 100644
--- a/arch/mips/pci/pci-lantiq.c
+++ b/arch/mips/pci/pci-lantiq.c
@@ -171,8 +171,13 @@ static int __devinit ltq_pci_startup(struct ltq_pci_data *conf)
u32 temp_buffer;
/* set clock to 33Mhz */
- ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0xf00000, LTQ_CGU_IFCCR);
- ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0x800000, LTQ_CGU_IFCCR);
+ if (ltq_is_ar9()) {
+ ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0x1f00000, LTQ_CGU_IFCCR);
+ ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0xe00000, LTQ_CGU_IFCCR);
+ } else {
+ ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0xf00000, LTQ_CGU_IFCCR);
+ ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0x800000, LTQ_CGU_IFCCR);
+ }
/* external or internal clock ? */
if (conf->clock) {
diff --git a/arch/mips/pci/pci-rc32434.c b/arch/mips/pci/pci-rc32434.c
index 764362ce5e4..5f3a69cebad 100644
--- a/arch/mips/pci/pci-rc32434.c
+++ b/arch/mips/pci/pci-rc32434.c
@@ -215,7 +215,7 @@ static int __init rc32434_pci_init(void)
rc32434_pcibridge_init();
io_map_base = ioremap(rc32434_res_pci_io1.start,
- resource_size(&rcrc32434_res_pci_io1));
+ resource_size(&rc32434_res_pci_io1));
if (!io_map_base)
return -ENOMEM;
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_irq.c b/arch/mips/pmc-sierra/msp71xx/msp_irq.c
index 4531c4a514b..d3c3d81757a 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_irq.c
+++ b/arch/mips/pmc-sierra/msp71xx/msp_irq.c
@@ -108,12 +108,14 @@ asmlinkage void plat_irq_dispatch(struct pt_regs *regs)
static struct irqaction cic_cascade_msp = {
.handler = no_action,
- .name = "MSP CIC cascade"
+ .name = "MSP CIC cascade",
+ .flags = IRQF_NO_THREAD,
};
static struct irqaction per_cascade_msp = {
.handler = no_action,
- .name = "MSP PER cascade"
+ .name = "MSP PER cascade",
+ .flags = IRQF_NO_THREAD,
};
void __init arch_init_irq(void)
diff --git a/arch/mips/pnx8550/common/int.c b/arch/mips/pnx8550/common/int.c
index 6b93c81779c..1ebe22bdadc 100644
--- a/arch/mips/pnx8550/common/int.c
+++ b/arch/mips/pnx8550/common/int.c
@@ -167,7 +167,7 @@ static struct irq_chip level_irq_type = {
static struct irqaction gic_action = {
.handler = no_action,
- .flags = IRQF_DISABLED,
+ .flags = IRQF_DISABLED | IRQF_NO_THREAD,
.name = "GIC",
};
diff --git a/arch/mips/sgi-ip22/ip22-int.c b/arch/mips/sgi-ip22/ip22-int.c
index b4d08e4d2ea..f72c336ea27 100644
--- a/arch/mips/sgi-ip22/ip22-int.c
+++ b/arch/mips/sgi-ip22/ip22-int.c
@@ -155,32 +155,32 @@ static void __irq_entry indy_buserror_irq(void)
static struct irqaction local0_cascade = {
.handler = no_action,
- .flags = IRQF_DISABLED,
+ .flags = IRQF_DISABLED | IRQF_NO_THREAD,
.name = "local0 cascade",
};
static struct irqaction local1_cascade = {
.handler = no_action,
- .flags = IRQF_DISABLED,
+ .flags = IRQF_DISABLED | IRQF_NO_THREAD,
.name = "local1 cascade",
};
static struct irqaction buserr = {
.handler = no_action,
- .flags = IRQF_DISABLED,
+ .flags = IRQF_DISABLED | IRQF_NO_THREAD,
.name = "Bus Error",
};
static struct irqaction map0_cascade = {
.handler = no_action,
- .flags = IRQF_DISABLED,
+ .flags = IRQF_DISABLED | IRQF_NO_THREAD,
.name = "mapable0 cascade",
};
#ifdef USE_LIO3_IRQ
static struct irqaction map1_cascade = {
.handler = no_action,
- .flags = IRQF_DISABLED,
+ .flags = IRQF_DISABLED | IRQF_NO_THREAD,
.name = "mapable1 cascade",
};
#define SGI_INTERRUPTS SGINT_END
diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
index a7e5a6d917b..3ab5b5d25b0 100644
--- a/arch/mips/sni/rm200.c
+++ b/arch/mips/sni/rm200.c
@@ -359,6 +359,7 @@ void sni_rm200_init_8259A(void)
static struct irqaction sni_rm200_irq2 = {
.handler = no_action,
.name = "cascade",
+ .flags = IRQF_NO_THREAD,
};
static struct resource sni_rm200_pic1_resource = {
diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
index 70a3b85f375..fad2bef432c 100644
--- a/arch/mips/vr41xx/common/irq.c
+++ b/arch/mips/vr41xx/common/irq.c
@@ -34,6 +34,7 @@ static irq_cascade_t irq_cascade[NR_IRQS] __cacheline_aligned;
static struct irqaction cascade_irqaction = {
.handler = no_action,
.name = "cascade",
+ .flags = IRQF_NO_THREAD,
};
int cascade_irq(unsigned int irq, int (*get_irq)(unsigned int))
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index 5cc83851ad0..31a7d3a7ce2 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -561,6 +561,20 @@ static struct pci_ops u4_pcie_pci_ops =
.write = u4_pcie_write_config,
};
+static void __devinit pmac_pci_fixup_u4_of_node(struct pci_dev *dev)
+{
+ /* Apple's device-tree "hides" the root complex virtual P2P bridge
+ * on U4. However, Linux sees it, causing the PCI <-> OF matching
+ * code to fail to properly match devices below it. This works around
+ * it by setting the node of the bridge to point to the PHB node,
+ * which is not entirely correct but fixes the matching code and
+ * doesn't break anything else. It's also the simplest possible fix.
+ */
+ if (dev->dev.of_node == NULL)
+ dev->dev.of_node = pcibios_get_phb_of_node(dev->bus);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_APPLE, 0x5b, pmac_pci_fixup_u4_of_node);
+
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_PPC32
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 64b61bf72e9..547f1a6a35d 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -188,7 +188,8 @@ extern char elf_platform[];
#define SET_PERSONALITY(ex) \
do { \
if (personality(current->personality) != PER_LINUX32) \
- set_personality(PER_LINUX); \
+ set_personality(PER_LINUX | \
+ (current->personality & ~PER_MASK)); \
if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
set_thread_flag(TIF_31BIT); \
else \
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 519eb5f187e..c0cb794bb36 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -658,12 +658,14 @@ static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste)
* struct gmap_struct - guest address space
* @mm: pointer to the parent mm_struct
* @table: pointer to the page directory
+ * @asce: address space control element for gmap page table
* @crst_list: list of all crst tables used in the guest address space
*/
struct gmap {
struct list_head list;
struct mm_struct *mm;
unsigned long *table;
+ unsigned long asce;
struct list_head crst_list;
};
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 532fd432215..2b45591e158 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -10,6 +10,7 @@
#include <linux/sched.h>
#include <asm/vdso.h>
#include <asm/sigp.h>
+#include <asm/pgtable.h>
/*
* Make sure that the compiler is new enough. We want a compiler that
@@ -126,6 +127,7 @@ int main(void)
DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack));
DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack));
DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack));
+ DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce));
DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags));
@@ -151,6 +153,7 @@ int main(void)
DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
DEFINE(__LC_CMF_HPP, offsetof(struct _lowcore, cmf_hpp));
+ DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce));
#endif /* CONFIG_32BIT */
return 0;
}
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 5f729d627ce..713da076053 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -1076,6 +1076,11 @@ sie_loop:
lg %r14,__LC_THREAD_INFO # pointer thread_info struct
tm __TI_flags+7(%r14),_TIF_EXIT_SIE
jnz sie_exit
+ lg %r14,__LC_GMAP # get gmap pointer
+ ltgr %r14,%r14
+ jz sie_gmap
+ lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce
+sie_gmap:
lg %r14,__SF_EMPTY(%r15) # get control block pointer
SPP __SF_EMPTY(%r15) # set guest id
sie 0(%r14)
@@ -1083,6 +1088,7 @@ sie_done:
SPP __LC_CMF_HPP # set host id
lg %r14,__LC_THREAD_INFO # pointer thread_info struct
sie_exit:
+ lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
ni __TI_flags+6(%r14),255-(_TIF_SIE>>8)
lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
stmg %r0,%r13,0(%r14) # save guest gprs 0-13
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index f17296e4fc8..dc2b580e27b 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -123,6 +123,7 @@ int kvm_dev_ioctl_check_extension(long ext)
switch (ext) {
case KVM_CAP_S390_PSW:
+ case KVM_CAP_S390_GMAP:
r = 1;
break;
default:
@@ -263,10 +264,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
restore_fp_regs(&vcpu->arch.guest_fpregs);
restore_access_regs(vcpu->arch.guest_acrs);
+ gmap_enable(vcpu->arch.gmap);
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
+ gmap_disable(vcpu->arch.gmap);
save_fp_regs(&vcpu->arch.guest_fpregs);
save_access_regs(vcpu->arch.guest_acrs);
restore_fp_regs(&vcpu->arch.host_fpregs);
@@ -461,7 +464,6 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
local_irq_disable();
kvm_guest_enter();
local_irq_enable();
- gmap_enable(vcpu->arch.gmap);
VCPU_EVENT(vcpu, 6, "entering sie flags %x",
atomic_read(&vcpu->arch.sie_block->cpuflags));
if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
@@ -470,7 +472,6 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
}
VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
vcpu->arch.sie_block->icptcode);
- gmap_disable(vcpu->arch.gmap);
local_irq_disable();
kvm_guest_exit();
local_irq_enable();
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 4d1f2bce87b..5d56c2b95b1 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -160,6 +160,8 @@ struct gmap *gmap_alloc(struct mm_struct *mm)
table = (unsigned long *) page_to_phys(page);
crst_table_init(table, _REGION1_ENTRY_EMPTY);
gmap->table = table;
+ gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
+ _ASCE_USER_BITS | __pa(table);
list_add(&gmap->list, &mm->context.gmap_list);
return gmap;
@@ -240,10 +242,6 @@ EXPORT_SYMBOL_GPL(gmap_free);
*/
void gmap_enable(struct gmap *gmap)
{
- /* Load primary space page table origin. */
- S390_lowcore.user_asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS | __pa(gmap->table);
- asm volatile("lctlg 1,1,%0\n" : : "m" (S390_lowcore.user_asce) );
S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);
@@ -254,10 +252,6 @@ EXPORT_SYMBOL_GPL(gmap_enable);
*/
void gmap_disable(struct gmap *gmap)
{
- /* Load primary space page table origin. */
- S390_lowcore.user_asce =
- gmap->mm->context.asce_bits | __pa(gmap->mm->pgd);
- asm volatile("lctlg 1,1,%0\n" : : "m" (S390_lowcore.user_asce) );
S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);
@@ -309,15 +303,15 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
/* Walk the guest addr space page table */
table = gmap->table + (((to + off) >> 53) & 0x7ff);
if (*table & _REGION_ENTRY_INV)
- return 0;
+ goto out;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + (((to + off) >> 42) & 0x7ff);
if (*table & _REGION_ENTRY_INV)
- return 0;
+ goto out;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + (((to + off) >> 31) & 0x7ff);
if (*table & _REGION_ENTRY_INV)
- return 0;
+ goto out;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + (((to + off) >> 20) & 0x7ff);
@@ -325,6 +319,7 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
flush |= gmap_unlink_segment(gmap, table);
*table = _SEGMENT_ENTRY_INV;
}
+out:
up_read(&gmap->mm->mmap_sem);
if (flush)
gmap_flush_tlb(gmap);
diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
index 1407c07bdad..f6ae2b2b687 100644
--- a/arch/sparc/include/asm/pgtsrmmu.h
+++ b/arch/sparc/include/asm/pgtsrmmu.h
@@ -280,7 +280,7 @@ static inline unsigned long srmmu_hwprobe(unsigned long vaddr)
return retval;
}
#else
-#define srmmu_hwprobe(addr) (srmmu_swprobe(addr, 0) & SRMMU_PTE_PMASK)
+#define srmmu_hwprobe(addr) srmmu_swprobe(addr, 0)
#endif
static inline int
diff --git a/arch/sparc/include/asm/spitfire.h b/arch/sparc/include/asm/spitfire.h
index 55a17c6efeb..d06a2660175 100644
--- a/arch/sparc/include/asm/spitfire.h
+++ b/arch/sparc/include/asm/spitfire.h
@@ -43,6 +43,8 @@
#define SUN4V_CHIP_NIAGARA1 0x01
#define SUN4V_CHIP_NIAGARA2 0x02
#define SUN4V_CHIP_NIAGARA3 0x03
+#define SUN4V_CHIP_NIAGARA4 0x04
+#define SUN4V_CHIP_NIAGARA5 0x05
#define SUN4V_CHIP_UNKNOWN 0xff
#ifndef __ASSEMBLY__
diff --git a/arch/sparc/include/asm/xor_64.h b/arch/sparc/include/asm/xor_64.h
index 9ed6ff679ab..ee8edc68423 100644
--- a/arch/sparc/include/asm/xor_64.h
+++ b/arch/sparc/include/asm/xor_64.h
@@ -66,6 +66,8 @@ static struct xor_block_template xor_block_niagara = {
((tlb_type == hypervisor && \
(sun4v_chip_type == SUN4V_CHIP_NIAGARA1 || \
sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || \
- sun4v_chip_type == SUN4V_CHIP_NIAGARA3)) ? \
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || \
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || \
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA5)) ? \
&xor_block_niagara : \
&xor_block_VIS)
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
index 9810fd88105..ba9b1cec4e6 100644
--- a/arch/sparc/kernel/cpu.c
+++ b/arch/sparc/kernel/cpu.c
@@ -481,6 +481,18 @@ static void __init sun4v_cpu_probe(void)
sparc_pmu_type = "niagara3";
break;
+ case SUN4V_CHIP_NIAGARA4:
+ sparc_cpu_type = "UltraSparc T4 (Niagara4)";
+ sparc_fpu_type = "UltraSparc T4 integrated FPU";
+ sparc_pmu_type = "niagara4";
+ break;
+
+ case SUN4V_CHIP_NIAGARA5:
+ sparc_cpu_type = "UltraSparc T5 (Niagara5)";
+ sparc_fpu_type = "UltraSparc T5 integrated FPU";
+ sparc_pmu_type = "niagara5";
+ break;
+
default:
printk(KERN_WARNING "CPU: Unknown sun4v cpu type [%s]\n",
prom_cpu_compatible);
diff --git a/arch/sparc/kernel/cpumap.c b/arch/sparc/kernel/cpumap.c
index 4197e8d62d4..9323eafccb9 100644
--- a/arch/sparc/kernel/cpumap.c
+++ b/arch/sparc/kernel/cpumap.c
@@ -325,6 +325,8 @@ static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index)
case SUN4V_CHIP_NIAGARA1:
case SUN4V_CHIP_NIAGARA2:
case SUN4V_CHIP_NIAGARA3:
+ case SUN4V_CHIP_NIAGARA4:
+ case SUN4V_CHIP_NIAGARA5:
rover_inc_table = niagara_iterate_method;
break;
default:
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index 0eac1b2fc53..0d810c2f1d0 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -133,7 +133,7 @@ prom_sun4v_name:
prom_niagara_prefix:
.asciz "SUNW,UltraSPARC-T"
prom_sparc_prefix:
- .asciz "SPARC-T"
+ .asciz "SPARC-"
.align 4
prom_root_compatible:
.skip 64
@@ -396,7 +396,7 @@ sun4v_chip_type:
or %g1, %lo(prom_cpu_compatible), %g1
sethi %hi(prom_sparc_prefix), %g7
or %g7, %lo(prom_sparc_prefix), %g7
- mov 7, %g3
+ mov 6, %g3
90: ldub [%g7], %g2
ldub [%g1], %g4
cmp %g2, %g4
@@ -408,10 +408,23 @@ sun4v_chip_type:
sethi %hi(prom_cpu_compatible), %g1
or %g1, %lo(prom_cpu_compatible), %g1
- ldub [%g1 + 7], %g2
+ ldub [%g1 + 6], %g2
+ cmp %g2, 'T'
+ be,pt %xcc, 70f
+ cmp %g2, 'M'
+ bne,pn %xcc, 4f
+ nop
+
+70: ldub [%g1 + 7], %g2
cmp %g2, '3'
be,pt %xcc, 5f
mov SUN4V_CHIP_NIAGARA3, %g4
+ cmp %g2, '4'
+ be,pt %xcc, 5f
+ mov SUN4V_CHIP_NIAGARA4, %g4
+ cmp %g2, '5'
+ be,pt %xcc, 5f
+ mov SUN4V_CHIP_NIAGARA5, %g4
ba,pt %xcc, 4f
nop
@@ -545,6 +558,12 @@ niagara_tlb_fixup:
cmp %g1, SUN4V_CHIP_NIAGARA3
be,pt %xcc, niagara2_patch
nop
+ cmp %g1, SUN4V_CHIP_NIAGARA4
+ be,pt %xcc, niagara2_patch
+ nop
+ cmp %g1, SUN4V_CHIP_NIAGARA5
+ be,pt %xcc, niagara2_patch
+ nop
call generic_patch_copyops
nop
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 1e94f946570..8aa0d440858 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -230,7 +230,8 @@ static void pci_parse_of_addrs(struct platform_device *op,
res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
} else if (i == dev->rom_base_reg) {
res = &dev->resource[PCI_ROM_RESOURCE];
- flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
+ flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE
+ | IORESOURCE_SIZEALIGN;
} else {
printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
continue;
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index c8cc461ff75..f793742eec2 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -380,8 +380,7 @@ void flush_thread(void)
#endif
}
- /* Now, this task is no longer a kernel thread. */
- current->thread.current_ds = USER_DS;
+ /* This task is no longer a kernel thread. */
if (current->thread.flags & SPARC_FLAG_KTHREAD) {
current->thread.flags &= ~SPARC_FLAG_KTHREAD;
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index c158a95ec66..d959cd0a4aa 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -368,9 +368,6 @@ void flush_thread(void)
/* Clear FPU register state. */
t->fpsaved[0] = 0;
-
- if (get_thread_current_ds() != ASI_AIUS)
- set_fs(USER_DS);
}
/* It's a bit more tricky when 64-bit tasks are involved... */
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index d26e1f6c717..3e3e2914c70 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -137,7 +137,7 @@ static void __init process_switch(char c)
prom_halt();
break;
case 'p':
- /* Just ignore, this behavior is now the default. */
+ prom_early_console.flags &= ~CON_BOOT;
break;
default:
printk("Unknown boot switch (-%c)\n", c);
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 3c5bb784214..c965595aa7e 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -106,7 +106,7 @@ static void __init process_switch(char c)
prom_halt();
break;
case 'p':
- /* Just ignore, this behavior is now the default. */
+ prom_early_console.flags &= ~CON_BOOT;
break;
case 'P':
/* Force UltraSPARC-III P-Cache on. */
@@ -425,10 +425,14 @@ static void __init init_sparc64_elf_hwcap(void)
else if (tlb_type == hypervisor) {
if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
- sun4v_chip_type == SUN4V_CHIP_NIAGARA3)
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA5)
cap |= HWCAP_SPARC_BLKINIT;
if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
- sun4v_chip_type == SUN4V_CHIP_NIAGARA3)
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA5)
cap |= HWCAP_SPARC_N2;
}
@@ -452,11 +456,15 @@ static void __init init_sparc64_elf_hwcap(void)
if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1)
cap |= AV_SPARC_ASI_BLK_INIT;
if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
- sun4v_chip_type == SUN4V_CHIP_NIAGARA3)
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA5)
cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
AV_SPARC_ASI_BLK_INIT |
AV_SPARC_POPC);
- if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3)
+ if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA5)
cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
AV_SPARC_FMAF);
}
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index 1ba95aff5d5..2caa556db86 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -273,10 +273,7 @@ void do_sigreturn32(struct pt_regs *regs)
case 1: set.sig[0] = seta[0] + (((long)seta[1]) << 32);
}
sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ set_current_blocked(&set);
return;
segv:
@@ -377,10 +374,7 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
case 1: set.sig[0] = seta.sig[0] + (((long)seta.sig[1]) << 32);
}
sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ set_current_blocked(&set);
return;
segv:
force_sig(SIGSEGV, current);
@@ -782,6 +776,7 @@ static inline int handle_signal32(unsigned long signr, struct k_sigaction *ka,
siginfo_t *info,
sigset_t *oldset, struct pt_regs *regs)
{
+ sigset_t blocked;
int err;
if (ka->sa.sa_flags & SA_SIGINFO)
@@ -792,12 +787,10 @@ static inline int handle_signal32(unsigned long signr, struct k_sigaction *ka,
if (err)
return err;
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
if (!(ka->sa.sa_flags & SA_NOMASK))
- sigaddset(&current->blocked,signr);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ sigaddset(&blocked, signr);
+ set_current_blocked(&blocked);
tracehook_signal_handler(signr, info, ka, regs, 0);
@@ -881,7 +874,7 @@ void do_signal32(sigset_t *oldset, struct pt_regs * regs,
*/
if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
- sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+ set_current_blocked(&current->saved_sigmask);
}
}
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index 04ede8f04ad..8ce247ac04c 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -62,12 +62,13 @@ struct rt_signal_frame {
static int _sigpause_common(old_sigset_t set)
{
- set &= _BLOCKABLE;
- spin_lock_irq(&current->sighand->siglock);
+ sigset_t blocked;
+
current->saved_sigmask = current->blocked;
- siginitset(&current->blocked, set);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+
+ set &= _BLOCKABLE;
+ siginitset(&blocked, set);
+ set_current_blocked(&blocked);
current->state = TASK_INTERRUPTIBLE;
schedule();
@@ -139,10 +140,7 @@ asmlinkage void do_sigreturn(struct pt_regs *regs)
goto segv_and_exit;
sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ set_current_blocked(&set);
return;
segv_and_exit:
@@ -209,10 +207,7 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
}
sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ set_current_blocked(&set);
return;
segv:
force_sig(SIGSEGV, current);
@@ -470,6 +465,7 @@ static inline int
handle_signal(unsigned long signr, struct k_sigaction *ka,
siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
{
+ sigset_t blocked;
int err;
if (ka->sa.sa_flags & SA_SIGINFO)
@@ -480,12 +476,10 @@ handle_signal(unsigned long signr, struct k_sigaction *ka,
if (err)
return err;
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
if (!(ka->sa.sa_flags & SA_NOMASK))
- sigaddset(&current->blocked, signr);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ sigaddset(&blocked, signr);
+ set_current_blocked(&blocked);
tracehook_signal_handler(signr, info, ka, regs, 0);
@@ -581,7 +575,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
*/
if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
clear_thread_flag(TIF_RESTORE_SIGMASK);
- sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+ set_current_blocked(&current->saved_sigmask);
}
}
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index 47509df3b89..a2b81598d90 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -70,10 +70,7 @@ asmlinkage void sparc64_set_context(struct pt_regs *regs)
goto do_sigsegv;
}
sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ set_current_blocked(&set);
}
if (test_thread_flag(TIF_32BIT)) {
pc &= 0xffffffff;
@@ -242,12 +239,13 @@ struct rt_signal_frame {
static long _sigpause_common(old_sigset_t set)
{
- set &= _BLOCKABLE;
- spin_lock_irq(&current->sighand->siglock);
+ sigset_t blocked;
+
current->saved_sigmask = current->blocked;
- siginitset(&current->blocked, set);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+
+ set &= _BLOCKABLE;
+ siginitset(&blocked, set);
+ set_current_blocked(&blocked);
current->state = TASK_INTERRUPTIBLE;
schedule();
@@ -327,10 +325,7 @@ void do_rt_sigreturn(struct pt_regs *regs)
pt_regs_clear_syscall(regs);
sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ set_current_blocked(&set);
return;
segv:
force_sig(SIGSEGV, current);
@@ -484,18 +479,17 @@ static inline int handle_signal(unsigned long signr, struct k_sigaction *ka,
siginfo_t *info,
sigset_t *oldset, struct pt_regs *regs)
{
+ sigset_t blocked;
int err;
err = setup_rt_frame(ka, regs, signr, oldset,
(ka->sa.sa_flags & SA_SIGINFO) ? info : NULL);
if (err)
return err;
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
if (!(ka->sa.sa_flags & SA_NOMASK))
- sigaddset(&current->blocked,signr);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ sigaddset(&blocked, signr);
+ set_current_blocked(&blocked);
tracehook_signal_handler(signr, info, ka, regs, 0);
@@ -601,7 +595,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
*/
if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
- sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+ set_current_blocked(&current->saved_sigmask);
}
}
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 581531dbc8b..8e073d80213 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -511,6 +511,11 @@ static void __init read_obp_translations(void)
for (i = 0; i < prom_trans_ents; i++)
prom_trans[i].data &= ~0x0003fe0000000000UL;
}
+
+ /* Force execute bit on. */
+ for (i = 0; i < prom_trans_ents; i++)
+ prom_trans[i].data |= (tlb_type == hypervisor ?
+ _PAGE_EXEC_4V : _PAGE_EXEC_4U);
}
static void __init hypervisor_tlb_lock(unsigned long vaddr,
diff --git a/arch/sparc/mm/leon_mm.c b/arch/sparc/mm/leon_mm.c
index e485a680499..13c2169822a 100644
--- a/arch/sparc/mm/leon_mm.c
+++ b/arch/sparc/mm/leon_mm.c
@@ -162,7 +162,7 @@ ready:
printk(KERN_INFO "swprobe: padde %x\n", paddr_calc);
if (paddr)
*paddr = paddr_calc;
- return paddrbase;
+ return pte;
}
void leon_flush_icache_all(void)
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index fc94607f0bd..aecc8ed5f39 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -21,7 +21,7 @@
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
-#include <linux/atomic.h>
+#include <asm/atomic_32.h>
#include <asm/asm-offsets.h>
#include <hv/hypervisor.h>
#include <arch/abi.h>
diff --git a/arch/tile/lib/atomic_asm_32.S b/arch/tile/lib/atomic_asm_32.S
index 1f75a2a5610..30638042691 100644
--- a/arch/tile/lib/atomic_asm_32.S
+++ b/arch/tile/lib/atomic_asm_32.S
@@ -70,7 +70,7 @@
*/
#include <linux/linkage.h>
-#include <linux/atomic.h>
+#include <asm/atomic_32.h>
#include <asm/page.h>
#include <asm/processor.h>
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
index 4554cc6fb96..091508b533b 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
@@ -16,7 +16,6 @@
#endif
.macro altinstruction_entry orig alt feature orig_len alt_len
- .align 8
.long \orig - .
.long \alt - .
.word \feature
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 23fb6d79f20..37ad100a221 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -48,9 +48,6 @@ struct alt_instr {
u16 cpuid; /* cpuid bit set for replacement */
u8 instrlen; /* length of original instruction */
u8 replacementlen; /* length of new instruction, <= instrlen */
-#ifdef CONFIG_X86_64
- u32 pad2;
-#endif
};
extern void alternative_instructions(void);
@@ -83,7 +80,6 @@ static inline int alternatives_text_reserved(void *start, void *end)
\
"661:\n\t" oldinstr "\n662:\n" \
".section .altinstructions,\"a\"\n" \
- _ASM_ALIGN "\n" \
" .long 661b - .\n" /* label */ \
" .long 663f - .\n" /* new instruction */ \
" .word " __stringify(feature) "\n" /* feature bit */ \
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 4258aac99a6..88b23a43f34 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -332,7 +332,6 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
asm goto("1: jmp %l[t_no]\n"
"2:\n"
".section .altinstructions,\"a\"\n"
- _ASM_ALIGN "\n"
" .long 1b - .\n"
" .long 0\n" /* no replacement */
" .word %P0\n" /* feature bit */
@@ -350,7 +349,6 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
asm volatile("1: movb $0,%0\n"
"2:\n"
".section .altinstructions,\"a\"\n"
- _ASM_ALIGN "\n"
" .long 1b - .\n"
" .long 3f - .\n"
" .word %P1\n" /* feature bit */
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 7ff4669580c..c34f96c2f7a 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -12,6 +12,7 @@
#include <asm/pgtable.h>
#include <xen/interface/xen.h>
+#include <xen/grant_table.h>
#include <xen/features.h>
/* Xen machine address */
@@ -48,14 +49,11 @@ extern unsigned long set_phys_range_identity(unsigned long pfn_s,
unsigned long pfn_e);
extern int m2p_add_override(unsigned long mfn, struct page *page,
- bool clear_pte);
+ struct gnttab_map_grant_ref *kmap_op);
extern int m2p_remove_override(struct page *page, bool clear_pte);
extern struct page *m2p_find_override(unsigned long mfn);
extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
-#ifdef CONFIG_XEN_DEBUG_FS
-extern int p2m_dump_show(struct seq_file *m, void *v);
-#endif
static inline unsigned long pfn_to_mfn(unsigned long pfn)
{
unsigned long mfn;
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index f1a6244d7d9..794bc95134c 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -75,8 +75,10 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
/*
* Undefined/reserved opcodes, conditional jump, Opcode Extension
* Groups, and some special opcodes can not boost.
+ * This is non-const to keep gcc from statically optimizing it out, as
+ * variable_test_bit makes gcc think only *(unsigned long*) is used.
*/
-static const u32 twobyte_is_boostable[256 / 32] = {
+static u32 twobyte_is_boostable[256 / 32] = {
/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
/* ---------------------------------------------- */
W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
index 3f2ad2640d8..ccdbc16b894 100644
--- a/arch/x86/kernel/rtc.c
+++ b/arch/x86/kernel/rtc.c
@@ -42,8 +42,11 @@ int mach_set_rtc_mmss(unsigned long nowtime)
{
int real_seconds, real_minutes, cmos_minutes;
unsigned char save_control, save_freq_select;
+ unsigned long flags;
int retval = 0;
+ spin_lock_irqsave(&rtc_lock, flags);
+
/* tell the clock it's being set */
save_control = CMOS_READ(RTC_CONTROL);
CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
@@ -93,12 +96,17 @@ int mach_set_rtc_mmss(unsigned long nowtime)
CMOS_WRITE(save_control, RTC_CONTROL);
CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+
return retval;
}
unsigned long mach_get_cmos_time(void)
{
unsigned int status, year, mon, day, hour, min, sec, century = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtc_lock, flags);
/*
* If UIP is clear, then we have >= 244 microseconds before
@@ -125,6 +133,8 @@ unsigned long mach_get_cmos_time(void)
status = CMOS_READ(RTC_CONTROL);
WARN_ON_ONCE(RTC_ALWAYS_BCD && (status & RTC_DM_BINARY));
+ spin_unlock_irqrestore(&rtc_lock, flags);
+
if (RTC_ALWAYS_BCD || !(status & RTC_DM_BINARY)) {
sec = bcd2bin(sec);
min = bcd2bin(min);
@@ -169,24 +179,15 @@ EXPORT_SYMBOL(rtc_cmos_write);
int update_persistent_clock(struct timespec now)
{
- unsigned long flags;
- int retval;
-
- spin_lock_irqsave(&rtc_lock, flags);
- retval = x86_platform.set_wallclock(now.tv_sec);
- spin_unlock_irqrestore(&rtc_lock, flags);
-
- return retval;
+ return x86_platform.set_wallclock(now.tv_sec);
}
/* not static: needed by APM */
void read_persistent_clock(struct timespec *ts)
{
- unsigned long retval, flags;
+ unsigned long retval;
- spin_lock_irqsave(&rtc_lock, flags);
retval = x86_platform.get_wallclock();
- spin_unlock_irqrestore(&rtc_lock, flags);
ts->tv_sec = retval;
ts->tv_nsec = 0;
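
The rtc.c hunks above move acquisition of rtc_lock out of the generic update_persistent_clock()/read_persistent_clock() wrappers and into the low-level wallclock accessors themselves, so each platform implementation manages the lock around its own CMOS accesses. A minimal sketch of that pattern, with illustrative names (rtc_lock_sketch, cmos_read_sec) standing in for the real symbols:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(rtc_lock_sketch);	/* stand-in for the global rtc_lock */

static unsigned int cmos_read_sec(void)
{
	return 0;	/* placeholder for CMOS_READ(RTC_SECONDS) */
}

unsigned long sketch_get_wallclock(void)
{
	unsigned long flags;
	unsigned int sec;

	/* Disable interrupts while holding the lock so the CMOS index/data
	 * accesses cannot be interleaved by an interrupt or another CPU. */
	spin_lock_irqsave(&rtc_lock_sketch, flags);
	sec = cmos_read_sec();
	spin_unlock_irqrestore(&rtc_lock_sketch, flags);

	return sec;
}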
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 18ae83dd1cd..b56c65de384 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -56,7 +56,7 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
.lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
};
-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
+static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
static int __init vsyscall_setup(char *str)
{
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 6f08bc940fa..8b4cc5f067d 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -3603,7 +3603,7 @@ done_prefixes:
break;
case Src2CL:
ctxt->src2.bytes = 1;
- ctxt->src2.val = ctxt->regs[VCPU_REGS_RCX] & 0x8;
+ ctxt->src2.val = ctxt->regs[VCPU_REGS_RCX] & 0xff;
break;
case Src2ImmByte:
rc = decode_imm(ctxt, &ctxt->src2, 1, true);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 1c5b69373a0..8e8da7960db 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -400,7 +400,8 @@ static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
/* xchg acts as a barrier before the setting of the high bits */
orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
- orig.spte_high = ssptep->spte_high = sspte.spte_high;
+ orig.spte_high = ssptep->spte_high;
+ ssptep->spte_high = sspte.spte_high;
count_spte_clear(sptep, spte);
return orig.spte;
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 30326443ab8..87488b93a65 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -63,9 +63,8 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
#ifdef CONFIG_X86_32
/* for fixmap */
tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);
-
- good_end = max_pfn_mapped << PAGE_SHIFT;
#endif
+ good_end = max_pfn_mapped << PAGE_SHIFT;
base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE);
if (base == MEMBLOCK_ERROR)
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 039d91315bc..404f21a3ff9 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -43,6 +43,17 @@ static const struct dmi_system_id pci_use_crs_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "ALiveSATA2-GLAN"),
},
},
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=30552 */
+ /* 2006 AMD HT/VIA system with two host bridges */
+ {
+ .callback = set_use_crs,
+ .ident = "ASUS M2V-MX SE",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "M2V-MX SE"),
+ DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
+ },
+ },
{}
};
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 1017c7bee38..492ade8c978 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -175,8 +175,10 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
"pcifront-msi-x" :
"pcifront-msi",
DOMID_SELF);
- if (irq < 0)
+ if (irq < 0) {
+ ret = irq;
goto free;
+ }
i++;
}
kfree(v);
@@ -221,8 +223,10 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
if (msg.data != XEN_PIRQ_MSI_DATA ||
xen_irq_from_pirq(pirq) < 0) {
pirq = xen_allocate_pirq_msi(dev, msidesc);
- if (pirq < 0)
+ if (pirq < 0) {
+ irq = -ENODEV;
goto error;
+ }
xen_msi_compose_msg(dev, pirq, &msg);
__write_msi_msg(msidesc, &msg);
dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
@@ -244,10 +248,12 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
error:
dev_err(&dev->dev,
"Xen PCI frontend has not registered MSI/MSI-X support!\n");
- return -ENODEV;
+ return irq;
}
#ifdef CONFIG_XEN_DOM0
+static bool __read_mostly pci_seg_supported = true;
+
static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
int ret = 0;
@@ -265,10 +271,11 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
memset(&map_irq, 0, sizeof(map_irq));
map_irq.domid = domid;
- map_irq.type = MAP_PIRQ_TYPE_MSI;
+ map_irq.type = MAP_PIRQ_TYPE_MSI_SEG;
map_irq.index = -1;
map_irq.pirq = -1;
- map_irq.bus = dev->bus->number;
+ map_irq.bus = dev->bus->number |
+ (pci_domain_nr(dev->bus) << 16);
map_irq.devfn = dev->devfn;
if (type == PCI_CAP_ID_MSIX) {
@@ -285,7 +292,20 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
map_irq.entry_nr = msidesc->msi_attrib.entry_nr;
}
- ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
+ ret = -EINVAL;
+ if (pci_seg_supported)
+ ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq,
+ &map_irq);
+ if (ret == -EINVAL && !pci_domain_nr(dev->bus)) {
+ map_irq.type = MAP_PIRQ_TYPE_MSI;
+ map_irq.index = -1;
+ map_irq.pirq = -1;
+ map_irq.bus = dev->bus->number;
+ ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq,
+ &map_irq);
+ if (ret != -EINVAL)
+ pci_seg_supported = false;
+ }
if (ret) {
dev_warn(&dev->dev, "xen map irq failed %d for %d domain\n",
ret, domid);
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
index 58425adc22c..fe73276e026 100644
--- a/arch/x86/platform/mrst/mrst.c
+++ b/arch/x86/platform/mrst/mrst.c
@@ -678,38 +678,40 @@ static int __init sfi_parse_devs(struct sfi_table_header *table)
pentry = (struct sfi_device_table_entry *)sb->pentry;
for (i = 0; i < num; i++, pentry++) {
- if (pentry->irq != (u8)0xff) { /* native RTE case */
+ int irq = pentry->irq;
+
+ if (irq != (u8)0xff) { /* native RTE case */
/* these SPI2 devices are not exposed to system as PCI
* devices, but they have separate RTE entry in IOAPIC
* so we have to enable them one by one here
*/
- ioapic = mp_find_ioapic(pentry->irq);
+ ioapic = mp_find_ioapic(irq);
irq_attr.ioapic = ioapic;
- irq_attr.ioapic_pin = pentry->irq;
+ irq_attr.ioapic_pin = irq;
irq_attr.trigger = 1;
irq_attr.polarity = 1;
- io_apic_set_pci_routing(NULL, pentry->irq, &irq_attr);
+ io_apic_set_pci_routing(NULL, irq, &irq_attr);
} else
- pentry->irq = 0; /* No irq */
+ irq = 0; /* No irq */
switch (pentry->type) {
case SFI_DEV_TYPE_IPC:
/* ID as IRQ is a hack that will go away */
- pdev = platform_device_alloc(pentry->name, pentry->irq);
+ pdev = platform_device_alloc(pentry->name, irq);
if (pdev == NULL) {
pr_err("out of memory for SFI platform device '%s'.\n",
pentry->name);
continue;
}
- install_irq_resource(pdev, pentry->irq);
+ install_irq_resource(pdev, irq);
pr_debug("info[%2d]: IPC bus, name = %16.16s, "
- "irq = 0x%2x\n", i, pentry->name, pentry->irq);
+ "irq = 0x%2x\n", i, pentry->name, irq);
sfi_handle_ipc_dev(pdev);
break;
case SFI_DEV_TYPE_SPI:
memset(&spi_info, 0, sizeof(spi_info));
strncpy(spi_info.modalias, pentry->name, SFI_NAME_LEN);
- spi_info.irq = pentry->irq;
+ spi_info.irq = irq;
spi_info.bus_num = pentry->host_num;
spi_info.chip_select = pentry->addr;
spi_info.max_speed_hz = pentry->max_freq;
@@ -726,7 +728,7 @@ static int __init sfi_parse_devs(struct sfi_table_header *table)
memset(&i2c_info, 0, sizeof(i2c_info));
bus = pentry->host_num;
strncpy(i2c_info.type, pentry->name, SFI_NAME_LEN);
- i2c_info.irq = pentry->irq;
+ i2c_info.irq = irq;
i2c_info.addr = pentry->addr;
pr_debug("info[%2d]: I2C bus = %d, name = %16.16s, "
"irq = 0x%2x, addr = 0x%x\n", i, bus,
diff --git a/arch/x86/platform/mrst/vrtc.c b/arch/x86/platform/mrst/vrtc.c
index 73d70d65e76..6d5dbcdd444 100644
--- a/arch/x86/platform/mrst/vrtc.c
+++ b/arch/x86/platform/mrst/vrtc.c
@@ -58,8 +58,11 @@ EXPORT_SYMBOL_GPL(vrtc_cmos_write);
unsigned long vrtc_get_time(void)
{
u8 sec, min, hour, mday, mon;
+ unsigned long flags;
u32 year;
+ spin_lock_irqsave(&rtc_lock, flags);
+
while ((vrtc_cmos_read(RTC_FREQ_SELECT) & RTC_UIP))
cpu_relax();
@@ -70,6 +73,8 @@ unsigned long vrtc_get_time(void)
mon = vrtc_cmos_read(RTC_MONTH);
year = vrtc_cmos_read(RTC_YEAR);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+
/* vRTC YEAR reg contains the offset to 1960 */
year += 1960;
@@ -83,8 +88,10 @@ unsigned long vrtc_get_time(void)
int vrtc_set_mmss(unsigned long nowtime)
{
int real_sec, real_min;
+ unsigned long flags;
int vrtc_min;
+ spin_lock_irqsave(&rtc_lock, flags);
vrtc_min = vrtc_cmos_read(RTC_MINUTES);
real_sec = nowtime % 60;
@@ -95,6 +102,8 @@ int vrtc_set_mmss(unsigned long nowtime)
vrtc_cmos_write(real_sec, RTC_SECONDS);
vrtc_cmos_write(real_min, RTC_MINUTES);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+
return 0;
}
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index 5cc821cb2e0..26c731a106a 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -25,8 +25,7 @@ config XEN_PRIVILEGED_GUEST
config XEN_PVHVM
def_bool y
- depends on XEN
- depends on X86_LOCAL_APIC
+ depends on XEN && PCI && X86_LOCAL_APIC
config XEN_MAX_DOMAIN_MEMORY
int
@@ -49,11 +48,3 @@ config XEN_DEBUG_FS
help
Enable statistics output and various tuning options in debugfs.
Enabling this option may incur a significant performance overhead.
-
-config XEN_DEBUG
- bool "Enable Xen debug checks"
- depends on XEN
- default n
- help
- Enable various WARN_ON checks in the Xen MMU code.
- Enabling this option WILL incur a significant performance overhead.
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 2d69617950f..da8afd576a6 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -251,6 +251,7 @@ static void __init xen_init_cpuid_mask(void)
~((1 << X86_FEATURE_APIC) | /* disable local APIC */
(1 << X86_FEATURE_ACPI)); /* disable ACPI */
ax = 1;
+ cx = 0;
xen_cpuid(&ax, &bx, &cx, &dx);
xsave_mask =
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 20a61427506..87f6673b120 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -495,41 +495,6 @@ static pte_t xen_make_pte(pteval_t pte)
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
-#ifdef CONFIG_XEN_DEBUG
-pte_t xen_make_pte_debug(pteval_t pte)
-{
- phys_addr_t addr = (pte & PTE_PFN_MASK);
- phys_addr_t other_addr;
- bool io_page = false;
- pte_t _pte;
-
- if (pte & _PAGE_IOMAP)
- io_page = true;
-
- _pte = xen_make_pte(pte);
-
- if (!addr)
- return _pte;
-
- if (io_page &&
- (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
- other_addr = pfn_to_mfn(addr >> PAGE_SHIFT) << PAGE_SHIFT;
- WARN_ONCE(addr != other_addr,
- "0x%lx is using VM_IO, but it is 0x%lx!\n",
- (unsigned long)addr, (unsigned long)other_addr);
- } else {
- pteval_t iomap_set = (_pte.pte & PTE_FLAGS_MASK) & _PAGE_IOMAP;
- other_addr = (_pte.pte & PTE_PFN_MASK);
- WARN_ONCE((addr == other_addr) && (!io_page) && (!iomap_set),
- "0x%lx is missing VM_IO (and wasn't fixed)!\n",
- (unsigned long)addr);
- }
-
- return _pte;
-}
-PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_debug);
-#endif
-
static pgd_t xen_make_pgd(pgdval_t pgd)
{
pgd = pte_pfn_to_mfn(pgd);
@@ -1721,10 +1686,8 @@ void __init xen_setup_machphys_mapping(void)
machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
}
#ifdef CONFIG_X86_32
- if ((machine_to_phys_mapping + machine_to_phys_nr)
- < machine_to_phys_mapping)
- machine_to_phys_nr = (unsigned long *)NULL
- - machine_to_phys_mapping;
+ WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1))
+ < machine_to_phys_mapping);
#endif
}
@@ -1994,9 +1957,6 @@ void __init xen_ident_map_ISA(void)
static void __init xen_post_allocator_init(void)
{
-#ifdef CONFIG_XEN_DEBUG
- pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte_debug);
-#endif
pv_mmu_ops.set_pte = xen_set_pte;
pv_mmu_ops.set_pmd = xen_set_pmd;
pv_mmu_ops.set_pud = xen_set_pud;
@@ -2406,17 +2366,3 @@ out:
return err;
}
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
-
-#ifdef CONFIG_XEN_DEBUG_FS
-static int p2m_dump_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, p2m_dump_show, NULL);
-}
-
-static const struct file_operations p2m_dump_fops = {
- .open = p2m_dump_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-#endif /* CONFIG_XEN_DEBUG_FS */
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 58efeb9d544..1b267e75158 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -161,7 +161,9 @@
#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
+#include <xen/grant_table.h>
+#include "multicalls.h"
#include "xen-ops.h"
static void __init m2p_override_init(void);
@@ -676,7 +678,8 @@ static unsigned long mfn_hash(unsigned long mfn)
}
/* Add an MFN override for a particular page */
-int m2p_add_override(unsigned long mfn, struct page *page, bool clear_pte)
+int m2p_add_override(unsigned long mfn, struct page *page,
+ struct gnttab_map_grant_ref *kmap_op)
{
unsigned long flags;
unsigned long pfn;
@@ -692,16 +695,28 @@ int m2p_add_override(unsigned long mfn, struct page *page, bool clear_pte)
"m2p_add_override: pfn %lx not mapped", pfn))
return -EINVAL;
}
-
- page->private = mfn;
+ WARN_ON(PagePrivate(page));
+ SetPagePrivate(page);
+ set_page_private(page, mfn);
page->index = pfn_to_mfn(pfn);
if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn))))
return -ENOMEM;
- if (clear_pte && !PageHighMem(page))
- /* Just zap old mapping for now */
- pte_clear(&init_mm, address, ptep);
+ if (kmap_op != NULL) {
+ if (!PageHighMem(page)) {
+ struct multicall_space mcs =
+ xen_mc_entry(sizeof(*kmap_op));
+
+ MULTI_grant_table_op(mcs.mc,
+ GNTTABOP_map_grant_ref, kmap_op, 1);
+
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
+ }
+ /* let's use dev_bus_addr to record the old mfn instead */
+ kmap_op->dev_bus_addr = page->index;
+ page->index = (unsigned long) kmap_op;
+ }
spin_lock_irqsave(&m2p_override_lock, flags);
list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]);
spin_unlock_irqrestore(&m2p_override_lock, flags);
@@ -735,13 +750,56 @@ int m2p_remove_override(struct page *page, bool clear_pte)
spin_lock_irqsave(&m2p_override_lock, flags);
list_del(&page->lru);
spin_unlock_irqrestore(&m2p_override_lock, flags);
- set_phys_to_machine(pfn, page->index);
+ WARN_ON(!PagePrivate(page));
+ ClearPagePrivate(page);
- if (clear_pte && !PageHighMem(page))
- set_pte_at(&init_mm, address, ptep,
- pfn_pte(pfn, PAGE_KERNEL));
- /* No tlb flush necessary because the caller already
- * left the pte unmapped. */
+ if (clear_pte) {
+ struct gnttab_map_grant_ref *map_op =
+ (struct gnttab_map_grant_ref *) page->index;
+ set_phys_to_machine(pfn, map_op->dev_bus_addr);
+ if (!PageHighMem(page)) {
+ struct multicall_space mcs;
+ struct gnttab_unmap_grant_ref *unmap_op;
+
+ /*
+ * It might be that we queued all the m2p grant table
+ * hypercalls in a multicall, then m2p_remove_override
+ * get called before the multicall has actually been
+	 * gets called before the multicall has actually been
+	 * issued. In this case handle is going to be -1 because
+ */
+ if (map_op->handle == -1)
+ xen_mc_flush();
+ /*
+ * Now if map_op->handle is negative it means that the
+ * hypercall actually returned an error.
+ */
+ if (map_op->handle == GNTST_general_error) {
+ printk(KERN_WARNING "m2p_remove_override: "
+ "pfn %lx mfn %lx, failed to modify kernel mappings",
+ pfn, mfn);
+ return -1;
+ }
+
+ mcs = xen_mc_entry(
+ sizeof(struct gnttab_unmap_grant_ref));
+ unmap_op = mcs.args;
+ unmap_op->host_addr = map_op->host_addr;
+ unmap_op->handle = map_op->handle;
+ unmap_op->dev_bus_addr = 0;
+
+ MULTI_grant_table_op(mcs.mc,
+ GNTTABOP_unmap_grant_ref, unmap_op, 1);
+
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+ set_pte_at(&init_mm, address, ptep,
+ pfn_pte(pfn, PAGE_KERNEL));
+ __flush_tlb_single(address);
+ map_op->host_addr = 0;
+ }
+ } else
+ set_phys_to_machine(pfn, page->index);
return 0;
}
@@ -758,7 +816,7 @@ struct page *m2p_find_override(unsigned long mfn)
spin_lock_irqsave(&m2p_override_lock, flags);
list_for_each_entry(p, bucket, lru) {
- if (p->private == mfn) {
+ if (page_private(p) == mfn) {
ret = p;
break;
}
@@ -782,17 +840,21 @@ unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn)
EXPORT_SYMBOL_GPL(m2p_find_override_pfn);
#ifdef CONFIG_XEN_DEBUG_FS
-
-int p2m_dump_show(struct seq_file *m, void *v)
+#include <linux/debugfs.h>
+#include "debugfs.h"
+static int p2m_dump_show(struct seq_file *m, void *v)
{
static const char * const level_name[] = { "top", "middle",
- "entry", "abnormal" };
- static const char * const type_name[] = { "identity", "missing",
- "pfn", "abnormal"};
+ "entry", "abnormal", "error"};
#define TYPE_IDENTITY 0
#define TYPE_MISSING 1
#define TYPE_PFN 2
#define TYPE_UNKNOWN 3
+ static const char * const type_name[] = {
+ [TYPE_IDENTITY] = "identity",
+ [TYPE_MISSING] = "missing",
+ [TYPE_PFN] = "pfn",
+ [TYPE_UNKNOWN] = "abnormal"};
unsigned long pfn, prev_pfn_type = 0, prev_pfn_level = 0;
unsigned int uninitialized_var(prev_level);
unsigned int uninitialized_var(prev_type);
@@ -856,4 +918,32 @@ int p2m_dump_show(struct seq_file *m, void *v)
#undef TYPE_PFN
#undef TYPE_UNKNOWN
}
-#endif
+
+static int p2m_dump_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, p2m_dump_show, NULL);
+}
+
+static const struct file_operations p2m_dump_fops = {
+ .open = p2m_dump_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static struct dentry *d_mmu_debug;
+
+static int __init xen_p2m_debugfs(void)
+{
+ struct dentry *d_xen = xen_init_debugfs();
+
+ if (d_xen == NULL)
+ return -ENOMEM;
+
+ d_mmu_debug = debugfs_create_dir("mmu", d_xen);
+
+ debugfs_create_file("p2m", 0600, d_mmu_debug, NULL, &p2m_dump_fops);
+ return 0;
+}
+fs_initcall(xen_p2m_debugfs);
+#endif /* CONFIG_XEN_DEBUG_FS */
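
The m2p override hunks above stop writing page->private directly and instead go through the page-private accessors with PG_private set, so the stashed mfn is visible through page_private(). A minimal sketch of that bookkeeping, assuming helper names (stash_mfn, drop_mfn) that are purely illustrative:

#include <linux/mm.h>
#include <linux/bug.h>

static void stash_mfn(struct page *page, unsigned long mfn)
{
	WARN_ON(PagePrivate(page));	/* page should not already carry private data */
	SetPagePrivate(page);
	set_page_private(page, mfn);	/* record the mfn via the accessor, not ->private */
}

static void drop_mfn(struct page *page)
{
	WARN_ON(!PagePrivate(page));
	ClearPagePrivate(page);
	set_page_private(page, 0);
}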
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index c3b8d440873..38d0af4fefe 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -37,7 +37,10 @@ extern void xen_syscall_target(void);
extern void xen_syscall32_target(void);
/* Amount of extra memory space we add to the e820 ranges */
-phys_addr_t xen_extra_mem_start, xen_extra_mem_size;
+struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
+
+/* Number of pages released from the initial allocation. */
+unsigned long xen_released_pages;
/*
* The maximum amount of extra memory compared to the base size. The
@@ -51,48 +54,47 @@ phys_addr_t xen_extra_mem_start, xen_extra_mem_size;
*/
#define EXTRA_MEM_RATIO (10)
-static void __init xen_add_extra_mem(unsigned long pages)
+static void __init xen_add_extra_mem(u64 start, u64 size)
{
unsigned long pfn;
+ int i;
- u64 size = (u64)pages * PAGE_SIZE;
- u64 extra_start = xen_extra_mem_start + xen_extra_mem_size;
-
- if (!pages)
- return;
-
- e820_add_region(extra_start, size, E820_RAM);
- sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
-
- memblock_x86_reserve_range(extra_start, extra_start + size, "XEN EXTRA");
+ for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+ /* Add new region. */
+ if (xen_extra_mem[i].size == 0) {
+ xen_extra_mem[i].start = start;
+ xen_extra_mem[i].size = size;
+ break;
+ }
+ /* Append to existing region. */
+ if (xen_extra_mem[i].start + xen_extra_mem[i].size == start) {
+ xen_extra_mem[i].size += size;
+ break;
+ }
+ }
+ if (i == XEN_EXTRA_MEM_MAX_REGIONS)
+ printk(KERN_WARNING "Warning: not enough extra memory regions\n");
- xen_extra_mem_size += size;
+ memblock_x86_reserve_range(start, start + size, "XEN EXTRA");
- xen_max_p2m_pfn = PFN_DOWN(extra_start + size);
+ xen_max_p2m_pfn = PFN_DOWN(start + size);
- for (pfn = PFN_DOWN(extra_start); pfn <= xen_max_p2m_pfn; pfn++)
+ for (pfn = PFN_DOWN(start); pfn <= xen_max_p2m_pfn; pfn++)
__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
}
-static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
- phys_addr_t end_addr)
+static unsigned long __init xen_release_chunk(unsigned long start,
+ unsigned long end)
{
struct xen_memory_reservation reservation = {
.address_bits = 0,
.extent_order = 0,
.domid = DOMID_SELF
};
- unsigned long start, end;
unsigned long len = 0;
unsigned long pfn;
int ret;
- start = PFN_UP(start_addr);
- end = PFN_DOWN(end_addr);
-
- if (end <= start)
- return 0;
-
for(pfn = start; pfn < end; pfn++) {
unsigned long mfn = pfn_to_mfn(pfn);
@@ -117,72 +119,52 @@ static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
return len;
}
-static unsigned long __init xen_return_unused_memory(unsigned long max_pfn,
- const struct e820map *e820)
+static unsigned long __init xen_set_identity_and_release(
+ const struct e820entry *list, size_t map_size, unsigned long nr_pages)
{
- phys_addr_t max_addr = PFN_PHYS(max_pfn);
- phys_addr_t last_end = ISA_END_ADDRESS;
+ phys_addr_t start = 0;
unsigned long released = 0;
- int i;
-
- /* Free any unused memory above the low 1Mbyte. */
- for (i = 0; i < e820->nr_map && last_end < max_addr; i++) {
- phys_addr_t end = e820->map[i].addr;
- end = min(max_addr, end);
-
- if (last_end < end)
- released += xen_release_chunk(last_end, end);
- last_end = max(last_end, e820->map[i].addr + e820->map[i].size);
- }
-
- if (last_end < max_addr)
- released += xen_release_chunk(last_end, max_addr);
-
- printk(KERN_INFO "released %lu pages of unused memory\n", released);
- return released;
-}
-
-static unsigned long __init xen_set_identity(const struct e820entry *list,
- ssize_t map_size)
-{
- phys_addr_t last = xen_initial_domain() ? 0 : ISA_END_ADDRESS;
- phys_addr_t start_pci = last;
- const struct e820entry *entry;
unsigned long identity = 0;
+ const struct e820entry *entry;
int i;
+ /*
+ * Combine non-RAM regions and gaps until a RAM region (or the
+ * end of the map) is reached, then set the 1:1 map and
+ * release the pages (if available) in those non-RAM regions.
+ *
+ * The combined non-RAM regions are rounded to a whole number
+ * of pages so any partial pages are accessible via the 1:1
+ * mapping. This is needed for some BIOSes that put (for
+ * example) the DMI tables in a reserved region that begins on
+ * a non-page boundary.
+ */
for (i = 0, entry = list; i < map_size; i++, entry++) {
- phys_addr_t start = entry->addr;
- phys_addr_t end = start + entry->size;
+ phys_addr_t end = entry->addr + entry->size;
- if (start < last)
- start = last;
+ if (entry->type == E820_RAM || i == map_size - 1) {
+ unsigned long start_pfn = PFN_DOWN(start);
+ unsigned long end_pfn = PFN_UP(end);
- if (end <= start)
- continue;
+ if (entry->type == E820_RAM)
+ end_pfn = PFN_UP(entry->addr);
- /* Skip over the 1MB region. */
- if (last > end)
- continue;
+ if (start_pfn < end_pfn) {
+ if (start_pfn < nr_pages)
+ released += xen_release_chunk(
+ start_pfn, min(end_pfn, nr_pages));
- if ((entry->type == E820_RAM) || (entry->type == E820_UNUSABLE)) {
- if (start > start_pci)
identity += set_phys_range_identity(
- PFN_UP(start_pci), PFN_DOWN(start));
-
- /* Without saving 'last' we would gooble RAM too
- * at the end of the loop. */
- last = end;
- start_pci = end;
- continue;
+ start_pfn, end_pfn);
+ }
+ start = end;
}
- start_pci = min(start, start_pci);
- last = end;
}
- if (last > start_pci)
- identity += set_phys_range_identity(
- PFN_UP(start_pci), PFN_DOWN(last));
- return identity;
+
+ printk(KERN_INFO "Released %lu pages of unused memory\n", released);
+ printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);
+
+ return released;
}
static unsigned long __init xen_get_max_pages(void)
@@ -197,21 +179,32 @@ static unsigned long __init xen_get_max_pages(void)
return min(max_pages, MAX_DOMAIN_PAGES);
}
+static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
+{
+ u64 end = start + size;
+
+ /* Align RAM regions to page boundaries. */
+ if (type == E820_RAM) {
+ start = PAGE_ALIGN(start);
+ end &= ~((u64)PAGE_SIZE - 1);
+ }
+
+ e820_add_region(start, end - start, type);
+}
+
/**
* machine_specific_memory_setup - Hook for machine specific memory setup.
**/
char * __init xen_memory_setup(void)
{
static struct e820entry map[E820MAX] __initdata;
- static struct e820entry map_raw[E820MAX] __initdata;
unsigned long max_pfn = xen_start_info->nr_pages;
unsigned long long mem_end;
int rc;
struct xen_memory_map memmap;
+ unsigned long max_pages;
unsigned long extra_pages = 0;
- unsigned long extra_limit;
- unsigned long identity_pages = 0;
int i;
int op;
@@ -237,58 +230,65 @@ char * __init xen_memory_setup(void)
}
BUG_ON(rc);
- memcpy(map_raw, map, sizeof(map));
- e820.nr_map = 0;
- xen_extra_mem_start = mem_end;
- for (i = 0; i < memmap.nr_entries; i++) {
- unsigned long long end;
-
- /* Guard against non-page aligned E820 entries. */
- if (map[i].type == E820_RAM)
- map[i].size -= (map[i].size + map[i].addr) % PAGE_SIZE;
-
- end = map[i].addr + map[i].size;
- if (map[i].type == E820_RAM && end > mem_end) {
- /* RAM off the end - may be partially included */
- u64 delta = min(map[i].size, end - mem_end);
-
- map[i].size -= delta;
- end -= delta;
-
- extra_pages += PFN_DOWN(delta);
- /*
- * Set RAM below 4GB that is not for us to be unusable.
- * This prevents "System RAM" address space from being
- * used as potential resource for I/O address (happens
- * when 'allocate_resource' is called).
- */
- if (delta &&
- (xen_initial_domain() && end < 0x100000000ULL))
- e820_add_region(end, delta, E820_UNUSABLE);
+ /* Make sure the Xen-supplied memory map is well-ordered. */
+ sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);
+
+ max_pages = xen_get_max_pages();
+ if (max_pages > max_pfn)
+ extra_pages += max_pages - max_pfn;
+
+ /*
+ * Set P2M for all non-RAM pages and E820 gaps to be identity
+	 * type PFNs. Any RAM pages that would be made inaccessible by
+ * this are first released.
+ */
+ xen_released_pages = xen_set_identity_and_release(
+ map, memmap.nr_entries, max_pfn);
+ extra_pages += xen_released_pages;
+
+ /*
+	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
+	 * factor of the base size. On non-highmem systems, the base
+ * size is the full initial memory allocation; on highmem it
+ * is limited to the max size of lowmem, so that it doesn't
+ * get completely filled.
+ *
+ * In principle there could be a problem in lowmem systems if
+ * the initial memory is also very large with respect to
+ * lowmem, but we won't try to deal with that here.
+ */
+ extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
+ extra_pages);
+
+ i = 0;
+ while (i < memmap.nr_entries) {
+ u64 addr = map[i].addr;
+ u64 size = map[i].size;
+ u32 type = map[i].type;
+
+ if (type == E820_RAM) {
+ if (addr < mem_end) {
+ size = min(size, mem_end - addr);
+ } else if (extra_pages) {
+ size = min(size, (u64)extra_pages * PAGE_SIZE);
+ extra_pages -= size / PAGE_SIZE;
+ xen_add_extra_mem(addr, size);
+ } else
+ type = E820_UNUSABLE;
}
- if (map[i].size > 0 && end > xen_extra_mem_start)
- xen_extra_mem_start = end;
+ xen_align_and_add_e820_region(addr, size, type);
- /* Add region if any remains */
- if (map[i].size > 0)
- e820_add_region(map[i].addr, map[i].size, map[i].type);
+ map[i].addr += size;
+ map[i].size -= size;
+ if (map[i].size == 0)
+ i++;
}
- /* Align the balloon area so that max_low_pfn does not get set
- * to be at the _end_ of the PCI gap at the far end (fee01000).
- * Note that xen_extra_mem_start gets set in the loop above to be
- * past the last E820 region. */
- if (xen_initial_domain() && (xen_extra_mem_start < (1ULL<<32)))
- xen_extra_mem_start = (1ULL<<32);
/*
* In domU, the ISA region is normal, usable memory, but we
* reserve ISA memory anyway because too many things poke
* about in there.
- *
- * In Dom0, the host E820 information can leave gaps in the
- * ISA range, which would cause us to release those pages. To
- * avoid this, we unconditionally reserve them here.
*/
e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
E820_RESERVED);
@@ -305,42 +305,6 @@ char * __init xen_memory_setup(void)
sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
- extra_limit = xen_get_max_pages();
- if (extra_limit >= max_pfn)
- extra_pages = extra_limit - max_pfn;
- else
- extra_pages = 0;
-
- extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820);
-
- /*
- * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
- * factor the base size. On non-highmem systems, the base
- * size is the full initial memory allocation; on highmem it
- * is limited to the max size of lowmem, so that it doesn't
- * get completely filled.
- *
- * In principle there could be a problem in lowmem systems if
- * the initial memory is also very large with respect to
- * lowmem, but we won't try to deal with that here.
- */
- extra_limit = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
- max_pfn + extra_pages);
-
- if (extra_limit >= max_pfn)
- extra_pages = extra_limit - max_pfn;
- else
- extra_pages = 0;
-
- xen_add_extra_mem(extra_pages);
-
- /*
- * Set P2M for all non-RAM pages and E820 gaps to be identity
- * type PFNs. We supply it with the non-sanitized version
- * of the E820.
- */
- identity_pages = xen_set_identity(map_raw, memmap.nr_entries);
- printk(KERN_INFO "Set %ld page(s) to 1-1 mapping.\n", identity_pages);
return "Xen";
}
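
For scale, the EXTRA_MEM_RATIO clamp added above works out as follows (illustrative numbers, not taken from the patch): a domU started with 2 GiB has max_pfn = 524288 4 KiB pages, well below PFN_DOWN(MAXMEM) on 64-bit, so extra_pages = min(10 * 524288, extra_pages). The extra/balloon area is therefore capped at 5242880 pages, roughly 20 GiB, regardless of how much the released pages and the xen_get_max_pages() headroom add up to.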
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index d4fc6d454f8..041d4fe9dfe 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -532,7 +532,6 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
WARN_ON(xen_smp_intr_init(0));
xen_init_lock_cpu(0);
- xen_init_spinlocks();
}
static int __cpuinit xen_hvm_cpu_up(unsigned int cpu)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index bcaf16ee6ad..b596e54ddd7 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -785,10 +785,10 @@ static int blkio_policy_parse_and_set(char *buf,
{
char *s[4], *p, *major_s = NULL, *minor_s = NULL;
int ret;
- unsigned long major, minor, temp;
+ unsigned long major, minor;
int i = 0;
dev_t dev;
- u64 bps, iops;
+ u64 temp;
memset(s, 0, sizeof(s));
@@ -826,20 +826,23 @@ static int blkio_policy_parse_and_set(char *buf,
dev = MKDEV(major, minor);
- ret = blkio_check_dev_num(dev);
+ ret = strict_strtoull(s[1], 10, &temp);
if (ret)
- return ret;
+ return -EINVAL;
- newpn->dev = dev;
+ /* For rule removal, do not check for device presence. */
+ if (temp) {
+ ret = blkio_check_dev_num(dev);
+ if (ret)
+ return ret;
+ }
- if (s[1] == NULL)
- return -EINVAL;
+ newpn->dev = dev;
switch (plid) {
case BLKIO_POLICY_PROP:
- ret = strict_strtoul(s[1], 10, &temp);
- if (ret || (temp < BLKIO_WEIGHT_MIN && temp > 0) ||
- temp > BLKIO_WEIGHT_MAX)
+ if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
+ temp > BLKIO_WEIGHT_MAX)
return -EINVAL;
newpn->plid = plid;
@@ -850,26 +853,18 @@ static int blkio_policy_parse_and_set(char *buf,
switch(fileid) {
case BLKIO_THROTL_read_bps_device:
case BLKIO_THROTL_write_bps_device:
- ret = strict_strtoull(s[1], 10, &bps);
- if (ret)
- return -EINVAL;
-
newpn->plid = plid;
newpn->fileid = fileid;
- newpn->val.bps = bps;
+ newpn->val.bps = temp;
break;
case BLKIO_THROTL_read_iops_device:
case BLKIO_THROTL_write_iops_device:
- ret = strict_strtoull(s[1], 10, &iops);
- if (ret)
- return -EINVAL;
-
- if (iops > THROTL_IOPS_MAX)
+ if (temp > THROTL_IOPS_MAX)
return -EINVAL;
newpn->plid = plid;
newpn->fileid = fileid;
- newpn->val.iops = (unsigned int)iops;
+ newpn->val.iops = (unsigned int)temp;
break;
}
break;
diff --git a/block/blk-core.c b/block/blk-core.c
index 90e1ffdeb41..d34433ae791 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -348,9 +348,10 @@ void blk_put_queue(struct request_queue *q)
EXPORT_SYMBOL(blk_put_queue);
/*
- * Note: If a driver supplied the queue lock, it should not zap that lock
- * unexpectedly as some queue cleanup components like elevator_exit() and
- * blk_throtl_exit() need queue lock.
+ * Note: If a driver supplied the queue lock, it is disconnected
+ * by this function. The actual state of the lock doesn't matter
+ * here as the request_queue isn't accessible after this point
+ * (QUEUE_FLAG_DEAD is set) and no other requests will be queued.
*/
void blk_cleanup_queue(struct request_queue *q)
{
@@ -367,10 +368,8 @@ void blk_cleanup_queue(struct request_queue *q)
queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
mutex_unlock(&q->sysfs_lock);
- if (q->elevator)
- elevator_exit(q->elevator);
-
- blk_throtl_exit(q);
+ if (q->queue_lock != &q->__queue_lock)
+ q->queue_lock = &q->__queue_lock;
blk_put_queue(q);
}
@@ -1167,7 +1166,7 @@ static bool bio_attempt_front_merge(struct request_queue *q,
* true if merge was successful, otherwise false.
*/
static bool attempt_plug_merge(struct task_struct *tsk, struct request_queue *q,
- struct bio *bio)
+ struct bio *bio, unsigned int *request_count)
{
struct blk_plug *plug;
struct request *rq;
@@ -1176,10 +1175,13 @@ static bool attempt_plug_merge(struct task_struct *tsk, struct request_queue *q,
plug = tsk->plug;
if (!plug)
goto out;
+ *request_count = 0;
list_for_each_entry_reverse(rq, &plug->list, queuelist) {
int el_ret;
+ (*request_count)++;
+
if (rq->q != q)
continue;
@@ -1219,6 +1221,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
struct blk_plug *plug;
int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
struct request *req;
+ unsigned int request_count = 0;
/*
* low level driver can indicate that it wants pages above a
@@ -1237,7 +1240,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
* Check if we can merge with the plugged list before grabbing
* any locks.
*/
- if (attempt_plug_merge(current, q, bio))
+ if (attempt_plug_merge(current, q, bio, &request_count))
goto out;
spin_lock_irq(q->queue_lock);
@@ -1302,11 +1305,10 @@ get_rq:
if (__rq->q != q)
plug->should_sort = 1;
}
+ if (request_count >= BLK_MAX_REQUEST_COUNT)
+ blk_flush_plug_list(plug, false);
list_add_tail(&req->queuelist, &plug->list);
- plug->count++;
drive_stat_acct(req, 1);
- if (plug->count >= BLK_MAX_REQUEST_COUNT)
- blk_flush_plug_list(plug, false);
} else {
spin_lock_irq(q->queue_lock);
add_acct_request(q, req, where);
@@ -2634,7 +2636,6 @@ void blk_start_plug(struct blk_plug *plug)
INIT_LIST_HEAD(&plug->list);
INIT_LIST_HEAD(&plug->cb_list);
plug->should_sort = 0;
- plug->count = 0;
/*
* If this is a nested plug, don't actually assign it. It will be
@@ -2718,7 +2719,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
return;
list_splice_init(&plug->list, &list);
- plug->count = 0;
if (plug->should_sort) {
list_sort(NULL, &list, plug_rq_cmp);
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 58340d0cb23..1366a89d8e6 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -115,7 +115,7 @@ void __blk_complete_request(struct request *req)
/*
* Select completion CPU
*/
- if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) && req->cpu != -1) {
+ if (req->cpu != -1) {
ccpu = req->cpu;
if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags)) {
ccpu = blk_cpu_to_group(ccpu);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 0ee17b5e7fb..60fda88c57f 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -258,11 +258,13 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
ret = queue_var_store(&val, page, count);
spin_lock_irq(q->queue_lock);
- if (val) {
+ if (val == 2) {
queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
- if (val == 2)
- queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
- } else {
+ queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
+ } else if (val == 1) {
+ queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
+ queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
+ } else if (val == 0) {
queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
}
@@ -477,6 +479,11 @@ static void blk_release_queue(struct kobject *kobj)
blk_sync_queue(q);
+ if (q->elevator)
+ elevator_exit(q->elevator);
+
+ blk_throtl_exit(q);
+
if (rl->rq_pool)
mempool_destroy(rl->rq_pool);
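
The queue_rq_affinity_store() hunk above makes the sysfs values 0, 1 and 2 three distinct settings instead of 2 merely adding a flag on top of 1. A small sketch of the resulting mapping, using illustrative flag variables rather than the real queue flags:

#include <linux/types.h>

/* rq_affinity value -> completion-placement flags (illustrative helper) */
static void sketch_rq_affinity(unsigned long val, bool *same_comp, bool *same_force)
{
	*same_comp  = (val == 1 || val == 2);	/* complete near the submitting CPU's group */
	*same_force = (val == 2);		/* force completion on the exact submitting CPU */
}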
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index a33bd4377c6..16ace89613b 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -130,8 +130,8 @@ struct cfq_queue {
unsigned long slice_end;
long slice_resid;
- /* pending metadata requests */
- int meta_pending;
+ /* pending priority requests */
+ int prio_pending;
/* number of requests that are on the dispatch list or inside driver */
int dispatched;
@@ -684,8 +684,8 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2,
if (rq_is_sync(rq1) != rq_is_sync(rq2))
return rq_is_sync(rq1) ? rq1 : rq2;
- if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_META)
- return rq1->cmd_flags & REQ_META ? rq1 : rq2;
+ if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
+ return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;
s1 = blk_rq_pos(rq1);
s2 = blk_rq_pos(rq2);
@@ -1612,9 +1612,9 @@ static void cfq_remove_request(struct request *rq)
cfqq->cfqd->rq_queued--;
cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
rq_data_dir(rq), rq_is_sync(rq));
- if (rq->cmd_flags & REQ_META) {
- WARN_ON(!cfqq->meta_pending);
- cfqq->meta_pending--;
+ if (rq->cmd_flags & REQ_PRIO) {
+ WARN_ON(!cfqq->prio_pending);
+ cfqq->prio_pending--;
}
}
@@ -3372,7 +3372,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
* So both queues are sync. Let the new request get disk time if
* it's a metadata request and the current queue is doing regular IO.
*/
- if ((rq->cmd_flags & REQ_META) && !cfqq->meta_pending)
+ if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
return true;
/*
@@ -3439,8 +3439,8 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
struct cfq_io_context *cic = RQ_CIC(rq);
cfqd->rq_queued++;
- if (rq->cmd_flags & REQ_META)
- cfqq->meta_pending++;
+ if (rq->cmd_flags & REQ_PRIO)
+ cfqq->prio_pending++;
cfq_update_io_thinktime(cfqd, cfqq, cic);
cfq_update_io_seektime(cfqd, cfqq, rq);
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
index be442561693..7835b8fc94d 100644
--- a/crypto/ghash-generic.c
+++ b/crypto/ghash-generic.c
@@ -67,6 +67,9 @@ static int ghash_update(struct shash_desc *desc,
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
u8 *dst = dctx->buffer;
+ if (!ctx->gf128)
+ return -ENOKEY;
+
if (dctx->bytes) {
int n = min(srclen, dctx->bytes);
u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
@@ -119,6 +122,9 @@ static int ghash_final(struct shash_desc *desc, u8 *dst)
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
u8 *buf = dctx->buffer;
+ if (!ctx->gf128)
+ return -ENOKEY;
+
ghash_flush(ctx, dctx);
memcpy(dst, buf, GHASH_BLOCK_SIZE);
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 2c18d584066..b97294e2d95 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -42,6 +42,22 @@ static struct pm_clk_data *__to_pcd(struct device *dev)
}
/**
+ * pm_clk_acquire - Acquire a device clock.
+ * @dev: Device whose clock is to be acquired.
+ * @ce: PM clock entry corresponding to the clock.
+ */
+static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
+{
+ ce->clk = clk_get(dev, ce->con_id);
+ if (IS_ERR(ce->clk)) {
+ ce->status = PCE_STATUS_ERROR;
+ } else {
+ ce->status = PCE_STATUS_ACQUIRED;
+ dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id);
+ }
+}
+
+/**
* pm_clk_add - Start using a device clock for power management.
* @dev: Device whose clock is going to be used for power management.
* @con_id: Connection ID of the clock.
@@ -73,6 +89,8 @@ int pm_clk_add(struct device *dev, const char *con_id)
}
}
+ pm_clk_acquire(dev, ce);
+
spin_lock_irq(&pcd->lock);
list_add_tail(&ce->node, &pcd->clock_list);
spin_unlock_irq(&pcd->lock);
@@ -82,17 +100,12 @@ int pm_clk_add(struct device *dev, const char *con_id)
/**
* __pm_clk_remove - Destroy PM clock entry.
* @ce: PM clock entry to destroy.
- *
- * This routine must be called under the spinlock protecting the PM list of
- * clocks corresponding the the @ce's device.
*/
static void __pm_clk_remove(struct pm_clock_entry *ce)
{
if (!ce)
return;
- list_del(&ce->node);
-
if (ce->status < PCE_STATUS_ERROR) {
if (ce->status == PCE_STATUS_ENABLED)
clk_disable(ce->clk);
@@ -126,18 +139,22 @@ void pm_clk_remove(struct device *dev, const char *con_id)
spin_lock_irq(&pcd->lock);
list_for_each_entry(ce, &pcd->clock_list, node) {
- if (!con_id && !ce->con_id) {
- __pm_clk_remove(ce);
- break;
- } else if (!con_id || !ce->con_id) {
+ if (!con_id && !ce->con_id)
+ goto remove;
+ else if (!con_id || !ce->con_id)
continue;
- } else if (!strcmp(con_id, ce->con_id)) {
- __pm_clk_remove(ce);
- break;
- }
+ else if (!strcmp(con_id, ce->con_id))
+ goto remove;
}
spin_unlock_irq(&pcd->lock);
+ return;
+
+ remove:
+ list_del(&ce->node);
+ spin_unlock_irq(&pcd->lock);
+
+ __pm_clk_remove(ce);
}
/**
@@ -175,20 +192,27 @@ void pm_clk_destroy(struct device *dev)
{
struct pm_clk_data *pcd = __to_pcd(dev);
struct pm_clock_entry *ce, *c;
+ struct list_head list;
if (!pcd)
return;
dev->power.subsys_data = NULL;
+ INIT_LIST_HEAD(&list);
spin_lock_irq(&pcd->lock);
list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node)
- __pm_clk_remove(ce);
+ list_move(&ce->node, &list);
spin_unlock_irq(&pcd->lock);
kfree(pcd);
+
+ list_for_each_entry_safe_reverse(ce, c, &list, node) {
+ list_del(&ce->node);
+ __pm_clk_remove(ce);
+ }
}
#endif /* CONFIG_PM */
@@ -196,23 +220,6 @@ void pm_clk_destroy(struct device *dev)
#ifdef CONFIG_PM_RUNTIME
/**
- * pm_clk_acquire - Acquire a device clock.
- * @dev: Device whose clock is to be acquired.
- * @con_id: Connection ID of the clock.
- */
-static void pm_clk_acquire(struct device *dev,
- struct pm_clock_entry *ce)
-{
- ce->clk = clk_get(dev, ce->con_id);
- if (IS_ERR(ce->clk)) {
- ce->status = PCE_STATUS_ERROR;
- } else {
- ce->status = PCE_STATUS_ACQUIRED;
- dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id);
- }
-}
-
-/**
* pm_clk_suspend - Disable clocks in a device's PM clock list.
* @dev: Device to disable the clocks for.
*/
@@ -230,9 +237,6 @@ int pm_clk_suspend(struct device *dev)
spin_lock_irqsave(&pcd->lock, flags);
list_for_each_entry_reverse(ce, &pcd->clock_list, node) {
- if (ce->status == PCE_STATUS_NONE)
- pm_clk_acquire(dev, ce);
-
if (ce->status < PCE_STATUS_ERROR) {
clk_disable(ce->clk);
ce->status = PCE_STATUS_ACQUIRED;
@@ -262,9 +266,6 @@ int pm_clk_resume(struct device *dev)
spin_lock_irqsave(&pcd->lock, flags);
list_for_each_entry(ce, &pcd->clock_list, node) {
- if (ce->status == PCE_STATUS_NONE)
- pm_clk_acquire(dev, ce);
-
if (ce->status < PCE_STATUS_ERROR) {
clk_enable(ce->clk);
ce->status = PCE_STATUS_ENABLED;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 98de8f41867..9955a53733b 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4250,7 +4250,7 @@ static int __init floppy_init(void)
use_virtual_dma = can_use_virtual_dma & 1;
fdc_state[0].address = FDC1;
if (fdc_state[0].address == -1) {
- del_timer(&fd_timeout);
+ del_timer_sync(&fd_timeout);
err = -ENODEV;
goto out_unreg_region;
}
@@ -4261,7 +4261,7 @@ static int __init floppy_init(void)
fdc = 0; /* reset fdc in case of unexpected interrupt */
err = floppy_grab_irq_and_dma();
if (err) {
- del_timer(&fd_timeout);
+ del_timer_sync(&fd_timeout);
err = -EBUSY;
goto out_unreg_region;
}
@@ -4318,7 +4318,7 @@ static int __init floppy_init(void)
user_reset_fdc(-1, FD_RESET_ALWAYS, false);
}
fdc = 0;
- del_timer(&fd_timeout);
+ del_timer_sync(&fd_timeout);
current_drive = 0;
initialized = true;
if (have_no_fdc) {
@@ -4368,7 +4368,7 @@ out_unreg_blkdev:
unregister_blkdev(FLOPPY_MAJOR, "fd");
out_put_disk:
while (dr--) {
- del_timer(&motor_off_timer[dr]);
+ del_timer_sync(&motor_off_timer[dr]);
if (disks[dr]->queue)
blk_cleanup_queue(disks[dr]->queue);
put_disk(disks[dr]);
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 2330a9ad5e9..1540792b1e5 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -396,7 +396,7 @@ static int xen_blkbk_map(struct blkif_request *req,
continue;
ret = m2p_add_override(PFN_DOWN(map[i].dev_bus_addr),
- blkbk->pending_page(pending_req, i), false);
+ blkbk->pending_page(pending_req, i), NULL);
if (ret) {
pr_alert(DRV_PFX "Failed to install M2P override for %lx (ret: %d)\n",
(unsigned long)map[i].dev_bus_addr, ret);
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 0d04c7fb7ce..c4bd34063ec 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -45,7 +45,7 @@
#define DRV_PFX "xen-blkback:"
#define DPRINTK(fmt, args...) \
- pr_debug(DRV_PFX "(%s:%d) " fmt ".\n", \
+ pr_debug(DRV_PFX "(%s:%d) " fmt ".\n", \
__func__, __LINE__, ##args)
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 3f129b45451..5fd2010f7d2 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -590,7 +590,7 @@ static void frontend_changed(struct xenbus_device *dev,
/*
* Enforce precondition before potential leak point.
- * blkif_disconnect() is idempotent.
+ * xen_blkif_disconnect() is idempotent.
*/
xen_blkif_disconnect(be->blkif);
@@ -601,17 +601,17 @@ static void frontend_changed(struct xenbus_device *dev,
break;
case XenbusStateClosing:
- xen_blkif_disconnect(be->blkif);
xenbus_switch_state(dev, XenbusStateClosing);
break;
case XenbusStateClosed:
+ xen_blkif_disconnect(be->blkif);
xenbus_switch_state(dev, XenbusStateClosed);
if (xenbus_dev_is_online(dev))
break;
/* fall through if not online */
case XenbusStateUnknown:
- /* implies blkif_disconnect() via blkback_remove() */
+ /* implies xen_blkif_disconnect() via xen_blkbk_remove() */
device_unregister(&dev->dev);
break;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 3ef476070ba..9cbac6b445e 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -72,9 +72,15 @@ static struct usb_device_id btusb_table[] = {
/* Apple MacBookAir3,1, MacBookAir3,2 */
{ USB_DEVICE(0x05ac, 0x821b) },
+ /* Apple MacBookAir4,1 */
+ { USB_DEVICE(0x05ac, 0x821f) },
+
/* Apple MacBookPro8,2 */
{ USB_DEVICE(0x05ac, 0x821a) },
+ /* Apple MacMini5,1 */
+ { USB_DEVICE(0x05ac, 0x8281) },
+
/* AVM BlueFRITZ! USB v2.0 */
{ USB_DEVICE(0x057c, 0x3800) },
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
index 65d27aff553..04d353f58d7 100644
--- a/drivers/bluetooth/btwilink.c
+++ b/drivers/bluetooth/btwilink.c
@@ -125,6 +125,13 @@ static long st_receive(void *priv_data, struct sk_buff *skb)
/* protocol structure registered with shared transport */
static struct st_proto_s ti_st_proto[MAX_BT_CHNL_IDS] = {
{
+ .chnl_id = HCI_EVENT_PKT, /* HCI Events */
+ .hdr_len = sizeof(struct hci_event_hdr),
+ .offset_len_in_hdr = offsetof(struct hci_event_hdr, plen),
+ .len_size = 1, /* sizeof(plen) in struct hci_event_hdr */
+ .reserve = 8,
+ },
+ {
.chnl_id = HCI_ACLDATA_PKT, /* ACL */
.hdr_len = sizeof(struct hci_acl_hdr),
.offset_len_in_hdr = offsetof(struct hci_acl_hdr, dlen),
@@ -138,13 +145,6 @@ static struct st_proto_s ti_st_proto[MAX_BT_CHNL_IDS] = {
.len_size = 1, /* sizeof(dlen) in struct hci_sco_hdr */
.reserve = 8,
},
- {
- .chnl_id = HCI_EVENT_PKT, /* HCI Events */
- .hdr_len = sizeof(struct hci_event_hdr),
- .offset_len_in_hdr = offsetof(struct hci_event_hdr, plen),
- .len_size = 1, /* sizeof(plen) in struct hci_event_hdr */
- .reserve = 8,
- },
};
/* Called from HCI core to initialize the device */
@@ -240,7 +240,7 @@ static int ti_st_close(struct hci_dev *hdev)
if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
return 0;
- for (i = 0; i < MAX_BT_CHNL_IDS; i++) {
+ for (i = MAX_BT_CHNL_IDS-1; i >= 0; i--) {
err = st_unregister(&ti_st_proto[i]);
if (err)
BT_ERR("st_unregister(%d) failed with error %d",
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index ae6a9330632..f4837a893df 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -297,17 +297,13 @@ apm_ioctl(struct file *filp, u_int cmd, u_long arg)
/*
* Wait for the suspend/resume to complete. If there
* are pending acknowledges, we wait here for them.
+	 * wait_event_freezable() is interruptible and a pending
+ * signal can cause busy looping. We aren't doing
+ * anything critical, chill a bit on each iteration.
*/
- freezer_do_not_count();
-
- wait_event(apm_suspend_waitqueue,
- as->suspend_state == SUSPEND_DONE);
-
- /*
- * Since we are waiting until the suspend is done, the
- * try_to_freeze() in freezer_count() will not trigger
- */
- freezer_count();
+ while (wait_event_freezable(apm_suspend_waitqueue,
+ as->suspend_state == SUSPEND_DONE))
+ msleep(10);
break;
case SUSPEND_ACKTO:
as->suspend_result = -ETIMEDOUT;
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index f6595aba4f0..fa567f1158c 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -43,6 +43,7 @@ config TCG_NSC
config TCG_ATMEL
tristate "Atmel TPM Interface"
+ depends on PPC64 || HAS_IOPORT
---help---
If you have a TPM security chip from Atmel say Yes and it
will be accessible from within Linux. To compile this driver
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index caf8012ef47..361a1dff8f7 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -383,6 +383,9 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
u32 count, ordinal;
unsigned long stop;
+ if (bufsiz > TPM_BUFSIZE)
+ bufsiz = TPM_BUFSIZE;
+
count = be32_to_cpu(*((__be32 *) (buf + 2)));
ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
if (count == 0)
@@ -963,6 +966,9 @@ ssize_t tpm_show_durations(struct device *dev, struct device_attribute *attr,
{
struct tpm_chip *chip = dev_get_drvdata(dev);
+ if (chip->vendor.duration[TPM_LONG] == 0)
+ return 0;
+
return sprintf(buf, "%d %d %d [%s]\n",
jiffies_to_usecs(chip->vendor.duration[TPM_SHORT]),
jiffies_to_usecs(chip->vendor.duration[TPM_MEDIUM]),
@@ -1102,6 +1108,7 @@ ssize_t tpm_read(struct file *file, char __user *buf,
{
struct tpm_chip *chip = file->private_data;
ssize_t ret_size;
+ int rc;
del_singleshot_timer_sync(&chip->user_read_timer);
flush_work_sync(&chip->work);
@@ -1112,8 +1119,11 @@ ssize_t tpm_read(struct file *file, char __user *buf,
ret_size = size;
mutex_lock(&chip->buffer_mutex);
- if (copy_to_user(buf, chip->data_buffer, ret_size))
+ rc = copy_to_user(buf, chip->data_buffer, ret_size);
+ memset(chip->data_buffer, 0, ret_size);
+ if (rc)
ret_size = -EFAULT;
+
mutex_unlock(&chip->buffer_mutex);
}
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
index 82facc9104c..4d2464871ad 100644
--- a/drivers/char/tpm/tpm_nsc.c
+++ b/drivers/char/tpm/tpm_nsc.c
@@ -396,8 +396,6 @@ static void __exit cleanup_nsc(void)
if (pdev) {
tpm_nsc_remove(&pdev->dev);
platform_device_unregister(pdev);
- kfree(pdev);
- pdev = NULL;
}
platform_driver_unregister(&nsc_drv);
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 57cd3a406ed..fd7170a9ad2 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -290,6 +290,9 @@ static const struct {
{PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID,
QUIRK_CYCLE_TIMER},
+ {PCI_VENDOR_ID_O2, PCI_ANY_ID, PCI_ANY_ID,
+ QUIRK_NO_MSI},
+
{PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID,
QUIRK_CYCLE_TIMER},
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 0599854e221..118ec12d2d5 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -34,8 +34,8 @@ struct gpio_bank {
u16 irq;
u16 virtual_irq_start;
int method;
-#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)
u32 suspend_wakeup;
+#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)
u32 saved_wakeup;
#endif
u32 non_wakeup_gpios;
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index c43b8ff626a..0550dcb8581 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -577,6 +577,7 @@ pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
void
pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
{
+ *gpio_base = -1;
}
#endif
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ce045a8cf82..f07e4252b70 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -67,11 +67,11 @@ module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
MODULE_PARM_DESC(i915_enable_rc6,
"Enable power-saving render C-state 6 (default: true)");
-unsigned int i915_enable_fbc __read_mostly = 1;
+unsigned int i915_enable_fbc __read_mostly = -1;
module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
MODULE_PARM_DESC(i915_enable_fbc,
"Enable frame buffer compression for power savings "
- "(default: false)");
+ "(default: -1 (use per-chip default))");
unsigned int i915_lvds_downclock __read_mostly = 0;
module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 56a8554d903..04411ad2e77 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1799,6 +1799,7 @@ static void intel_update_fbc(struct drm_device *dev)
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
+ int enable_fbc;
DRM_DEBUG_KMS("\n");
@@ -1839,8 +1840,15 @@ static void intel_update_fbc(struct drm_device *dev)
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
- if (!i915_enable_fbc) {
- DRM_DEBUG_KMS("fbc disabled per module param (default off)\n");
+ enable_fbc = i915_enable_fbc;
+ if (enable_fbc < 0) {
+ DRM_DEBUG_KMS("fbc set to per-chip default\n");
+ enable_fbc = 1;
+ if (INTEL_INFO(dev)->gen <= 5)
+ enable_fbc = 0;
+ }
+ if (!enable_fbc) {
+ DRM_DEBUG_KMS("fbc disabled per module param\n");
dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
goto out_disable;
}
@@ -4687,13 +4695,13 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
bpc = 6; /* min is 18bpp */
break;
case 24:
- bpc = min((unsigned int)8, display_bpc);
+ bpc = 8;
break;
case 30:
- bpc = min((unsigned int)10, display_bpc);
+ bpc = 10;
break;
case 48:
- bpc = min((unsigned int)12, display_bpc);
+ bpc = 12;
break;
default:
DRM_DEBUG("unsupported depth, assuming 24 bits\n");
@@ -4701,10 +4709,12 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
break;
}
+ display_bpc = min(display_bpc, bpc);
+
DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n",
bpc, display_bpc);
- *pipe_bpp = bpc * 3;
+ *pipe_bpp = display_bpc * 3;
return display_bpc != bpc;
}
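In intel_choose_pipe_bpp_dither() the framebuffer depth now only sets an upper bound: the sink's display_bpc is clamped to it, the pipe bpp follows the clamped display value, and the return value flags when dithering is needed. A small sketch of that clamp-and-compare logic; fb_depth_to_bpc() is an invented helper, with the depth-to-bpc mapping taken from the hunk above:

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
    return a < b ? a : b;
}

/* map framebuffer depth to the bpc the framebuffer actually provides */
static unsigned int fb_depth_to_bpc(int depth)
{
    switch (depth) {
    case 18: return 6;
    case 24: return 8;
    case 30: return 10;
    case 48: return 12;
    default: return 8;  /* unknown depth: assume 24 bpp */
    }
}

/* returns 1 when dithering is needed (the sink shows fewer bits than the fb has) */
static int choose_pipe_bpp(int fb_depth, unsigned int display_bpc,
                           unsigned int *pipe_bpp)
{
    unsigned int bpc = fb_depth_to_bpc(fb_depth);

    display_bpc = min_u(display_bpc, bpc);   /* never exceed what the fb provides */
    *pipe_bpp = display_bpc * 3;             /* pipe bpp follows the display, not the fb */
    return display_bpc != bpc;
}

int main(void)
{
    unsigned int pipe_bpp;
    int dither = choose_pipe_bpp(24, 6, &pipe_bpp);  /* 8 bpc fb on a 6 bpc panel */

    printf("pipe_bpp=%u dither=%d\n", pipe_bpp, dither);
    return 0;
}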
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 0b2ee9d3998..fe1099d8817 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -337,9 +337,6 @@ extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
struct drm_connector *connector,
struct intel_load_detect_pipe *old);
-extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB);
-extern int intel_sdvo_supports_hotplug(struct drm_connector *connector);
-extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable);
extern void intelfb_restore(void);
extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 30fe554d893..6348c499616 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -92,6 +92,11 @@ struct intel_sdvo {
*/
uint16_t attached_output;
+ /*
+ * Hotplug activation bits for this device
+ */
+ uint8_t hotplug_active[2];

+
/**
* This is used to select the color range of RGB outputs in HDMI mode.
* It is only valid when using TMDS encoding and 8 bit per color mode.
@@ -1208,74 +1213,20 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
return true;
}
-/* No use! */
-#if 0
-struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB)
-{
- struct drm_connector *connector = NULL;
- struct intel_sdvo *iout = NULL;
- struct intel_sdvo *sdvo;
-
- /* find the sdvo connector */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- iout = to_intel_sdvo(connector);
-
- if (iout->type != INTEL_OUTPUT_SDVO)
- continue;
-
- sdvo = iout->dev_priv;
-
- if (sdvo->sdvo_reg == SDVOB && sdvoB)
- return connector;
-
- if (sdvo->sdvo_reg == SDVOC && !sdvoB)
- return connector;
-
- }
-
- return NULL;
-}
-
-int intel_sdvo_supports_hotplug(struct drm_connector *connector)
+static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo)
{
u8 response[2];
- u8 status;
- struct intel_sdvo *intel_sdvo;
- DRM_DEBUG_KMS("\n");
-
- if (!connector)
- return 0;
-
- intel_sdvo = to_intel_sdvo(connector);
return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
&response, 2) && response[0];
}
-void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
+static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
{
- u8 response[2];
- u8 status;
- struct intel_sdvo *intel_sdvo = to_intel_sdvo(connector);
-
- intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
- intel_sdvo_read_response(intel_sdvo, &response, 2);
-
- if (on) {
- intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
- status = intel_sdvo_read_response(intel_sdvo, &response, 2);
-
- intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
- } else {
- response[0] = 0;
- response[1] = 0;
- intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
- }
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
- intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
- intel_sdvo_read_response(intel_sdvo, &response, 2);
+ intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &intel_sdvo->hotplug_active, 2);
}
-#endif
static bool
intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
@@ -2045,6 +1996,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
{
struct drm_encoder *encoder = &intel_sdvo->base.base;
struct drm_connector *connector;
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
@@ -2062,7 +2014,17 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
intel_connector = &intel_sdvo_connector->base;
connector = &intel_connector->base;
- connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+ if (intel_sdvo_supports_hotplug(intel_sdvo) & (1 << device)) {
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ intel_sdvo->hotplug_active[0] |= 1 << device;
+ /* Some SDVO devices have one-shot hotplug interrupts.
+ * Ensure that they get re-enabled when an interrupt happens.
+ */
+ intel_encoder->hot_plug = intel_sdvo_enable_hotplug;
+ intel_sdvo_enable_hotplug(intel_encoder);
+ }
+ else
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
connector->connector_type = DRM_MODE_CONNECTOR_DVID;
@@ -2569,6 +2531,14 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
goto err;
+ /* Set up hotplug command - note paranoia about contents of reply.
+ * We assume that the hardware is in a sane state, and only touch
+ * the bits we think we understand.
+ */
+ intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG,
+ &intel_sdvo->hotplug_active, 2);
+ intel_sdvo->hotplug_active[0] &= ~0x3;
+
if (intel_sdvo_output_setup(intel_sdvo,
intel_sdvo->caps.output_flags) != true) {
DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index e88c64417a8..14cc88aaf3a 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -277,7 +277,12 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
case ATOM_ARG_FB:
idx = U8(*ptr);
(*ptr)++;
- val = gctx->scratch[((gctx->fb_base + idx) / 4)];
+ if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
+ DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n",
+ gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
+ val = 0;
+ } else
+ val = gctx->scratch[(gctx->fb_base / 4) + idx];
if (print)
DEBUG("FB[0x%02X]", idx);
break;
@@ -531,7 +536,11 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
case ATOM_ARG_FB:
idx = U8(*ptr);
(*ptr)++;
- gctx->scratch[((gctx->fb_base + idx) / 4)] = val;
+ if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
+ DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n",
+ gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
+ } else
+ gctx->scratch[(gctx->fb_base / 4) + idx] = val;
DEBUG("FB[0x%02X]", idx);
break;
case ATOM_ARG_PLL:
@@ -1370,11 +1379,13 @@ int atom_allocate_fb_scratch(struct atom_context *ctx)
usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
}
+ ctx->scratch_size_bytes = 0;
if (usage_bytes == 0)
usage_bytes = 20 * 1024;
/* allocate some scratch memory */
ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
if (!ctx->scratch)
return -ENOMEM;
+ ctx->scratch_size_bytes = usage_bytes;
return 0;
}
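The two atom.c hunks bound every ATOM_ARG_FB access against the newly recorded scratch_size_bytes instead of trusting the BIOS-supplied index, and atom_allocate_fb_scratch() now records that size. A userspace model of the guard (struct scratch_ctx and scratch_read() are invented; unlike the patch, the check here covers the end of the 4-byte word so the demo stays strictly in bounds):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct scratch_ctx {
    uint32_t *scratch;
    uint32_t scratch_size_bytes;
    uint32_t fb_base;                /* byte offset of the firmware scratch window */
};

static uint32_t scratch_read(const struct scratch_ctx *ctx, uint8_t idx)
{
    if (ctx->fb_base + ((uint32_t)idx + 1) * 4 > ctx->scratch_size_bytes) {
        fprintf(stderr, "fb read beyond scratch region: %u vs. %u\n",
                ctx->fb_base + idx * 4, ctx->scratch_size_bytes);
        return 0;                    /* fail safe instead of reading out of bounds */
    }
    return ctx->scratch[ctx->fb_base / 4 + idx];
}

int main(void)
{
    struct scratch_ctx ctx;

    ctx.scratch_size_bytes = 20 * 1024;              /* same default as the patch */
    ctx.scratch = calloc(1, ctx.scratch_size_bytes);
    ctx.fb_base = ctx.scratch_size_bytes - 64;       /* window near the end */
    ctx.scratch[ctx.fb_base / 4 + 3] = 0xdeadbeef;

    printf("in range:     %#x\n", (unsigned)scratch_read(&ctx, 3));
    printf("out of range: %#x\n", (unsigned)scratch_read(&ctx, 200));
    free(ctx.scratch);
    return 0;
}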
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
index a589a55b223..93cfe2086ba 100644
--- a/drivers/gpu/drm/radeon/atom.h
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -137,6 +137,7 @@ struct atom_context {
int cs_equal, cs_above;
int io_mode;
uint32_t *scratch;
+ int scratch_size_bytes;
};
extern int atom_debug;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index c742944d380..a515b2a09d8 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -466,7 +466,7 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,
return;
}
args.v2.ucEnable = enable;
- if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK))
+ if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE41(rdev))
args.v2.ucEnable = ATOM_DISABLE;
} else if (ASIC_IS_DCE3(rdev)) {
args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 7ad43c6b1db..79e8ebc0530 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -115,6 +115,7 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
u8 msg[20];
int msg_bytes = send_bytes + 4;
u8 ack;
+ unsigned retry;
if (send_bytes > 16)
return -1;
@@ -125,20 +126,22 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
msg[3] = (msg_bytes << 4) | (send_bytes - 1);
memcpy(&msg[4], send, send_bytes);
- while (1) {
+ for (retry = 0; retry < 4; retry++) {
ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
msg, msg_bytes, NULL, 0, delay, &ack);
- if (ret < 0)
+ if (ret == -EBUSY)
+ continue;
+ else if (ret < 0)
return ret;
if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
- break;
+ return send_bytes;
else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
udelay(400);
else
return -EIO;
}
- return send_bytes;
+ return -EIO;
}
static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
@@ -149,26 +152,31 @@ static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
int msg_bytes = 4;
u8 ack;
int ret;
+ unsigned retry;
msg[0] = address;
msg[1] = address >> 8;
msg[2] = AUX_NATIVE_READ << 4;
msg[3] = (msg_bytes << 4) | (recv_bytes - 1);
- while (1) {
+ for (retry = 0; retry < 4; retry++) {
ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
msg, msg_bytes, recv, recv_bytes, delay, &ack);
- if (ret == 0)
- return -EPROTO;
- if (ret < 0)
+ if (ret == -EBUSY)
+ continue;
+ else if (ret < 0)
return ret;
if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
return ret;
else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
udelay(400);
+ else if (ret == 0)
+ return -EPROTO;
else
return -EIO;
}
+
+ return -EIO;
}
static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector,
@@ -232,7 +240,9 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
for (retry = 0; retry < 4; retry++) {
ret = radeon_process_aux_ch(auxch,
msg, msg_bytes, reply, reply_bytes, 0, &ack);
- if (ret < 0) {
+ if (ret == -EBUSY)
+ continue;
+ else if (ret < 0) {
DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
return ret;
}
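All three AUX helpers above trade unbounded while (1) loops for four attempts, treat -EBUSY as "try again", and fall out with -EIO once the retries are spent. A compact userspace model of that loop shape (aux_xfer() and the reply codes are invented stand-ins; the kernel additionally udelay()s after a DEFER reply):

#include <errno.h>
#include <stdio.h>

#define REPLY_ACK   0
#define REPLY_DEFER 1

/* pretend channel: busy twice, defers once, then ACKs */
static int aux_xfer(unsigned attempt, int *reply)
{
    if (attempt < 2)
        return -EBUSY;
    *reply = (attempt == 2) ? REPLY_DEFER : REPLY_ACK;
    return 0;
}

static int aux_native_write(int send_bytes)
{
    unsigned retry;

    for (retry = 0; retry < 4; retry++) {    /* bounded: never spin forever */
        int reply;
        int ret = aux_xfer(retry, &reply);

        if (ret == -EBUSY)
            continue;                        /* channel busy, just retry */
        else if (ret < 0)
            return ret;                      /* hard failure, give up now */

        if (reply == REPLY_ACK)
            return send_bytes;               /* success */
        if (reply != REPLY_DEFER)
            return -EIO;                     /* unexpected reply */
        /* DEFER: sink wants more time; loop again (kernel udelays here) */
    }
    return -EIO;                             /* retries exhausted */
}

int main(void)
{
    printf("aux_native_write -> %d\n", aux_native_write(4));
    return 0;
}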
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index dc0a5b56c81..c4ffa14fb2f 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1404,7 +1404,8 @@ int evergreen_cp_resume(struct radeon_device *rdev)
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
WREG32(CP_RB_RPTR_WR, 0);
- WREG32(CP_RB_WPTR, 0);
+ rdev->cp.wptr = 0;
+ WREG32(CP_RB_WPTR, rdev->cp.wptr);
/* set the wb address whether it's enabled or not */
WREG32(CP_RB_RPTR_ADDR,
@@ -1426,7 +1427,6 @@ int evergreen_cp_resume(struct radeon_device *rdev)
WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
rdev->cp.rptr = RREG32(CP_RB_RPTR);
- rdev->cp.wptr = RREG32(CP_RB_WPTR);
evergreen_cp_start(rdev);
rdev->cp.ready = true;
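The two hunks above stop reading CP_RB_WPTR back after the reset (the register can hold a stale value on resume) and instead make the driver-side rdev->cp.wptr the authoritative copy, zeroing it and writing it out; the same change is repeated for ni.c, r100.c and r600.c below. A tiny model of that ordering (the register variables and cp_resume() here are invented):

#include <stdint.h>
#include <stdio.h>

struct ring {
    uint32_t wptr;                       /* driver-side write pointer */
    uint32_t rptr;
};

static uint32_t reg_wptr = 0x1234;       /* pretend stale hardware value after resume */
static uint32_t reg_rptr;

static void wreg_wptr(uint32_t v) { reg_wptr = v; }
static uint32_t rreg_rptr(void)   { return reg_rptr; }

static void cp_resume(struct ring *ring)
{
    ring->wptr = 0;                      /* the driver copy is authoritative... */
    wreg_wptr(ring->wptr);               /* ...and is pushed to the hardware */
    ring->rptr = rreg_rptr();            /* the read pointer still comes from the chip */
}

int main(void)
{
    struct ring ring = { 0 };

    cp_resume(&ring);
    printf("wptr=%u reg_wptr=%u rptr=%u\n", ring.wptr, reg_wptr, ring.rptr);
    return 0;
}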
@@ -1590,48 +1590,6 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
return backend_map;
}
-static void evergreen_program_channel_remap(struct radeon_device *rdev)
-{
- u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;
-
- tmp = RREG32(MC_SHARED_CHMAP);
- switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
- case 0:
- case 1:
- case 2:
- case 3:
- default:
- /* default mapping */
- mc_shared_chremap = 0x00fac688;
- break;
- }
-
- switch (rdev->family) {
- case CHIP_HEMLOCK:
- case CHIP_CYPRESS:
- case CHIP_BARTS:
- tcp_chan_steer_lo = 0x54763210;
- tcp_chan_steer_hi = 0x0000ba98;
- break;
- case CHIP_JUNIPER:
- case CHIP_REDWOOD:
- case CHIP_CEDAR:
- case CHIP_PALM:
- case CHIP_SUMO:
- case CHIP_SUMO2:
- case CHIP_TURKS:
- case CHIP_CAICOS:
- default:
- tcp_chan_steer_lo = 0x76543210;
- tcp_chan_steer_hi = 0x0000ba98;
- break;
- }
-
- WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
- WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
- WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
-}
-
static void evergreen_gpu_init(struct radeon_device *rdev)
{
u32 cc_rb_backend_disable = 0;
@@ -2078,8 +2036,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
- evergreen_program_channel_remap(rdev);
-
num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
grbm_gfx_index = INSTANCE_BROADCAST_WRITES;
@@ -3171,21 +3127,23 @@ int evergreen_suspend(struct radeon_device *rdev)
}
int evergreen_copy_blit(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_pages, struct radeon_fence *fence)
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence *fence)
{
int r;
mutex_lock(&rdev->r600_blit.mutex);
rdev->r600_blit.vb_ib = NULL;
- r = evergreen_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
+ r = evergreen_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
if (r) {
if (rdev->r600_blit.vb_ib)
radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
mutex_unlock(&rdev->r600_blit.mutex);
return r;
}
- evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
+ evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
evergreen_blit_done_copy(rdev, fence);
mutex_unlock(&rdev->r600_blit.mutex);
return 0;
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index cbf57d75d92..8c79ca97753 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -569,36 +569,6 @@ static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
return backend_map;
}
-static void cayman_program_channel_remap(struct radeon_device *rdev)
-{
- u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;
-
- tmp = RREG32(MC_SHARED_CHMAP);
- switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
- case 0:
- case 1:
- case 2:
- case 3:
- default:
- /* default mapping */
- mc_shared_chremap = 0x00fac688;
- break;
- }
-
- switch (rdev->family) {
- case CHIP_CAYMAN:
- default:
- //tcp_chan_steer_lo = 0x54763210
- tcp_chan_steer_lo = 0x76543210;
- tcp_chan_steer_hi = 0x0000ba98;
- break;
- }
-
- WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
- WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
- WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
-}
-
static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev,
u32 disable_mask_per_se,
u32 max_disable_mask_per_se,
@@ -842,8 +812,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
- cayman_program_channel_remap(rdev);
-
/* primary versions */
WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
@@ -1187,7 +1155,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
- WREG32(CP_RB0_WPTR, 0);
+ rdev->cp.wptr = 0;
+ WREG32(CP_RB0_WPTR, rdev->cp.wptr);
/* set the wb address whether it's enabled or not */
WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1207,7 +1176,6 @@ int cayman_cp_resume(struct radeon_device *rdev)
WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8);
rdev->cp.rptr = RREG32(CP_RB0_RPTR);
- rdev->cp.wptr = RREG32(CP_RB0_WPTR);
/* ring1 - compute only */
/* Set ring buffer size */
@@ -1220,7 +1188,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
- WREG32(CP_RB1_WPTR, 0);
+ rdev->cp1.wptr = 0;
+ WREG32(CP_RB1_WPTR, rdev->cp1.wptr);
/* set the wb address whether it's enabled or not */
WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1232,7 +1201,6 @@ int cayman_cp_resume(struct radeon_device *rdev)
WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8);
rdev->cp1.rptr = RREG32(CP_RB1_RPTR);
- rdev->cp1.wptr = RREG32(CP_RB1_WPTR);
/* ring2 - compute only */
/* Set ring buffer size */
@@ -1245,7 +1213,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
- WREG32(CP_RB2_WPTR, 0);
+ rdev->cp2.wptr = 0;
+ WREG32(CP_RB2_WPTR, rdev->cp2.wptr);
/* set the wb address whether it's enabled or not */
WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1257,7 +1226,6 @@ int cayman_cp_resume(struct radeon_device *rdev)
WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8);
rdev->cp2.rptr = RREG32(CP_RB2_RPTR);
- rdev->cp2.wptr = RREG32(CP_RB2_WPTR);
/* start the rings */
cayman_cp_start(rdev);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index f2204cb1ccd..7fcdbbbf297 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -721,11 +721,11 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
int r100_copy_blit(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
- unsigned num_pages,
+ unsigned num_gpu_pages,
struct radeon_fence *fence)
{
uint32_t cur_pages;
- uint32_t stride_bytes = PAGE_SIZE;
+ uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
uint32_t pitch;
uint32_t stride_pixels;
unsigned ndw;
@@ -737,7 +737,7 @@ int r100_copy_blit(struct radeon_device *rdev,
/* radeon pitch is /64 */
pitch = stride_bytes / 64;
stride_pixels = stride_bytes / 4;
- num_loops = DIV_ROUND_UP(num_pages, 8191);
+ num_loops = DIV_ROUND_UP(num_gpu_pages, 8191);
/* Ask for enough room for blit + flush + fence */
ndw = 64 + (10 * num_loops);
@@ -746,12 +746,12 @@ int r100_copy_blit(struct radeon_device *rdev,
DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
return -EINVAL;
}
- while (num_pages > 0) {
- cur_pages = num_pages;
+ while (num_gpu_pages > 0) {
+ cur_pages = num_gpu_pages;
if (cur_pages > 8191) {
cur_pages = 8191;
}
- num_pages -= cur_pages;
+ num_gpu_pages -= cur_pages;
/* pages are in Y direction - height
page width in X direction - width */
@@ -773,8 +773,8 @@ int r100_copy_blit(struct radeon_device *rdev,
radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
- radeon_ring_write(rdev, num_pages);
- radeon_ring_write(rdev, num_pages);
+ radeon_ring_write(rdev, num_gpu_pages);
+ radeon_ring_write(rdev, num_gpu_pages);
radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));
}
radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
@@ -990,7 +990,8 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
/* Force read & write ptr to 0 */
WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
WREG32(RADEON_CP_RB_RPTR_WR, 0);
- WREG32(RADEON_CP_RB_WPTR, 0);
+ rdev->cp.wptr = 0;
+ WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
/* set the wb address whether it's enabled or not */
WREG32(R_00070C_CP_RB_RPTR_ADDR,
@@ -1007,9 +1008,6 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
WREG32(RADEON_CP_RB_CNTL, tmp);
udelay(10);
rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
- rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
- /* protect against crazy HW on resume */
- rdev->cp.wptr &= rdev->cp.ptr_mask;
/* Set cp mode to bus mastering & enable cp*/
WREG32(RADEON_CP_CSQ_MODE,
REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index f2405830041..a1f3ba063c2 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -84,7 +84,7 @@ static int r200_get_vtx_size_0(uint32_t vtx_fmt_0)
int r200_copy_dma(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
- unsigned num_pages,
+ unsigned num_gpu_pages,
struct radeon_fence *fence)
{
uint32_t size;
@@ -93,7 +93,7 @@ int r200_copy_dma(struct radeon_device *rdev,
int r = 0;
/* radeon pitch is /64 */
- size = num_pages << PAGE_SHIFT;
+ size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;
num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
r = radeon_ring_lock(rdev, num_loops * 4 + 64);
if (r) {
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index aa5571b73aa..720dd99163f 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2209,7 +2209,8 @@ int r600_cp_resume(struct radeon_device *rdev)
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
WREG32(CP_RB_RPTR_WR, 0);
- WREG32(CP_RB_WPTR, 0);
+ rdev->cp.wptr = 0;
+ WREG32(CP_RB_WPTR, rdev->cp.wptr);
/* set the wb address whether it's enabled or not */
WREG32(CP_RB_RPTR_ADDR,
@@ -2231,7 +2232,6 @@ int r600_cp_resume(struct radeon_device *rdev)
WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
rdev->cp.rptr = RREG32(CP_RB_RPTR);
- rdev->cp.wptr = RREG32(CP_RB_WPTR);
r600_cp_start(rdev);
rdev->cp.ready = true;
@@ -2353,21 +2353,23 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
}
int r600_copy_blit(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_pages, struct radeon_fence *fence)
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence *fence)
{
int r;
mutex_lock(&rdev->r600_blit.mutex);
rdev->r600_blit.vb_ib = NULL;
- r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
+ r = r600_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
if (r) {
if (rdev->r600_blit.vb_ib)
radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
mutex_unlock(&rdev->r600_blit.mutex);
return r;
}
- r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
+ r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
r600_blit_done_copy(rdev, fence);
mutex_unlock(&rdev->r600_blit.mutex);
return 0;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 32807baf55e..c1e056b35b2 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -322,6 +322,7 @@ union radeon_gart_table {
#define RADEON_GPU_PAGE_SIZE 4096
#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
+#define RADEON_GPU_PAGE_SHIFT 12
struct radeon_gart {
dma_addr_t table_addr;
@@ -914,17 +915,17 @@ struct radeon_asic {
int (*copy_blit)(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
- unsigned num_pages,
+ unsigned num_gpu_pages,
struct radeon_fence *fence);
int (*copy_dma)(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
- unsigned num_pages,
+ unsigned num_gpu_pages,
struct radeon_fence *fence);
int (*copy)(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
- unsigned num_pages,
+ unsigned num_gpu_pages,
struct radeon_fence *fence);
uint32_t (*get_engine_clock)(struct radeon_device *rdev);
void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 3d7a0d7c6a9..3dedaa07aac 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -75,7 +75,7 @@ uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);
int r100_copy_blit(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
- unsigned num_pages,
+ unsigned num_gpu_pages,
struct radeon_fence *fence);
int r100_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
@@ -143,7 +143,7 @@ extern void r100_post_page_flip(struct radeon_device *rdev, int crtc);
extern int r200_copy_dma(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
- unsigned num_pages,
+ unsigned num_gpu_pages,
struct radeon_fence *fence);
void r200_set_safe_registers(struct radeon_device *rdev);
@@ -311,7 +311,7 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r600_ring_test(struct radeon_device *rdev);
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
- unsigned num_pages, struct radeon_fence *fence);
+ unsigned num_gpu_pages, struct radeon_fence *fence);
void r600_hpd_init(struct radeon_device *rdev);
void r600_hpd_fini(struct radeon_device *rdev);
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -403,7 +403,7 @@ void evergreen_bandwidth_update(struct radeon_device *rdev);
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int evergreen_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
- unsigned num_pages, struct radeon_fence *fence);
+ unsigned num_gpu_pages, struct radeon_fence *fence);
void evergreen_hpd_init(struct radeon_device *rdev);
void evergreen_hpd_fini(struct radeon_device *rdev);
bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index c4b8741dbf5..449c3d8c683 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -68,11 +68,11 @@ void radeon_connector_hotplug(struct drm_connector *connector)
if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
int saved_dpms = connector->dpms;
- if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) &&
- radeon_dp_needs_link_train(radeon_connector))
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
- else
+ /* Only turn off the display if it's physically disconnected */
+ if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ else if (radeon_dp_needs_link_train(radeon_connector))
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
connector->dpms = saved_dpms;
}
}
@@ -1303,23 +1303,14 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
/* get the DPCD from the bridge */
radeon_dp_getdpcd(radeon_connector);
- if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
- ret = connector_status_connected;
- else {
- /* need to setup ddc on the bridge */
- if (encoder)
- radeon_atom_ext_encoder_setup_ddc(encoder);
+ if (encoder) {
+ /* setup ddc on the bridge */
+ radeon_atom_ext_encoder_setup_ddc(encoder);
if (radeon_ddc_probe(radeon_connector,
- radeon_connector->requires_extended_probe))
+ radeon_connector->requires_extended_probe)) /* try DDC */
ret = connector_status_connected;
- }
-
- if ((ret == connector_status_disconnected) &&
- radeon_connector->dac_load_detect) {
- struct drm_encoder *encoder = radeon_best_single_encoder(connector);
- struct drm_encoder_helper_funcs *encoder_funcs;
- if (encoder) {
- encoder_funcs = encoder->helper_private;
+ else if (radeon_connector->dac_load_detect) { /* try load detection */
+ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
ret = encoder_funcs->detect(encoder, connector);
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 3189a7efb2e..fde25c0d65a 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -208,23 +208,25 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
int xorigin = 0, yorigin = 0;
int w = radeon_crtc->cursor_width;
- if (x < 0)
- xorigin = -x + 1;
- if (y < 0)
- yorigin = -y + 1;
- if (xorigin >= CURSOR_WIDTH)
- xorigin = CURSOR_WIDTH - 1;
- if (yorigin >= CURSOR_HEIGHT)
- yorigin = CURSOR_HEIGHT - 1;
-
if (ASIC_IS_AVIVO(rdev)) {
- int i = 0;
- struct drm_crtc *crtc_p;
-
/* avivo cursors are offset into the total surface */
x += crtc->x;
y += crtc->y;
- DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
+ }
+ DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
+
+ if (x < 0) {
+ xorigin = min(-x, CURSOR_WIDTH - 1);
+ x = 0;
+ }
+ if (y < 0) {
+ yorigin = min(-y, CURSOR_HEIGHT - 1);
+ y = 0;
+ }
+
+ if (ASIC_IS_AVIVO(rdev)) {
+ int i = 0;
+ struct drm_crtc *crtc_p;
/* avivo cursor image can't end on 128 pixel boundary or
* go past the end of the frame if both crtcs are enabled
@@ -253,16 +255,12 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
radeon_lock_cursor(crtc, true);
if (ASIC_IS_DCE4(rdev)) {
- WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset,
- ((xorigin ? 0 : x) << 16) |
- (yorigin ? 0 : y));
+ WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset,
((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
} else if (ASIC_IS_AVIVO(rdev)) {
- WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset,
- ((xorigin ? 0 : x) << 16) |
- (yorigin ? 0 : y));
+ WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset,
((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
@@ -276,8 +274,8 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
| yorigin));
WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset,
(RADEON_CUR_LOCK
- | ((xorigin ? 0 : x) << 16)
- | (yorigin ? 0 : y)));
+ | (x << 16)
+ | y));
/* offset is from DISP(2)_BASE_ADDRESS */
WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset +
(yorigin * 256)));
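radeon_crtc_cursor_move() now clamps a cursor that hangs off the top or left edge: the position written to the hardware is forced to 0 and the overhang is carried in the hot-spot registers, instead of zeroing the coordinate outright. A minimal sketch of that clamping (clamp_cursor() is an invented helper; the 64x64 cursor size is an assumption for the demo):

#include <stdio.h>

#define CURSOR_WIDTH  64
#define CURSOR_HEIGHT 64

static int min_i(int a, int b) { return a < b ? a : b; }

/* negative coordinates become a zero position plus a hot-spot offset */
static void clamp_cursor(int *x, int *y, int *xorigin, int *yorigin)
{
    *xorigin = 0;
    *yorigin = 0;
    if (*x < 0) {
        *xorigin = min_i(-*x, CURSOR_WIDTH - 1);
        *x = 0;
    }
    if (*y < 0) {
        *yorigin = min_i(-*y, CURSOR_HEIGHT - 1);
        *y = 0;
    }
}

int main(void)
{
    int x = -10, y = 5, xo, yo;

    clamp_cursor(&x, &y, &xo, &yo);
    printf("pos=(%d,%d) hotspot=(%d,%d)\n", x, y, xo, yo);  /* pos=(0,5) hotspot=(10,0) */
    return 0;
}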
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 6cc17fb96a5..6adb3e58aff 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -473,8 +473,8 @@ pflip_cleanup:
spin_lock_irqsave(&dev->event_lock, flags);
radeon_crtc->unpin_work = NULL;
unlock_free:
- drm_gem_object_unreference_unlocked(old_radeon_fb->obj);
spin_unlock_irqrestore(&dev->event_lock, flags);
+ drm_gem_object_unreference_unlocked(old_radeon_fb->obj);
radeon_fence_unref(&work->fence);
kfree(work);
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 319d85d7e75..eb3f6dc6df8 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -1507,7 +1507,14 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
args.ucAction = ATOM_ENABLE;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ /* workaround for DVOOutputControl on some RS690 systems */
+ if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DDI) {
+ u32 reg = RREG32(RADEON_BIOS_3_SCRATCH);
+ WREG32(RADEON_BIOS_3_SCRATCH, reg & ~ATOM_S3_DFP2I_ACTIVE);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ WREG32(RADEON_BIOS_3_SCRATCH, reg);
+ } else
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
args.ucAction = ATOM_LCD_BLON;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
@@ -1631,7 +1638,17 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
break;
case 2:
args.v2.ucCRTC = radeon_crtc->crtc_id;
- args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
+ if (radeon_encoder_is_dp_bridge(encoder)) {
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+ args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS;
+ else if (connector->connector_type == DRM_MODE_CONNECTOR_VGA)
+ args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT;
+ else
+ args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
+ } else
+ args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
@@ -1748,9 +1765,17 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
/* DCE4/5 */
if (ASIC_IS_DCE4(rdev)) {
dig = radeon_encoder->enc_priv;
- if (ASIC_IS_DCE41(rdev))
- return radeon_crtc->crtc_id;
- else {
+ if (ASIC_IS_DCE41(rdev)) {
+ /* ontario follows DCE4 */
+ if (rdev->family == CHIP_PALM) {
+ if (dig->linkb)
+ return 1;
+ else
+ return 0;
+ } else
+ /* llano follows DCE3.2 */
+ return radeon_crtc->crtc_id;
+ } else {
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
if (dig->linkb)
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 9b86fb0e412..0b5468bfaf5 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -277,7 +277,12 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
DRM_ERROR("Trying to move memory with CP turned off.\n");
return -EINVAL;
}
- r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
+
+ BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
+
+ r = radeon_copy(rdev, old_start, new_start,
+ new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
+ fence);
/* FIXME: handle copy error */
r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
evict, no_wait_reserve, no_wait_gpu, new_mem);
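radeon_move_blit() now hands radeon_copy() GPU pages rather than TTM's CPU pages, converting with PAGE_SIZE / RADEON_GPU_PAGE_SIZE and asserting at build time that the division is exact. A sketch of the conversion, assuming 4 KiB CPU pages (on a 64 KiB-page kernel the factor would be 16):

#include <stdio.h>

#define PAGE_SIZE            4096u      /* assumed CPU page size for this demo */
#define RADEON_GPU_PAGE_SIZE 4096u

/* compile-time check mirroring BUILD_BUG_ON(): PAGE_SIZE must be a whole
 * number of GPU pages for the conversion below to be exact */
typedef char page_size_must_divide[(PAGE_SIZE % RADEON_GPU_PAGE_SIZE) == 0 ? 1 : -1];

static unsigned long cpu_pages_to_gpu_pages(unsigned long num_pages)
{
    return num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
}

int main(void)
{
    printf("8 CPU pages -> %lu GPU pages\n", cpu_pages_to_gpu_pages(8));
    return 0;
}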
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 4720d000d44..b13c2eedc32 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -536,55 +536,6 @@ static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
return backend_map;
}
-static void rv770_program_channel_remap(struct radeon_device *rdev)
-{
- u32 tcp_chan_steer, mc_shared_chremap, tmp;
- bool force_no_swizzle;
-
- switch (rdev->family) {
- case CHIP_RV770:
- case CHIP_RV730:
- force_no_swizzle = false;
- break;
- case CHIP_RV710:
- case CHIP_RV740:
- default:
- force_no_swizzle = true;
- break;
- }
-
- tmp = RREG32(MC_SHARED_CHMAP);
- switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
- case 0:
- case 1:
- default:
- /* default mapping */
- mc_shared_chremap = 0x00fac688;
- break;
- case 2:
- case 3:
- if (force_no_swizzle)
- mc_shared_chremap = 0x00fac688;
- else
- mc_shared_chremap = 0x00bbc298;
- break;
- }
-
- if (rdev->family == CHIP_RV740)
- tcp_chan_steer = 0x00ef2a60;
- else
- tcp_chan_steer = 0x00fac688;
-
- /* RV770 CE has special chremap setup */
- if (rdev->pdev->device == 0x944e) {
- tcp_chan_steer = 0x00b08b08;
- mc_shared_chremap = 0x00b08b08;
- }
-
- WREG32(TCP_CHAN_STEER, tcp_chan_steer);
- WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
-}
-
static void rv770_gpu_init(struct radeon_device *rdev)
{
int i, j, num_qd_pipes;
@@ -785,8 +736,6 @@ static void rv770_gpu_init(struct radeon_device *rdev)
WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
- rv770_program_channel_remap(rdev);
-
WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index a4d38d85909..ef06194c5aa 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -394,7 +394,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
if (bo->ttm == NULL) {
- ret = ttm_bo_add_ttm(bo, false);
+ bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
+ ret = ttm_bo_add_ttm(bo, zero);
if (ret)
goto out_err;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index ae3c6f5dd2b..082fcaea583 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -321,7 +321,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
struct ttm_tt *ttm = bo->ttm;
struct ttm_mem_reg *old_mem = &bo->mem;
- struct ttm_mem_reg old_copy;
+ struct ttm_mem_reg old_copy = *old_mem;
void *old_iomap;
void *new_iomap;
int ret;
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 1130a898712..22a4a051f22 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -69,7 +69,7 @@ config HID_ACRUX
Say Y here if you want to enable support for ACRUX game controllers.
config HID_ACRUX_FF
- tristate "ACRUX force feedback support"
+ bool "ACRUX force feedback support"
depends on HID_ACRUX
select INPUT_FF_MEMLESS
---help---
@@ -245,6 +245,15 @@ config HID_LOGITECH
---help---
Support for Logitech devices that are not fully compliant with HID standard.
+config HID_LOGITECH_DJ
+ tristate "Logitech Unifying receivers full support"
+ depends on HID_LOGITECH
+ default m
+ ---help---
+ Say Y if you want support for Logitech Unifying receivers and devices.
+ Unifying receivers are capable of pairing up to 6 Logitech compliant
+ devices to the same receiver.
+
config LOGITECH_FF
bool "Logitech force feedback support"
depends on HID_LOGITECH
@@ -278,13 +287,21 @@ config LOGIG940_FF
Say Y here if you want to enable force feedback support for Logitech
Flight System G940 devices.
-config LOGIWII_FF
- bool "Logitech Speed Force Wireless force feedback support"
+config LOGIWHEELS_FF
+ bool "Logitech wheels configuration and force feedback support"
depends on HID_LOGITECH
select INPUT_FF_MEMLESS
+ default LOGITECH_FF
help
- Say Y here if you want to enable force feedback support for Logitech
- Speed Force Wireless (Wii) devices.
+ Say Y here if you want to enable force feedback and range setting
+ support for the following Logitech wheels:
+ - Logitech Driving Force
+ - Logitech Driving Force Pro
+ - Logitech Driving Force GT
+ - Logitech G25
+ - Logitech G27
+ - Logitech MOMO/MOMO 2
+ - Logitech Formula Force EX
config HID_MAGICMOUSE
tristate "Apple MagicMouse multi-touch support"
@@ -328,6 +345,7 @@ config HID_MULTITOUCH
- Hanvon dual touch panels
- Ilitek dual touch panels
- IrTouch Infrared USB panels
+ - LG Display panels (Dell ST2220Tc)
- Lumio CrystalTouch panels
- MosArt dual-touch panels
- PenMount dual touch panels
@@ -441,6 +459,13 @@ config HID_PICOLCD_LEDS
---help---
Provide access to PicoLCD's GPO pins via leds class.
+config HID_PRIMAX
+ tristate "Primax non-fully HID-compliant devices"
+ depends on USB_HID
+ ---help---
+ Support for Primax devices that are not fully compliant with the
+ HID standard.
+
config HID_QUANTA
tristate "Quanta Optical Touch panels"
depends on USB_HID
@@ -539,7 +564,11 @@ config HID_SMARTJOYPLUS
tristate "SmartJoy PLUS PS2/USB adapter support"
depends on USB_HID
---help---
- Support for SmartJoy PLUS PS2/USB adapter.
+ Support for SmartJoy PLUS PS2/USB adapter, Super Dual Box,
+ Super Joy Box 3 Pro, Super Dual Box Pro, and Super Joy Box 5 Pro.
+
+ Note that DDR (Dance Dance Revolution) mode is not supported, nor
+ are pressure-sensitive buttons on the pro models.
config SMARTJOYPLUS_FF
bool "SmartJoy PLUS PS2/USB adapter force feedback support"
@@ -590,6 +619,7 @@ config HID_WIIMOTE
tristate "Nintendo Wii Remote support"
depends on BT_HIDP
depends on LEDS_CLASS
+ select POWER_SUPPLY
---help---
Support for the Nintendo Wii Remote bluetooth device.
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 0a0a38e9fd2..1e0d2a638b2 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -21,7 +21,7 @@ endif
ifdef CONFIG_LOGIG940_FF
hid-logitech-y += hid-lg3ff.o
endif
-ifdef CONFIG_LOGIWII_FF
+ifdef CONFIG_LOGIWHEELS_FF
hid-logitech-y += hid-lg4ff.o
endif
@@ -43,6 +43,7 @@ obj-$(CONFIG_HID_KEYTOUCH) += hid-keytouch.o
obj-$(CONFIG_HID_KYE) += hid-kye.o
obj-$(CONFIG_HID_LCPOWER) += hid-lcpower.o
obj-$(CONFIG_HID_LOGITECH) += hid-logitech.o
+obj-$(CONFIG_HID_LOGITECH_DJ) += hid-logitech-dj.o
obj-$(CONFIG_HID_MAGICMOUSE) += hid-magicmouse.o
obj-$(CONFIG_HID_MICROSOFT) += hid-microsoft.o
obj-$(CONFIG_HID_MONTEREY) += hid-monterey.o
@@ -54,6 +55,7 @@ obj-$(CONFIG_HID_QUANTA) += hid-quanta.o
obj-$(CONFIG_HID_PANTHERLORD) += hid-pl.o
obj-$(CONFIG_HID_PETALYNX) += hid-petalynx.o
obj-$(CONFIG_HID_PICOLCD) += hid-picolcd.o
+obj-$(CONFIG_HID_PRIMAX) += hid-primax.o
obj-$(CONFIG_HID_ROCCAT) += hid-roccat.o
obj-$(CONFIG_HID_ROCCAT_COMMON) += hid-roccat-common.o
obj-$(CONFIG_HID_ROCCAT_ARVO) += hid-roccat-arvo.o
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 18b3bc646bf..9bc7b03269d 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -183,6 +183,9 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
if (hid->product >= USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI &&
hid->product <= USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS)
table = macbookair_fn_keys;
+ else if (hid->product >= USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI &&
+ hid->product <= USB_DEVICE_ID_APPLE_WELLSPRING6_JIS)
+ table = macbookair_fn_keys;
else if (hid->product < 0x21d || hid->product >= 0x300)
table = powerbook_fn_keys;
else
@@ -493,6 +496,18 @@ static const struct hid_device_id apple_devices[] = {
.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI),
+ .driver_data = APPLE_HAS_FN },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO),
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS),
+ .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI),
+ .driver_data = APPLE_HAS_FN },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO),
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS),
+ .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
diff --git a/drivers/hid/hid-axff.c b/drivers/hid/hid-axff.c
index 121514149e0..3bdb4500f95 100644
--- a/drivers/hid/hid-axff.c
+++ b/drivers/hid/hid-axff.c
@@ -6,7 +6,7 @@
* Xbox 360 controller.
*
* 1a34:0802 "ACRUX USB GAMEPAD 8116"
- * - tested with a EXEQ EQ-PCU-02090 game controller.
+ * - tested with an EXEQ EQ-PCU-02090 game controller.
*
* Copyright (c) 2010 Sergei Kolzun <x0r@dv-life.ru>
*/
@@ -45,7 +45,10 @@ static int axff_play(struct input_dev *dev, void *data, struct ff_effect *effect
{
struct hid_device *hid = input_get_drvdata(dev);
struct axff_device *axff = data;
+ struct hid_report *report = axff->report;
+ int field_count = 0;
int left, right;
+ int i, j;
left = effect->u.rumble.strong_magnitude;
right = effect->u.rumble.weak_magnitude;
@@ -55,10 +58,14 @@ static int axff_play(struct input_dev *dev, void *data, struct ff_effect *effect
left = left * 0xff / 0xffff;
right = right * 0xff / 0xffff;
- axff->report->field[0]->value[0] = left;
- axff->report->field[1]->value[0] = right;
- axff->report->field[2]->value[0] = left;
- axff->report->field[3]->value[0] = right;
+ for (i = 0; i < report->maxfield; i++) {
+ for (j = 0; j < report->field[i]->report_count; j++) {
+ report->field[i]->value[j] =
+ field_count % 2 ? right : left;
+ field_count++;
+ }
+ }
+
dbg_hid("running with 0x%02x 0x%02x", left, right);
usbhid_submit_report(hid, axff->report, USB_DIR_OUT);
@@ -72,6 +79,8 @@ static int axff_init(struct hid_device *hid)
struct hid_input *hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
struct list_head *report_list =&hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct input_dev *dev = hidinput->input;
+ int field_count = 0;
+ int i, j;
int error;
if (list_empty(report_list)) {
@@ -80,9 +89,16 @@ static int axff_init(struct hid_device *hid)
}
report = list_first_entry(report_list, struct hid_report, list);
+ for (i = 0; i < report->maxfield; i++) {
+ for (j = 0; j < report->field[i]->report_count; j++) {
+ report->field[i]->value[j] = 0x00;
+ field_count++;
+ }
+ }
- if (report->maxfield < 4) {
- hid_err(hid, "no fields in the report: %d\n", report->maxfield);
+ if (field_count < 4) {
+ hid_err(hid, "not enough fields in the report: %d\n",
+ field_count);
return -ENODEV;
}
@@ -97,13 +113,9 @@ static int axff_init(struct hid_device *hid)
goto err_free_mem;
axff->report = report;
- axff->report->field[0]->value[0] = 0x00;
- axff->report->field[1]->value[0] = 0x00;
- axff->report->field[2]->value[0] = 0x00;
- axff->report->field[3]->value[0] = 0x00;
usbhid_submit_report(hid, axff->report, USB_DIR_OUT);
- hid_info(hid, "Force Feedback for ACRUX game controllers by Sergei Kolzun<x0r@dv-life.ru>\n");
+ hid_info(hid, "Force Feedback for ACRUX game controllers by Sergei Kolzun <x0r@dv-life.ru>\n");
return 0;
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 242353df3dc..91adcc5bad2 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -29,6 +29,7 @@
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
+#include <linux/semaphore.h>
#include <linux/hid.h>
#include <linux/hiddev.h>
@@ -1085,16 +1086,25 @@ int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int i
struct hid_report *report;
char *buf;
unsigned int i;
- int ret;
+ int ret = 0;
- if (!hid || !hid->driver)
+ if (!hid)
return -ENODEV;
+
+ if (down_trylock(&hid->driver_lock))
+ return -EBUSY;
+
+ if (!hid->driver) {
+ ret = -ENODEV;
+ goto unlock;
+ }
report_enum = hid->report_enum + type;
hdrv = hid->driver;
if (!size) {
dbg_hid("empty report\n");
- return -1;
+ ret = -1;
+ goto unlock;
}
buf = kmalloc(sizeof(char) * HID_DEBUG_BUFSIZE, GFP_ATOMIC);
@@ -1118,18 +1128,24 @@ int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int i
nomem:
report = hid_get_report(report_enum, data);
- if (!report)
- return -1;
+ if (!report) {
+ ret = -1;
+ goto unlock;
+ }
if (hdrv && hdrv->raw_event && hid_match_report(hid, report)) {
ret = hdrv->raw_event(hid, report, data, size);
- if (ret != 0)
- return ret < 0 ? ret : 0;
+ if (ret != 0) {
+ ret = ret < 0 ? ret : 0;
+ goto unlock;
+ }
}
hid_report_raw_event(hid, type, data, size, interrupt);
- return 0;
+unlock:
+ up(&hid->driver_lock);
+ return ret;
}
EXPORT_SYMBOL_GPL(hid_input_report);
@@ -1212,6 +1228,12 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
if ((connect_mask & HID_CONNECT_HIDINPUT) && !hidinput_connect(hdev,
connect_mask & HID_CONNECT_HIDINPUT_FORCE))
hdev->claimed |= HID_CLAIMED_INPUT;
+ if (hdev->quirks & HID_QUIRK_MULTITOUCH) {
+ /* this device should be handled by hid-multitouch, skip it */
+ hdev->quirks &= ~HID_QUIRK_MULTITOUCH;
+ return -ENODEV;
+ }
+
if ((connect_mask & HID_CONNECT_HIDDEV) && hdev->hiddev_connect &&
!hdev->hiddev_connect(hdev,
connect_mask & HID_CONNECT_HIDDEV_FORCE))
@@ -1343,6 +1365,12 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
@@ -1391,6 +1419,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HANVON, USB_DEVICE_ID_HANVON_MULTITOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6650) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ILITEK, USB_DEVICE_ID_ILITEK_MULTITOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_IRTOUCHSYSTEMS, USB_DEVICE_ID_IRTOUCH_INFRARED_USB) },
@@ -1399,6 +1428,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MULTITOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) },
@@ -1420,8 +1450,11 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFP_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G27_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) },
@@ -1461,6 +1494,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_PCI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
@@ -1501,6 +1535,10 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_UNITEC, USB_DEVICE_ID_UNITEC_USB_TOUCH_0709) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UNITEC, USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH) },
@@ -1620,10 +1658,15 @@ static int hid_device_probe(struct device *dev)
const struct hid_device_id *id;
int ret = 0;
+ if (down_interruptible(&hdev->driver_lock))
+ return -EINTR;
+
if (!hdev->driver) {
id = hid_match_device(hdev, hdrv);
- if (id == NULL)
- return -ENODEV;
+ if (id == NULL) {
+ ret = -ENODEV;
+ goto unlock;
+ }
hdev->driver = hdrv;
if (hdrv->probe) {
@@ -1636,14 +1679,20 @@ static int hid_device_probe(struct device *dev)
if (ret)
hdev->driver = NULL;
}
+unlock:
+ up(&hdev->driver_lock);
return ret;
}
static int hid_device_remove(struct device *dev)
{
struct hid_device *hdev = container_of(dev, struct hid_device, dev);
- struct hid_driver *hdrv = hdev->driver;
+ struct hid_driver *hdrv;
+
+ if (down_interruptible(&hdev->driver_lock))
+ return -EINTR;
+ hdrv = hdev->driver;
if (hdrv) {
if (hdrv->remove)
hdrv->remove(hdev);
@@ -1652,6 +1701,7 @@ static int hid_device_remove(struct device *dev)
hdev->driver = NULL;
}
+ up(&hdev->driver_lock);
return 0;
}
@@ -1892,6 +1942,12 @@ static const struct hid_device_id hid_mouse_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ }
@@ -1999,6 +2055,7 @@ struct hid_device *hid_allocate_device(void)
init_waitqueue_head(&hdev->debug_wait);
INIT_LIST_HEAD(&hdev->debug_list);
+ sema_init(&hdev->driver_lock, 1);
return hdev;
err:
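The hid-core.c changes above add a per-device driver_lock semaphore: probe and remove take it (and may sleep), while the report path only try-locks and bails out with -EBUSY so it never waits in interrupt context, and a NULL hdev->driver is now checked under the lock. A userspace model of that discipline using POSIX semaphores (struct fake_hid_device and the fake_* functions are invented stand-ins):

#include <errno.h>
#include <semaphore.h>
#include <stdio.h>

struct fake_hid_device {
    sem_t driver_lock;
    void *driver;                          /* NULL while no driver is bound */
};

static int fake_input_report(struct fake_hid_device *hdev)
{
    if (sem_trywait(&hdev->driver_lock))   /* like down_trylock() */
        return -EBUSY;                     /* probe/remove in progress: drop the report */
    if (!hdev->driver) {
        sem_post(&hdev->driver_lock);
        return -ENODEV;
    }
    /* ... deliver the report to hdev->driver here ... */
    sem_post(&hdev->driver_lock);
    return 0;
}

static void fake_probe(struct fake_hid_device *hdev, void *drv)
{
    sem_wait(&hdev->driver_lock);          /* like down_interruptible() */
    hdev->driver = drv;                    /* bind while reports are locked out */
    sem_post(&hdev->driver_lock);
}

int main(void)
{
    struct fake_hid_device hdev = { .driver = NULL };
    int drv = 1;

    sem_init(&hdev.driver_lock, 0, 1);     /* like sema_init(&hdev->driver_lock, 1) */
    printf("before probe: %d\n", fake_input_report(&hdev));   /* -ENODEV */
    fake_probe(&hdev, &drv);
    printf("after probe:  %d\n", fake_input_report(&hdev));   /* 0 */
    sem_destroy(&hdev.driver_lock);
    return 0;
}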
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index bae48745bb4..9a243ca96e6 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -450,6 +450,11 @@ void hid_dump_field(struct hid_field *field, int n, struct seq_file *f) {
seq_printf(f, "Logical(");
hid_resolv_usage(field->logical, f); seq_printf(f, ")\n");
}
+ if (field->application) {
+ tab(n, f);
+ seq_printf(f, "Application(");
+ hid_resolv_usage(field->application, f); seq_printf(f, ")\n");
+ }
tab(n, f); seq_printf(f, "Usage(%d)\n", field->maxusage);
for (j = 0; j < field->maxusage; j++) {
tab(n+2, f); hid_resolv_usage(field->usage[j].hid, f); seq_printf(f, "\n");
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 7484e1b6724..1680e99b481 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -112,6 +112,12 @@
#define USB_DEVICE_ID_APPLE_ALU_REVB_ANSI 0x024f
#define USB_DEVICE_ID_APPLE_ALU_REVB_ISO 0x0250
#define USB_DEVICE_ID_APPLE_ALU_REVB_JIS 0x0251
+#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI 0x0249
+#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO 0x024a
+#define USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS 0x024b
+#define USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI 0x024c
+#define USB_DEVICE_ID_APPLE_WELLSPRING6_ISO 0x024d
+#define USB_DEVICE_ID_APPLE_WELLSPRING6_JIS 0x024e
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b
@@ -351,6 +357,9 @@
#define USB_DEVICE_ID_UGCI_FLYING 0x0020
#define USB_DEVICE_ID_UGCI_FIGHTING 0x0030
+#define USB_VENDOR_ID_IDEACOM 0x1cb6
+#define USB_DEVICE_ID_IDEACOM_IDC6650 0x6650
+
#define USB_VENDOR_ID_ILITEK 0x222a
#define USB_DEVICE_ID_ILITEK_MULTITOUCH 0x0001
@@ -423,6 +432,9 @@
#define USB_DEVICE_ID_LD_HYBRID 0x2090
#define USB_DEVICE_ID_LD_HEATCONTROL 0x20A0
+#define USB_VENDOR_ID_LG 0x1fd2
+#define USB_DEVICE_ID_LG_MULTITOUCH 0x0064
+
#define USB_VENDOR_ID_LOGITECH 0x046d
#define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101
#define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST 0xc110
@@ -440,6 +452,7 @@
#define USB_DEVICE_ID_LOGITECH_MOMO_WHEEL 0xc295
#define USB_DEVICE_ID_LOGITECH_DFP_WHEEL 0xc298
#define USB_DEVICE_ID_LOGITECH_G25_WHEEL 0xc299
+#define USB_DEVICE_ID_LOGITECH_DFGT_WHEEL 0xc29a
#define USB_DEVICE_ID_LOGITECH_G27_WHEEL 0xc29b
#define USB_DEVICE_ID_LOGITECH_WII_WHEEL 0xc29c
#define USB_DEVICE_ID_LOGITECH_ELITE_KBD 0xc30a
@@ -447,6 +460,8 @@
#define USB_DEVICE_ID_S510_RECEIVER_2 0xc517
#define USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500 0xc512
#define USB_DEVICE_ID_MX3000_RECEIVER 0xc513
+#define USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER 0xc52b
+#define USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2 0xc532
#define USB_DEVICE_ID_SPACETRAVELLER 0xc623
#define USB_DEVICE_ID_SPACENAVIGATOR 0xc626
#define USB_DEVICE_ID_DINOVO_DESKTOP 0xc704
@@ -678,6 +693,9 @@
#define USB_VENDOR_ID_WISEGROUP_LTD 0x6666
#define USB_VENDOR_ID_WISEGROUP_LTD2 0x6677
#define USB_DEVICE_ID_SMARTJOY_DUAL_PLUS 0x8802
+#define USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO 0x8801
+#define USB_DEVICE_ID_SUPER_DUAL_BOX_PRO 0x8802
+#define USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO 0x8804
#define USB_VENDOR_ID_X_TENSIONS 0x1ae7
#define USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE 0x9001
@@ -693,4 +711,7 @@
#define USB_VENDOR_ID_ZYDACRON 0x13EC
#define USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL 0x0006
+#define USB_VENDOR_ID_PRIMAX 0x0461
+#define USB_DEVICE_ID_PRIMAX_KEYBOARD 0x4e05
+
#endif
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 6559e2e3364..f333139d1a4 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -474,6 +474,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
map_key_clear(BTN_STYLUS2);
break;
+ case 0x51: /* ContactID */
+ device->quirks |= HID_QUIRK_MULTITOUCH;
+ goto unknown;
+
default: goto unknown;
}
break;
@@ -978,6 +982,13 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
}
}
+ if (hid->quirks & HID_QUIRK_MULTITOUCH) {
+ /* generic hid does not know how to handle multitouch devices */
+ if (hidinput)
+ goto out_cleanup;
+ goto out_unwind;
+ }
+
if (hidinput && input_register_device(hidinput->input))
goto out_cleanup;
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index a7f916e8fc3..e7a7bd1eb34 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -363,7 +363,7 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
goto err_free;
}
- if (quirks & (LG_FF | LG_FF2 | LG_FF3))
+ if (quirks & (LG_FF | LG_FF2 | LG_FF3 | LG_FF4))
connect_mask &= ~HID_CONNECT_FF;
ret = hid_hw_start(hdev, connect_mask);
@@ -372,7 +372,8 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
goto err_free;
}
- if (quirks & LG_FF4) {
+ /* Set up the wireless link with the Logitech Wii wheel */
+ if (hdev->product == USB_DEVICE_ID_LOGITECH_WII_WHEEL) {
unsigned char buf[] = { 0x00, 0xAF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
ret = hdev->hid_output_raw_report(hdev, buf, sizeof(buf), HID_FEATURE_REPORT);
@@ -405,6 +406,15 @@ err_free:
return ret;
}
+static void lg_remove(struct hid_device *hdev)
+{
+ unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
+ if (quirks & LG_FF4)
+ lg4ff_deinit(hdev);
+
+ hid_hw_stop(hdev);
+}
+
static const struct hid_device_id lg_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER),
.driver_data = LG_RDESC | LG_WIRELESS },
@@ -431,7 +441,7 @@ static const struct hid_device_id lg_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D),
.driver_data = LG_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL),
- .driver_data = LG_NOGET | LG_FF },
+ .driver_data = LG_NOGET | LG_FF4 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD_CORD),
.driver_data = LG_FF2 },
@@ -444,15 +454,17 @@ static const struct hid_device_id lg_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO),
.driver_data = LG_FF },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL),
- .driver_data = LG_FF },
+ .driver_data = LG_FF4 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2),
- .driver_data = LG_FF },
+ .driver_data = LG_FF4 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL),
- .driver_data = LG_FF },
+ .driver_data = LG_FF4 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL),
+ .driver_data = LG_FF4 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G27_WHEEL),
- .driver_data = LG_FF },
+ .driver_data = LG_FF4 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFP_WHEEL),
- .driver_data = LG_NOGET | LG_FF },
+ .driver_data = LG_NOGET | LG_FF4 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL),
.driver_data = LG_FF4 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG ),
@@ -478,6 +490,7 @@ static struct hid_driver lg_driver = {
.input_mapped = lg_input_mapped,
.event = lg_event,
.probe = lg_probe,
+ .remove = lg_remove,
};
static int __init lg_init(void)
diff --git a/drivers/hid/hid-lg.h b/drivers/hid/hid-lg.h
index b0100ba2ae0..4b097286dc7 100644
--- a/drivers/hid/hid-lg.h
+++ b/drivers/hid/hid-lg.h
@@ -19,10 +19,12 @@ int lg3ff_init(struct hid_device *hdev);
static inline int lg3ff_init(struct hid_device *hdev) { return -1; }
#endif
-#ifdef CONFIG_LOGIWII_FF
+#ifdef CONFIG_LOGIWHEELS_FF
int lg4ff_init(struct hid_device *hdev);
+int lg4ff_deinit(struct hid_device *hdev);
#else
static inline int lg4ff_init(struct hid_device *hdev) { return -1; }
+static inline int lg4ff_deinit(struct hid_device *hdev) { return -1; }
#endif
#endif
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index fa550c8e1d1..103f30d93f7 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -29,19 +29,108 @@
#include "usbhid/usbhid.h"
#include "hid-lg.h"
+#include "hid-ids.h"
-struct lg4ff_device {
- struct hid_report *report;
+#define DFGT_REV_MAJ 0x13
+#define DFGT_REV_MIN 0x22
+#define DFP_REV_MAJ 0x11
+#define DFP_REV_MIN 0x06
+#define FFEX_REV_MAJ 0x21
+#define FFEX_REV_MIN 0x00
+#define G25_REV_MAJ 0x12
+#define G25_REV_MIN 0x22
+#define G27_REV_MAJ 0x12
+#define G27_REV_MIN 0x38
+
+#define to_hid_device(pdev) container_of(pdev, struct hid_device, dev)
+
+static void hid_lg4ff_set_range_dfp(struct hid_device *hid, u16 range);
+static void hid_lg4ff_set_range_g25(struct hid_device *hid, u16 range);
+static ssize_t lg4ff_range_show(struct device *dev, struct device_attribute *attr, char *buf);
+static ssize_t lg4ff_range_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count);
+
+static DEVICE_ATTR(range, S_IRWXU | S_IRWXG | S_IRWXO, lg4ff_range_show, lg4ff_range_store);
+
+static bool list_inited;
+
+struct lg4ff_device_entry {
+ char *device_id; /* Name of the respective kobject structure, used as the ID */
+ __u16 range;
+ __u16 min_range;
+ __u16 max_range;
+ __u8 leds;
+ struct list_head list;
+ void (*set_range)(struct hid_device *hid, u16 range);
};
-static const signed short ff4_wheel_ac[] = {
+static struct lg4ff_device_entry device_list;
+
+static const signed short lg4ff_wheel_effects[] = {
FF_CONSTANT,
FF_AUTOCENTER,
-1
};
-static int hid_lg4ff_play(struct input_dev *dev, void *data,
- struct ff_effect *effect)
+struct lg4ff_wheel {
+ const __u32 product_id;
+ const signed short *ff_effects;
+ const __u16 min_range;
+ const __u16 max_range;
+ void (*set_range)(struct hid_device *hid, u16 range);
+};
+
+static const struct lg4ff_wheel lg4ff_devices[] = {
+ {USB_DEVICE_ID_LOGITECH_WHEEL, lg4ff_wheel_effects, 40, 270, NULL},
+ {USB_DEVICE_ID_LOGITECH_MOMO_WHEEL, lg4ff_wheel_effects, 40, 270, NULL},
+ {USB_DEVICE_ID_LOGITECH_DFP_WHEEL, lg4ff_wheel_effects, 40, 900, hid_lg4ff_set_range_dfp},
+ {USB_DEVICE_ID_LOGITECH_G25_WHEEL, lg4ff_wheel_effects, 40, 900, hid_lg4ff_set_range_g25},
+ {USB_DEVICE_ID_LOGITECH_DFGT_WHEEL, lg4ff_wheel_effects, 40, 900, hid_lg4ff_set_range_g25},
+ {USB_DEVICE_ID_LOGITECH_G27_WHEEL, lg4ff_wheel_effects, 40, 900, hid_lg4ff_set_range_g25},
+ {USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2, lg4ff_wheel_effects, 40, 270, NULL},
+ {USB_DEVICE_ID_LOGITECH_WII_WHEEL, lg4ff_wheel_effects, 40, 270, NULL}
+};
+
+struct lg4ff_native_cmd {
+ const __u8 cmd_num; /* Number of commands to send */
+ const __u8 cmd[];
+};
+
+struct lg4ff_usb_revision {
+ const __u16 rev_maj;
+ const __u16 rev_min;
+ const struct lg4ff_native_cmd *command;
+};
+
+static const struct lg4ff_native_cmd native_dfp = {
+ 1,
+ {0xf8, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00}
+};
+
+static const struct lg4ff_native_cmd native_dfgt = {
+ 2,
+ {0xf8, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, /* 1st command */
+ 0xf8, 0x09, 0x03, 0x01, 0x00, 0x00, 0x00} /* 2nd command */
+};
+
+static const struct lg4ff_native_cmd native_g25 = {
+ 1,
+ {0xf8, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00}
+};
+
+static const struct lg4ff_native_cmd native_g27 = {
+ 2,
+ {0xf8, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, /* 1st command */
+ 0xf8, 0x09, 0x04, 0x01, 0x00, 0x00, 0x00} /* 2nd command */
+};
+
+static const struct lg4ff_usb_revision lg4ff_revs[] = {
+ {DFGT_REV_MAJ, DFGT_REV_MIN, &native_dfgt}, /* Driving Force GT */
+ {DFP_REV_MAJ, DFP_REV_MIN, &native_dfp}, /* Driving Force Pro */
+ {G25_REV_MAJ, G25_REV_MIN, &native_g25}, /* G25 */
+ {G27_REV_MAJ, G27_REV_MIN, &native_g27}, /* G27 */
+};
+
+static int hid_lg4ff_play(struct input_dev *dev, void *data, struct ff_effect *effect)
{
struct hid_device *hid = input_get_drvdata(dev);
struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
@@ -55,13 +144,12 @@ static int hid_lg4ff_play(struct input_dev *dev, void *data,
x = effect->u.ramp.start_level + 0x80; /* 0x80 is no force */
CLAMP(x);
report->field[0]->value[0] = 0x11; /* Slot 1 */
- report->field[0]->value[1] = 0x10;
+ report->field[0]->value[1] = 0x08;
report->field[0]->value[2] = x;
- report->field[0]->value[3] = 0x00;
+ report->field[0]->value[3] = 0x80;
report->field[0]->value[4] = 0x00;
- report->field[0]->value[5] = 0x08;
+ report->field[0]->value[5] = 0x00;
report->field[0]->value[6] = 0x00;
- dbg_hid("Autocenter, x=0x%02X\n", x);
usbhid_submit_report(hid, report, USB_DIR_OUT);
break;
@@ -69,24 +157,184 @@ static int hid_lg4ff_play(struct input_dev *dev, void *data,
return 0;
}
-static void hid_lg4ff_set_autocenter(struct input_dev *dev, u16 magnitude)
+/* Sends default autocentering command compatible with
+ * all wheels except Formula Force EX */
+static void hid_lg4ff_set_autocenter_default(struct input_dev *dev, u16 magnitude)
{
struct hid_device *hid = input_get_drvdata(dev);
struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
- __s32 *value = report->field[0]->value;
- *value++ = 0xfe;
- *value++ = 0x0d;
- *value++ = 0x07;
- *value++ = 0x07;
- *value++ = (magnitude >> 8) & 0xff;
- *value++ = 0x00;
- *value = 0x00;
+ report->field[0]->value[0] = 0xfe;
+ report->field[0]->value[1] = 0x0d;
+ report->field[0]->value[2] = magnitude >> 13;
+ report->field[0]->value[3] = magnitude >> 13;
+ report->field[0]->value[4] = magnitude >> 8;
+ report->field[0]->value[5] = 0x00;
+ report->field[0]->value[6] = 0x00;
+
+ usbhid_submit_report(hid, report, USB_DIR_OUT);
+}
+
+/* Sends autocentering command compatible with Formula Force EX */
+static void hid_lg4ff_set_autocenter_ffex(struct input_dev *dev, u16 magnitude)
+{
+ struct hid_device *hid = input_get_drvdata(dev);
+ struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
+ magnitude = magnitude * 90 / 65535;
+
+
+ report->field[0]->value[0] = 0xfe;
+ report->field[0]->value[1] = 0x03;
+ report->field[0]->value[2] = magnitude >> 14;
+ report->field[0]->value[3] = magnitude >> 14;
+ report->field[0]->value[4] = magnitude;
+ report->field[0]->value[5] = 0x00;
+ report->field[0]->value[6] = 0x00;
+
+ usbhid_submit_report(hid, report, USB_DIR_OUT);
+}
+
+/* Sends command to set range compatible with G25/G27/Driving Force GT */
+static void hid_lg4ff_set_range_g25(struct hid_device *hid, u16 range)
+{
+ struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
+ dbg_hid("G25/G27/DFGT: setting range to %u\n", range);
+
+ report->field[0]->value[0] = 0xf8;
+ report->field[0]->value[1] = 0x81;
+ report->field[0]->value[2] = range & 0x00ff;
+ report->field[0]->value[3] = (range & 0xff00) >> 8;
+ report->field[0]->value[4] = 0x00;
+ report->field[0]->value[5] = 0x00;
+ report->field[0]->value[6] = 0x00;
+
+ usbhid_submit_report(hid, report, USB_DIR_OUT);
+}
+
+/* Sends commands to set range compatible with Driving Force Pro wheel */
+static void hid_lg4ff_set_range_dfp(struct hid_device *hid, __u16 range)
+{
+ struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
+ int start_left, start_right, full_range;
+ dbg_hid("Driving Force Pro: setting range to %u\n", range);
+
+ /* Prepare "coarse" limit command */
+ report->field[0]->value[0] = 0xf8;
+ report->field[0]->value[1] = 0x00; /* Set later */
+ report->field[0]->value[2] = 0x00;
+ report->field[0]->value[3] = 0x00;
+ report->field[0]->value[4] = 0x00;
+ report->field[0]->value[5] = 0x00;
+ report->field[0]->value[6] = 0x00;
+
+ if (range > 200) {
+ report->field[0]->value[1] = 0x03;
+ full_range = 900;
+ } else {
+ report->field[0]->value[1] = 0x02;
+ full_range = 200;
+ }
+ usbhid_submit_report(hid, report, USB_DIR_OUT);
+
+ /* Prepare "fine" limit command */
+ report->field[0]->value[0] = 0x81;
+ report->field[0]->value[1] = 0x0b;
+ report->field[0]->value[2] = 0x00;
+ report->field[0]->value[3] = 0x00;
+ report->field[0]->value[4] = 0x00;
+ report->field[0]->value[5] = 0x00;
+ report->field[0]->value[6] = 0x00;
+
+ if (range == 200 || range == 900) { /* Do not apply any fine limit */
+ usbhid_submit_report(hid, report, USB_DIR_OUT);
+ return;
+ }
+
+ /* Construct fine limit command */
+ start_left = (((full_range - range + 1) * 2047) / full_range);
+ start_right = 0xfff - start_left;
+
+ report->field[0]->value[2] = start_left >> 4;
+ report->field[0]->value[3] = start_right >> 4;
+ report->field[0]->value[4] = 0xff;
+ report->field[0]->value[5] = (start_right & 0xe) << 4 | (start_left & 0xe);
+ report->field[0]->value[6] = 0xff;
usbhid_submit_report(hid, report, USB_DIR_OUT);
}
+static void hid_lg4ff_switch_native(struct hid_device *hid, const struct lg4ff_native_cmd *cmd)
+{
+ struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
+ __u8 i, j;
+
+ j = 0;
+ while (j < 7*cmd->cmd_num) {
+ for (i = 0; i < 7; i++)
+ report->field[0]->value[i] = cmd->cmd[j++];
+
+ usbhid_submit_report(hid, report, USB_DIR_OUT);
+ }
+}
+
+/* Report the currently set range through the sysfs "range" attribute */
+static ssize_t lg4ff_range_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct lg4ff_device_entry *uninitialized_var(entry);
+ struct list_head *h;
+ struct hid_device *hid = to_hid_device(dev);
+ size_t count;
+
+ list_for_each(h, &device_list.list) {
+ entry = list_entry(h, struct lg4ff_device_entry, list);
+ if (strcmp(entry->device_id, (&hid->dev)->kobj.name) == 0)
+ break;
+ }
+ if (h == &device_list.list) {
+ dbg_hid("Device not found!");
+ return 0;
+ }
+
+ count = scnprintf(buf, PAGE_SIZE, "%u\n", entry->range);
+ return count;
+}
+
+/* Set the range to a user-specified value, calling the appropriate function
+ * according to the type of the wheel */
+static ssize_t lg4ff_range_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct lg4ff_device_entry *uninitialized_var(entry);
+ struct list_head *h;
+ struct hid_device *hid = to_hid_device(dev);
+ __u16 range = simple_strtoul(buf, NULL, 10);
+
+ list_for_each(h, &device_list.list) {
+ entry = list_entry(h, struct lg4ff_device_entry, list);
+ if (strcmp(entry->device_id, (&hid->dev)->kobj.name) == 0)
+ break;
+ }
+ if (h == &device_list.list) {
+ dbg_hid("Device not found!");
+ return count;
+ }
+
+ if (range == 0)
+ range = entry->max_range;
+
+ /* Check if the wheel supports range setting
+ * and that the range is within limits for the wheel */
+ if (entry->set_range != NULL && range >= entry->min_range && range <= entry->max_range) {
+ entry->set_range(hid, range);
+ entry->range = range;
+ }
+
+ return count;
+}
int lg4ff_init(struct hid_device *hid)
{
@@ -95,9 +343,10 @@ int lg4ff_init(struct hid_device *hid)
struct input_dev *dev = hidinput->input;
struct hid_report *report;
struct hid_field *field;
- const signed short *ff_bits = ff4_wheel_ac;
- int error;
- int i;
+ struct lg4ff_device_entry *entry;
+ struct usb_device_descriptor *udesc;
+ int error, i, j;
+ __u16 bcdDevice, rev_maj, rev_min;
/* Find the report to use */
if (list_empty(report_list)) {
@@ -118,18 +367,122 @@ int lg4ff_init(struct hid_device *hid)
return -1;
}
- for (i = 0; ff_bits[i] >= 0; i++)
- set_bit(ff_bits[i], dev->ffbit);
+ /* Check what wheel has been connected */
+ for (i = 0; i < ARRAY_SIZE(lg4ff_devices); i++) {
+ if (hid->product == lg4ff_devices[i].product_id) {
+ dbg_hid("Found compatible device, product ID %04X\n", lg4ff_devices[i].product_id);
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(lg4ff_devices)) {
+ hid_err(hid, "Device is not supported by lg4ff driver. If you think it should be, consider reporting a bug to"
+ "LKML, Simon Wood <simon@mungewell.org> or Michal Maly <madcatxster@gmail.com>\n");
+ return -1;
+ }
+
+ /* Attempt to switch wheel to native mode when applicable */
+ udesc = &(hid_to_usb_dev(hid)->descriptor);
+ if (!udesc) {
+ hid_err(hid, "NULL USB device descriptor\n");
+ return -1;
+ }
+ bcdDevice = le16_to_cpu(udesc->bcdDevice);
+ rev_maj = bcdDevice >> 8;
+ rev_min = bcdDevice & 0xff;
+
+ if (lg4ff_devices[i].product_id == USB_DEVICE_ID_LOGITECH_WHEEL) {
+ dbg_hid("Generic wheel detected, can it do native?\n");
+ dbg_hid("USB revision: %2x.%02x\n", rev_maj, rev_min);
+
+ for (j = 0; j < ARRAY_SIZE(lg4ff_revs); j++) {
+ if (lg4ff_revs[j].rev_maj == rev_maj && lg4ff_revs[j].rev_min == rev_min) {
+ hid_lg4ff_switch_native(hid, lg4ff_revs[j].command);
+ hid_info(hid, "Switched to native mode\n");
+ }
+ }
+ }
+
+ /* Set supported force feedback capabilities */
+ for (j = 0; lg4ff_devices[i].ff_effects[j] >= 0; j++)
+ set_bit(lg4ff_devices[i].ff_effects[j], dev->ffbit);
error = input_ff_create_memless(dev, NULL, hid_lg4ff_play);
if (error)
return error;
- if (test_bit(FF_AUTOCENTER, dev->ffbit))
- dev->ff->set_autocenter = hid_lg4ff_set_autocenter;
+ /* Check if autocentering is available and
+ * set the centering force to zero by default */
+ if (test_bit(FF_AUTOCENTER, dev->ffbit)) {
+ if (rev_maj == FFEX_REV_MAJ && rev_min == FFEX_REV_MIN) /* Formula Force EX expects a different autocentering command */
+ dev->ff->set_autocenter = hid_lg4ff_set_autocenter_ffex;
+ else
+ dev->ff->set_autocenter = hid_lg4ff_set_autocenter_default;
+
+ dev->ff->set_autocenter(dev, 0);
+ }
+
+ /* Initialize device_list if this is the first device handled by lg4ff */
+ if (!list_inited) {
+ INIT_LIST_HEAD(&device_list.list);
+ list_inited = 1;
+ }
+
+ /* Add the device to device_list */
+ entry = (struct lg4ff_device_entry *)kzalloc(sizeof(struct lg4ff_device_entry), GFP_KERNEL);
+ if (!entry) {
+ hid_err(hid, "Cannot add device, insufficient memory.\n");
+ return -ENOMEM;
+ }
+ entry->device_id = kstrdup((&hid->dev)->kobj.name, GFP_KERNEL);
+ if (!entry->device_id) {
+ hid_err(hid, "Cannot set device_id, insufficient memory.\n");
+ kfree(entry);
+ return -ENOMEM;
+ }
+ entry->min_range = lg4ff_devices[i].min_range;
+ entry->max_range = lg4ff_devices[i].max_range;
+ entry->set_range = lg4ff_devices[i].set_range;
+ list_add(&entry->list, &device_list.list);
+
+ /* Create sysfs interface */
+ error = device_create_file(&hid->dev, &dev_attr_range);
+ if (error)
+ return error;
+ dbg_hid("sysfs interface created\n");
+
+ /* Set the maximum range to start with */
+ entry->range = entry->max_range;
+ if (entry->set_range != NULL)
+ entry->set_range(hid, entry->range);
hid_info(hid, "Force feedback for Logitech Speed Force Wireless by Simon Wood <simon@mungewell.org>\n");
return 0;
}
+int lg4ff_deinit(struct hid_device *hid)
+{
+ bool found = 0;
+ struct lg4ff_device_entry *entry;
+ struct list_head *h, *g;
+ list_for_each_safe(h, g, &device_list.list) {
+ entry = list_entry(h, struct lg4ff_device_entry, list);
+ if (strcmp(entry->device_id, (&hid->dev)->kobj.name) == 0) {
+ list_del(h);
+ kfree(entry->device_id);
+ kfree(entry);
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found) {
+ dbg_hid("Device entry not found!\n");
+ return -1;
+ }
+
+ device_remove_file(&hid->dev, &dev_attr_range);
+ dbg_hid("Device successfully unregistered\n");
+ return 0;
+}
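
The lg4ff changes above expose a per-wheel "range" sysfs attribute on the HID device (created with device_create_file() in lg4ff_init()); writing 0 selects the wheel's maximum range, and other values take effect only when they fall within the wheel's [min_range, max_range]. As a rough, hypothetical illustration that is not part of the patch, a userspace program could drive the attribute like this; the sysfs path layout and the example device directory name are assumptions:

/* Hypothetical userspace sketch, not part of this patch: set a wheel's
 * rotation range through the "range" attribute created by lg4ff_init().
 * The device directory name below is an invented example; the attribute
 * is assumed to appear under the HID device's sysfs directory. */
#include <stdio.h>

int main(void)
{
	const char *attr = "/sys/bus/hid/devices/0003:046D:C294.0001/range";
	FILE *f = fopen(attr, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* The driver ignores values outside [min_range, max_range];
	 * writing 0 selects the wheel's maximum range. */
	fprintf(f, "%d\n", 540);
	fclose(f);
	return 0;
}
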
diff --git a/drivers/hid/hid-lgff.c b/drivers/hid/hid-lgff.c
index 088f8504929..27bc54f92f4 100644
--- a/drivers/hid/hid-lgff.c
+++ b/drivers/hid/hid-lgff.c
@@ -58,12 +58,6 @@ static const signed short ff_joystick_ac[] = {
-1
};
-static const signed short ff_wheel[] = {
- FF_CONSTANT,
- FF_AUTOCENTER,
- -1
-};
-
static const struct dev_type devices[] = {
{ 0x046d, 0xc211, ff_rumble },
{ 0x046d, 0xc219, ff_rumble },
@@ -71,14 +65,7 @@ static const struct dev_type devices[] = {
{ 0x046d, 0xc286, ff_joystick_ac },
{ 0x046d, 0xc287, ff_joystick_ac },
{ 0x046d, 0xc293, ff_joystick },
- { 0x046d, 0xc294, ff_wheel },
- { 0x046d, 0xc298, ff_wheel },
- { 0x046d, 0xc299, ff_wheel },
- { 0x046d, 0xc29b, ff_wheel },
{ 0x046d, 0xc295, ff_joystick },
- { 0x046d, 0xc298, ff_wheel },
- { 0x046d, 0xc299, ff_wheel },
- { 0x046d, 0xca03, ff_wheel },
};
static int hid_lgff_play(struct input_dev *dev, void *data, struct ff_effect *effect)
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
new file mode 100644
index 00000000000..38b12e45780
--- /dev/null
+++ b/drivers/hid/hid-logitech-dj.c
@@ -0,0 +1,922 @@
+/*
+ * HID driver for Logitech Unifying receivers
+ *
+ * Copyright (c) 2011 Logitech
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include "usbhid/usbhid.h"
+#include "hid-ids.h"
+#include "hid-logitech-dj.h"
+
+/* Keyboard descriptor (1) */
+static const char kbd_descriptor[] = {
+ 0x05, 0x01, /* USAGE_PAGE (generic Desktop) */
+ 0x09, 0x06, /* USAGE (Keyboard) */
+ 0xA1, 0x01, /* COLLECTION (Application) */
+ 0x85, 0x01, /* REPORT_ID (1) */
+ 0x95, 0x08, /* REPORT_COUNT (8) */
+ 0x75, 0x01, /* REPORT_SIZE (1) */
+ 0x15, 0x00, /* LOGICAL_MINIMUM (0) */
+ 0x25, 0x01, /* LOGICAL_MAXIMUM (1) */
+ 0x05, 0x07, /* USAGE_PAGE (Keyboard) */
+ 0x19, 0xE0, /* USAGE_MINIMUM (Left Control) */
+ 0x29, 0xE7, /* USAGE_MAXIMUM (Right GUI) */
+ 0x81, 0x02, /* INPUT (Data,Var,Abs) */
+ 0x95, 0x05, /* REPORT COUNT (5) */
+ 0x05, 0x08, /* USAGE PAGE (LED page) */
+ 0x19, 0x01, /* USAGE MINIMUM (1) */
+ 0x29, 0x05, /* USAGE MAXIMUM (5) */
+ 0x91, 0x02, /* OUTPUT (Data, Variable, Absolute) */
+ 0x95, 0x01, /* REPORT COUNT (1) */
+ 0x75, 0x03, /* REPORT SIZE (3) */
+ 0x91, 0x01, /* OUTPUT (Constant) */
+ 0x95, 0x06, /* REPORT_COUNT (6) */
+ 0x75, 0x08, /* REPORT_SIZE (8) */
+ 0x15, 0x00, /* LOGICAL_MINIMUM (0) */
+ 0x26, 0xFF, 0x00, /* LOGICAL_MAXIMUM (255) */
+ 0x05, 0x07, /* USAGE_PAGE (Keyboard) */
+ 0x19, 0x00, /* USAGE_MINIMUM (no event) */
+ 0x2A, 0xFF, 0x00, /* USAGE_MAXIMUM (reserved) */
+ 0x81, 0x00, /* INPUT (Data,Ary,Abs) */
+ 0xC0
+};
+
+/* Mouse descriptor (2) */
+static const char mse_descriptor[] = {
+ 0x05, 0x01, /* USAGE_PAGE (Generic Desktop) */
+ 0x09, 0x02, /* USAGE (Mouse) */
+ 0xA1, 0x01, /* COLLECTION (Application) */
+ 0x85, 0x02, /* REPORT_ID = 2 */
+ 0x09, 0x01, /* USAGE (pointer) */
+ 0xA1, 0x00, /* COLLECTION (physical) */
+ 0x05, 0x09, /* USAGE_PAGE (buttons) */
+ 0x19, 0x01, /* USAGE_MIN (1) */
+ 0x29, 0x10, /* USAGE_MAX (16) */
+ 0x15, 0x00, /* LOGICAL_MIN (0) */
+ 0x25, 0x01, /* LOGICAL_MAX (1) */
+ 0x95, 0x10, /* REPORT_COUNT (16) */
+ 0x75, 0x01, /* REPORT_SIZE (1) */
+ 0x81, 0x02, /* INPUT (data var abs) */
+ 0x05, 0x01, /* USAGE_PAGE (generic desktop) */
+ 0x16, 0x01, 0xF8, /* LOGICAL_MIN (-2047) */
+ 0x26, 0xFF, 0x07, /* LOGICAL_MAX (2047) */
+ 0x75, 0x0C, /* REPORT_SIZE (12) */
+ 0x95, 0x02, /* REPORT_COUNT (2) */
+ 0x09, 0x30, /* USAGE (X) */
+ 0x09, 0x31, /* USAGE (Y) */
+ 0x81, 0x06, /* INPUT */
+ 0x15, 0x81, /* LOGICAL_MIN (-127) */
+ 0x25, 0x7F, /* LOGICAL_MAX (127) */
+ 0x75, 0x08, /* REPORT_SIZE (8) */
+ 0x95, 0x01, /* REPORT_COUNT (1) */
+ 0x09, 0x38, /* USAGE (wheel) */
+ 0x81, 0x06, /* INPUT */
+ 0x05, 0x0C, /* USAGE_PAGE(consumer) */
+ 0x0A, 0x38, 0x02, /* USAGE(AC Pan) */
+ 0x95, 0x01, /* REPORT_COUNT (1) */
+ 0x81, 0x06, /* INPUT */
+ 0xC0, /* END_COLLECTION */
+ 0xC0, /* END_COLLECTION */
+};
+
+/* Consumer Control descriptor (3) */
+static const char consumer_descriptor[] = {
+ 0x05, 0x0C, /* USAGE_PAGE (Consumer Devices) */
+ 0x09, 0x01, /* USAGE (Consumer Control) */
+ 0xA1, 0x01, /* COLLECTION (Application) */
+ 0x85, 0x03, /* REPORT_ID = 3 */
+ 0x75, 0x10, /* REPORT_SIZE (16) */
+ 0x95, 0x02, /* REPORT_COUNT (2) */
+ 0x15, 0x01, /* LOGICAL_MIN (1) */
+ 0x26, 0x8C, 0x02, /* LOGICAL_MAX (652) */
+ 0x19, 0x01, /* USAGE_MIN (1) */
+ 0x2A, 0x8C, 0x02, /* USAGE_MAX (652) */
+ 0x81, 0x00, /* INPUT (Data Ary Abs) */
+ 0xC0, /* END_COLLECTION */
+}; /* */
+
+/* System control descriptor (4) */
+static const char syscontrol_descriptor[] = {
+ 0x05, 0x01, /* USAGE_PAGE (Generic Desktop) */
+ 0x09, 0x80, /* USAGE (System Control) */
+ 0xA1, 0x01, /* COLLECTION (Application) */
+ 0x85, 0x04, /* REPORT_ID = 4 */
+ 0x75, 0x02, /* REPORT_SIZE (2) */
+ 0x95, 0x01, /* REPORT_COUNT (1) */
+ 0x15, 0x01, /* LOGICAL_MIN (1) */
+ 0x25, 0x03, /* LOGICAL_MAX (3) */
+ 0x09, 0x82, /* USAGE (System Sleep) */
+ 0x09, 0x81, /* USAGE (System Power Down) */
+ 0x09, 0x83, /* USAGE (System Wake Up) */
+ 0x81, 0x60, /* INPUT (Data Ary Abs NPrf Null) */
+ 0x75, 0x06, /* REPORT_SIZE (6) */
+ 0x81, 0x03, /* INPUT (Cnst Var Abs) */
+ 0xC0, /* END_COLLECTION */
+};
+
+/* Media descriptor (8) */
+static const char media_descriptor[] = {
+ 0x06, 0xbc, 0xff, /* Usage Page 0xffbc */
+ 0x09, 0x88, /* Usage 0x0088 */
+ 0xa1, 0x01, /* BeginCollection */
+ 0x85, 0x08, /* Report ID 8 */
+ 0x19, 0x01, /* Usage Min 0x0001 */
+ 0x29, 0xff, /* Usage Max 0x00ff */
+ 0x15, 0x01, /* Logical Min 1 */
+ 0x26, 0xff, 0x00, /* Logical Max 255 */
+ 0x75, 0x08, /* Report Size 8 */
+ 0x95, 0x01, /* Report Count 1 */
+ 0x81, 0x00, /* Input */
+ 0xc0, /* EndCollection */
+}; /* */
+
+/* Maximum size of all defined hid reports in bytes (including report id) */
+#define MAX_REPORT_SIZE 8
+
+/* Number of possible hid report types that can be created by this driver.
+ *
+ * Right now, RF report types have the same report types (or report IDs)
+ * as the hid reports created from those RF reports. In the future
+ * this doesn't have to be true.
+ *
+ * For instance, RF report type 0x01, which has a size of 8 bytes, corresponds
+ * to hid report id 0x01; this is the standard keyboard. The same applies to mouse
+ * reports, consumer control, etc. If a new RF report is created, it doesn't
+ * have to have the same report id as its corresponding hid report, so a
+ * translation may have to take place for future report types.
+ */
+#define NUMBER_OF_HID_REPORTS 32
+static const u8 hid_reportid_size_map[NUMBER_OF_HID_REPORTS] = {
+ [1] = 8, /* Standard keyboard */
+ [2] = 8, /* Standard mouse */
+ [3] = 5, /* Consumer control */
+ [4] = 2, /* System control */
+ [8] = 2, /* Media Center */
+};
+
+
+#define LOGITECH_DJ_INTERFACE_NUMBER 0x02
+
+static struct hid_ll_driver logi_dj_ll_driver;
+
+static int logi_dj_output_hidraw_report(struct hid_device *hid, u8 * buf,
+ size_t count,
+ unsigned char report_type);
+
+static void logi_dj_recv_destroy_djhid_device(struct dj_receiver_dev *djrcv_dev,
+ struct dj_report *dj_report)
+{
+ /* Called in delayed work context */
+ struct dj_device *dj_dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&djrcv_dev->lock, flags);
+ dj_dev = djrcv_dev->paired_dj_devices[dj_report->device_index];
+ djrcv_dev->paired_dj_devices[dj_report->device_index] = NULL;
+ spin_unlock_irqrestore(&djrcv_dev->lock, flags);
+
+ if (dj_dev != NULL) {
+ hid_destroy_device(dj_dev->hdev);
+ kfree(dj_dev);
+ } else {
+ dev_err(&djrcv_dev->hdev->dev, "%s: can't destroy a NULL device\n",
+ __func__);
+ }
+}
+
+static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
+ struct dj_report *dj_report)
+{
+ /* Called in delayed work context */
+ struct hid_device *djrcv_hdev = djrcv_dev->hdev;
+ struct usb_interface *intf = to_usb_interface(djrcv_hdev->dev.parent);
+ struct usb_device *usbdev = interface_to_usbdev(intf);
+ struct hid_device *dj_hiddev;
+ struct dj_device *dj_dev;
+
+ /* Device index goes from 1 to 6; we need 3 bytes to store the
+ * colon, the index, and a null terminator
+ */
+ unsigned char tmpstr[3];
+
+ if (dj_report->report_params[DEVICE_PAIRED_PARAM_SPFUNCTION] &
+ SPFUNCTION_DEVICE_LIST_EMPTY) {
+ dbg_hid("%s: device list is empty\n", __func__);
+ return;
+ }
+
+ if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
+ (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
+ dev_err(&djrcv_hdev->dev, "%s: invalid device index:%d\n",
+ __func__, dj_report->device_index);
+ return;
+ }
+
+ dj_hiddev = hid_allocate_device();
+ if (IS_ERR(dj_hiddev)) {
+ dev_err(&djrcv_hdev->dev, "%s: hid_allocate_device failed\n",
+ __func__);
+ return;
+ }
+
+ dj_hiddev->ll_driver = &logi_dj_ll_driver;
+ dj_hiddev->hid_output_raw_report = logi_dj_output_hidraw_report;
+
+ dj_hiddev->dev.parent = &djrcv_hdev->dev;
+ dj_hiddev->bus = BUS_USB;
+ dj_hiddev->vendor = le16_to_cpu(usbdev->descriptor.idVendor);
+ dj_hiddev->product = le16_to_cpu(usbdev->descriptor.idProduct);
+ snprintf(dj_hiddev->name, sizeof(dj_hiddev->name),
+ "Logitech Unifying Device. Wireless PID:%02x%02x",
+ dj_report->report_params[DEVICE_PAIRED_PARAM_EQUAD_ID_MSB],
+ dj_report->report_params[DEVICE_PAIRED_PARAM_EQUAD_ID_LSB]);
+
+ usb_make_path(usbdev, dj_hiddev->phys, sizeof(dj_hiddev->phys));
+ snprintf(tmpstr, sizeof(tmpstr), ":%d", dj_report->device_index);
+ strlcat(dj_hiddev->phys, tmpstr, sizeof(dj_hiddev->phys));
+
+ dj_dev = kzalloc(sizeof(struct dj_device), GFP_KERNEL);
+
+ if (!dj_dev) {
+ dev_err(&djrcv_hdev->dev, "%s: failed allocating dj_device\n",
+ __func__);
+ goto dj_device_allocate_fail;
+ }
+
+ dj_dev->reports_supported = le32_to_cpu(
+ dj_report->report_params[DEVICE_PAIRED_RF_REPORT_TYPE]);
+ dj_dev->hdev = dj_hiddev;
+ dj_dev->dj_receiver_dev = djrcv_dev;
+ dj_dev->device_index = dj_report->device_index;
+ dj_hiddev->driver_data = dj_dev;
+
+ djrcv_dev->paired_dj_devices[dj_report->device_index] = dj_dev;
+
+ if (hid_add_device(dj_hiddev)) {
+ dev_err(&djrcv_hdev->dev, "%s: failed adding dj_device\n",
+ __func__);
+ goto hid_add_device_fail;
+ }
+
+ return;
+
+hid_add_device_fail:
+ djrcv_dev->paired_dj_devices[dj_report->device_index] = NULL;
+ kfree(dj_dev);
+dj_device_allocate_fail:
+ hid_destroy_device(dj_hiddev);
+}
+
+static void delayedwork_callback(struct work_struct *work)
+{
+ struct dj_receiver_dev *djrcv_dev =
+ container_of(work, struct dj_receiver_dev, work);
+
+ struct dj_report dj_report;
+ unsigned long flags;
+ int count;
+
+ dbg_hid("%s\n", __func__);
+
+ spin_lock_irqsave(&djrcv_dev->lock, flags);
+
+ count = kfifo_out(&djrcv_dev->notif_fifo, &dj_report,
+ sizeof(struct dj_report));
+
+ if (count != sizeof(struct dj_report)) {
+ dev_err(&djrcv_dev->hdev->dev, "%s: workitem triggered without "
+ "notifications available\n", __func__);
+ spin_unlock_irqrestore(&djrcv_dev->lock, flags);
+ return;
+ }
+
+ if (!kfifo_is_empty(&djrcv_dev->notif_fifo)) {
+ if (schedule_work(&djrcv_dev->work) == 0) {
+ dbg_hid("%s: did not schedule the work item, was "
+ "already queued\n", __func__);
+ }
+ }
+
+ spin_unlock_irqrestore(&djrcv_dev->lock, flags);
+
+ switch (dj_report.report_type) {
+ case REPORT_TYPE_NOTIF_DEVICE_PAIRED:
+ logi_dj_recv_add_djhid_device(djrcv_dev, &dj_report);
+ break;
+ case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED:
+ logi_dj_recv_destroy_djhid_device(djrcv_dev, &dj_report);
+ break;
+ default:
+ dbg_hid("%s: unexpected report type\n", __func__);
+ }
+}
+
+static void logi_dj_recv_queue_notification(struct dj_receiver_dev *djrcv_dev,
+ struct dj_report *dj_report)
+{
+ /* We are called from atomic context (tasklet && djrcv->lock held) */
+
+ kfifo_in(&djrcv_dev->notif_fifo, dj_report, sizeof(struct dj_report));
+
+ if (schedule_work(&djrcv_dev->work) == 0) {
+ dbg_hid("%s: did not schedule the work item, was already "
+ "queued\n", __func__);
+ }
+}
+
+static void logi_dj_recv_forward_null_report(struct dj_receiver_dev *djrcv_dev,
+ struct dj_report *dj_report)
+{
+ /* We are called from atomic context (tasklet && djrcv->lock held) */
+ unsigned int i;
+ u8 reportbuffer[MAX_REPORT_SIZE];
+ struct dj_device *djdev;
+
+ djdev = djrcv_dev->paired_dj_devices[dj_report->device_index];
+
+ if (!djdev) {
+ dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]"
+ " is NULL, index %d\n", dj_report->device_index);
+ return;
+ }
+
+ memset(reportbuffer, 0, sizeof(reportbuffer));
+
+ for (i = 0; i < NUMBER_OF_HID_REPORTS; i++) {
+ if (djdev->reports_supported & (1 << i)) {
+ reportbuffer[0] = i;
+ if (hid_input_report(djdev->hdev,
+ HID_INPUT_REPORT,
+ reportbuffer,
+ hid_reportid_size_map[i], 1)) {
+ dbg_hid("hid_input_report error sending null "
+ "report\n");
+ }
+ }
+ }
+}
+
+static void logi_dj_recv_forward_report(struct dj_receiver_dev *djrcv_dev,
+ struct dj_report *dj_report)
+{
+ /* We are called from atomic context (tasklet && djrcv->lock held) */
+ struct dj_device *dj_device;
+
+ dj_device = djrcv_dev->paired_dj_devices[dj_report->device_index];
+
+ if (dj_device == NULL) {
+ dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]"
+ " is NULL, index %d\n", dj_report->device_index);
+ return;
+ }
+
+ if ((dj_report->report_type > ARRAY_SIZE(hid_reportid_size_map) - 1) ||
+ (hid_reportid_size_map[dj_report->report_type] == 0)) {
+ dbg_hid("invalid report type:%x\n", dj_report->report_type);
+ return;
+ }
+
+ if (hid_input_report(dj_device->hdev,
+ HID_INPUT_REPORT, &dj_report->report_type,
+ hid_reportid_size_map[dj_report->report_type], 1)) {
+ dbg_hid("hid_input_report error\n");
+ }
+}
+
+
+static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
+ struct dj_report *dj_report)
+{
+ struct hid_device *hdev = djrcv_dev->hdev;
+ int sent_bytes;
+
+ if (!hdev->hid_output_raw_report) {
+ dev_err(&hdev->dev, "%s:"
+ "hid_output_raw_report is null\n", __func__);
+ return -ENODEV;
+ }
+
+ sent_bytes = hdev->hid_output_raw_report(hdev, (u8 *) dj_report,
+ sizeof(struct dj_report),
+ HID_OUTPUT_REPORT);
+
+ return (sent_bytes < 0) ? sent_bytes : 0;
+}
+
+static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
+{
+ struct dj_report dj_report;
+
+ memset(&dj_report, 0, sizeof(dj_report));
+ dj_report.report_id = REPORT_ID_DJ_SHORT;
+ dj_report.device_index = 0xFF;
+ dj_report.report_type = REPORT_TYPE_CMD_GET_PAIRED_DEVICES;
+ return logi_dj_recv_send_report(djrcv_dev, &dj_report);
+}
+
+static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
+ unsigned timeout)
+{
+ struct dj_report dj_report;
+
+ memset(&dj_report, 0, sizeof(dj_report));
+ dj_report.report_id = REPORT_ID_DJ_SHORT;
+ dj_report.device_index = 0xFF;
+ dj_report.report_type = REPORT_TYPE_CMD_SWITCH;
+ dj_report.report_params[CMD_SWITCH_PARAM_DEVBITFIELD] = 0x1F;
+ dj_report.report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] = (u8)timeout;
+ return logi_dj_recv_send_report(djrcv_dev, &dj_report);
+}
+
+
+static int logi_dj_ll_open(struct hid_device *hid)
+{
+ dbg_hid("%s:%s\n", __func__, hid->phys);
+ return 0;
+
+}
+
+static void logi_dj_ll_close(struct hid_device *hid)
+{
+ dbg_hid("%s:%s\n", __func__, hid->phys);
+}
+
+static int logi_dj_output_hidraw_report(struct hid_device *hid, u8 * buf,
+ size_t count,
+ unsigned char report_type)
+{
+ /* Called by hid raw to send data */
+ dbg_hid("%s\n", __func__);
+
+ return 0;
+}
+
+static int logi_dj_ll_parse(struct hid_device *hid)
+{
+ struct dj_device *djdev = hid->driver_data;
+ int retval;
+
+ dbg_hid("%s\n", __func__);
+
+ djdev->hdev->version = 0x0111;
+ djdev->hdev->country = 0x00;
+
+ if (djdev->reports_supported & STD_KEYBOARD) {
+ dbg_hid("%s: sending a kbd descriptor, reports_supported: %x\n",
+ __func__, djdev->reports_supported);
+ retval = hid_parse_report(hid,
+ (u8 *) kbd_descriptor,
+ sizeof(kbd_descriptor));
+ if (retval) {
+ dbg_hid("%s: sending a kbd descriptor, hid_parse failed"
+ " error: %d\n", __func__, retval);
+ return retval;
+ }
+ }
+
+ if (djdev->reports_supported & STD_MOUSE) {
+ dbg_hid("%s: sending a mouse descriptor, reports_supported: "
+ "%x\n", __func__, djdev->reports_supported);
+ retval = hid_parse_report(hid,
+ (u8 *) mse_descriptor,
+ sizeof(mse_descriptor));
+ if (retval) {
+ dbg_hid("%s: sending a mouse descriptor, hid_parse "
+ "failed error: %d\n", __func__, retval);
+ return retval;
+ }
+ }
+
+ if (djdev->reports_supported & MULTIMEDIA) {
+ dbg_hid("%s: sending a multimedia report descriptor: %x\n",
+ __func__, djdev->reports_supported);
+ retval = hid_parse_report(hid,
+ (u8 *) consumer_descriptor,
+ sizeof(consumer_descriptor));
+ if (retval) {
+ dbg_hid("%s: sending a consumer_descriptor, hid_parse "
+ "failed error: %d\n", __func__, retval);
+ return retval;
+ }
+ }
+
+ if (djdev->reports_supported & POWER_KEYS) {
+ dbg_hid("%s: sending a power keys report descriptor: %x\n",
+ __func__, djdev->reports_supported);
+ retval = hid_parse_report(hid,
+ (u8 *) syscontrol_descriptor,
+ sizeof(syscontrol_descriptor));
+ if (retval) {
+ dbg_hid("%s: sending a syscontrol_descriptor, "
+ "hid_parse failed error: %d\n",
+ __func__, retval);
+ return retval;
+ }
+ }
+
+ if (djdev->reports_supported & MEDIA_CENTER) {
+ dbg_hid("%s: sending a media center report descriptor: %x\n",
+ __func__, djdev->reports_supported);
+ retval = hid_parse_report(hid,
+ (u8 *) media_descriptor,
+ sizeof(media_descriptor));
+ if (retval) {
+ dbg_hid("%s: sending a media_descriptor, hid_parse "
+ "failed error: %d\n", __func__, retval);
+ return retval;
+ }
+ }
+
+ if (djdev->reports_supported & KBD_LEDS) {
+ dbg_hid("%s: need to send kbd leds report descriptor: %x\n",
+ __func__, djdev->reports_supported);
+ }
+
+ return 0;
+}
+
+static int logi_dj_ll_input_event(struct input_dev *dev, unsigned int type,
+ unsigned int code, int value)
+{
+ /* Sent by the input layer to handle leds and Force Feedback */
+ struct hid_device *dj_hiddev = input_get_drvdata(dev);
+ struct dj_device *dj_dev = dj_hiddev->driver_data;
+
+ struct dj_receiver_dev *djrcv_dev =
+ dev_get_drvdata(dj_hiddev->dev.parent);
+ struct hid_device *dj_rcv_hiddev = djrcv_dev->hdev;
+ struct hid_report_enum *output_report_enum;
+
+ struct hid_field *field;
+ struct hid_report *report;
+ unsigned char data[8];
+ int offset;
+
+ dbg_hid("%s: %s, type:%d | code:%d | value:%d\n",
+ __func__, dev->phys, type, code, value);
+
+ if (type != EV_LED)
+ return -1;
+
+ offset = hidinput_find_field(dj_hiddev, type, code, &field);
+
+ if (offset == -1) {
+ dev_warn(&dev->dev, "event field not found\n");
+ return -1;
+ }
+ hid_set_field(field, offset, value);
+ hid_output_report(field->report, &data[0]);
+
+ output_report_enum = &dj_rcv_hiddev->report_enum[HID_OUTPUT_REPORT];
+ report = output_report_enum->report_id_hash[REPORT_ID_DJ_SHORT];
+ hid_set_field(report->field[0], 0, dj_dev->device_index);
+ hid_set_field(report->field[0], 1, REPORT_TYPE_LEDS);
+ hid_set_field(report->field[0], 2, data[1]);
+
+ usbhid_submit_report(dj_rcv_hiddev, report, USB_DIR_OUT);
+
+ return 0;
+
+}
+
+static int logi_dj_ll_start(struct hid_device *hid)
+{
+ dbg_hid("%s\n", __func__);
+ return 0;
+}
+
+static void logi_dj_ll_stop(struct hid_device *hid)
+{
+ dbg_hid("%s\n", __func__);
+}
+
+
+static struct hid_ll_driver logi_dj_ll_driver = {
+ .parse = logi_dj_ll_parse,
+ .start = logi_dj_ll_start,
+ .stop = logi_dj_ll_stop,
+ .open = logi_dj_ll_open,
+ .close = logi_dj_ll_close,
+ .hidinput_input_event = logi_dj_ll_input_event,
+};
+
+
+static int logi_dj_raw_event(struct hid_device *hdev,
+ struct hid_report *report, u8 *data,
+ int size)
+{
+ struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
+ struct dj_report *dj_report = (struct dj_report *) data;
+ unsigned long flags;
+ bool report_processed = false;
+
+ dbg_hid("%s, size:%d\n", __func__, size);
+
+ /* Here we receive all data coming from iface 2; there are 4 cases:
+ *
+ * 1) Data should continue its normal processing, i.e. data does not
+ * come from the DJ collection, in which case we do nothing and
+ * return 0, so hid-core can continue normal processing (will forward
+ * to the associated hidraw device)
+ *
+ * 2) Data is from the DJ collection and is intended for this driver, i.e.
+ * data contains arrival, departure, etc. notifications, in which case
+ * we queue them for delayed processing by the work queue. We return 1
+ * to hid-core as no further processing is required from it.
+ *
+ * 3) Data is from the DJ collection and reports a connection change;
+ * if the change means RF link loss, then we must send a null report
+ * to the upper layer to discard potentially pressed keys that may be
+ * repeated forever by the input layer. Return 1 to hid-core as no
+ * further processing is required.
+ *
+ * 4) Data is from the DJ collection and is an actual input event from
+ * a paired DJ device, in which case we forward it to the correct hid
+ * device (via hid_input_report()) and return 1 so hid-core does not do
+ * anything else with it.
+ */
+
+ spin_lock_irqsave(&djrcv_dev->lock, flags);
+ if (dj_report->report_id == REPORT_ID_DJ_SHORT) {
+ switch (dj_report->report_type) {
+ case REPORT_TYPE_NOTIF_DEVICE_PAIRED:
+ case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED:
+ logi_dj_recv_queue_notification(djrcv_dev, dj_report);
+ break;
+ case REPORT_TYPE_NOTIF_CONNECTION_STATUS:
+ if (dj_report->report_params[CONNECTION_STATUS_PARAM_STATUS] ==
+ STATUS_LINKLOSS) {
+ logi_dj_recv_forward_null_report(djrcv_dev, dj_report);
+ }
+ break;
+ default:
+ logi_dj_recv_forward_report(djrcv_dev, dj_report);
+ }
+ report_processed = true;
+ }
+ spin_unlock_irqrestore(&djrcv_dev->lock, flags);
+
+ return report_processed;
+}
+
+static int logi_dj_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct dj_receiver_dev *djrcv_dev;
+ int retval;
+
+ if (is_dj_device((struct dj_device *)hdev->driver_data))
+ return -ENODEV;
+
+ dbg_hid("%s called for ifnum %d\n", __func__,
+ intf->cur_altsetting->desc.bInterfaceNumber);
+
+ /* Ignore interfaces 0 and 1; they will not carry any data, so don't
+ * create any hid_device for them */
+ if (intf->cur_altsetting->desc.bInterfaceNumber !=
+ LOGITECH_DJ_INTERFACE_NUMBER) {
+ dbg_hid("%s: ignoring ifnum %d\n", __func__,
+ intf->cur_altsetting->desc.bInterfaceNumber);
+ return -ENODEV;
+ }
+
+ /* Treat interface 2 */
+
+ djrcv_dev = kzalloc(sizeof(struct dj_receiver_dev), GFP_KERNEL);
+ if (!djrcv_dev) {
+ dev_err(&hdev->dev,
+ "%s:failed allocating dj_receiver_dev\n", __func__);
+ return -ENOMEM;
+ }
+ djrcv_dev->hdev = hdev;
+ INIT_WORK(&djrcv_dev->work, delayedwork_callback);
+ spin_lock_init(&djrcv_dev->lock);
+ if (kfifo_alloc(&djrcv_dev->notif_fifo,
+ DJ_MAX_NUMBER_NOTIFICATIONS * sizeof(struct dj_report),
+ GFP_KERNEL)) {
+ dev_err(&hdev->dev,
+ "%s:failed allocating notif_fifo\n", __func__);
+ kfree(djrcv_dev);
+ return -ENOMEM;
+ }
+ hid_set_drvdata(hdev, djrcv_dev);
+
+ /* Call usbhid to fetch the HID descriptors of interface 2 and
+ * subsequently call hid-core to parse the fetched descriptors;
+ * this will in turn create the hidraw and hiddev nodes
+ * for interface 2 of the receiver */
+ retval = hid_parse(hdev);
+ if (retval) {
+ dev_err(&hdev->dev,
+ "%s:parse of interface 2 failed\n", __func__);
+ goto hid_parse_fail;
+ }
+
+ /* Start the USB device and connect to the upper interfaces, hiddev
+ * and hidraw */
+ retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (retval) {
+ dev_err(&hdev->dev,
+ "%s:hid_hw_start returned error\n", __func__);
+ goto hid_hw_start_fail;
+ }
+
+ retval = logi_dj_recv_switch_to_dj_mode(djrcv_dev, 0);
+ if (retval < 0) {
+ dev_err(&hdev->dev,
+ "%s:logi_dj_recv_switch_to_dj_mode returned error:%d\n",
+ __func__, retval);
+ goto switch_to_dj_mode_fail;
+ }
+
+ /* This is enabling the polling urb on the IN endpoint */
+ retval = hdev->ll_driver->open(hdev);
+ if (retval < 0) {
+ dev_err(&hdev->dev, "%s:hdev->ll_driver->open returned "
+ "error:%d\n", __func__, retval);
+ goto llopen_failed;
+ }
+
+ retval = logi_dj_recv_query_paired_devices(djrcv_dev);
+ if (retval < 0) {
+ dev_err(&hdev->dev, "%s:logi_dj_recv_query_paired_devices "
+ "error:%d\n", __func__, retval);
+ goto logi_dj_recv_query_paired_devices_failed;
+ }
+
+ return retval;
+
+logi_dj_recv_query_paired_devices_failed:
+ hdev->ll_driver->close(hdev);
+
+llopen_failed:
+switch_to_dj_mode_fail:
+ hid_hw_stop(hdev);
+
+hid_hw_start_fail:
+hid_parse_fail:
+ kfifo_free(&djrcv_dev->notif_fifo);
+ kfree(djrcv_dev);
+ hid_set_drvdata(hdev, NULL);
+ return retval;
+
+}
+
+#ifdef CONFIG_PM
+static int logi_dj_reset_resume(struct hid_device *hdev)
+{
+ int retval;
+ struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
+
+ retval = logi_dj_recv_switch_to_dj_mode(djrcv_dev, 0);
+ if (retval < 0) {
+ dev_err(&hdev->dev,
+ "%s:logi_dj_recv_switch_to_dj_mode returned error:%d\n",
+ __func__, retval);
+ }
+
+ return 0;
+}
+#endif
+
+static void logi_dj_remove(struct hid_device *hdev)
+{
+ struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
+ struct dj_device *dj_dev;
+ int i;
+
+ dbg_hid("%s\n", __func__);
+
+ cancel_work_sync(&djrcv_dev->work);
+
+ hdev->ll_driver->close(hdev);
+ hid_hw_stop(hdev);
+
+ /* At this point the only context that can access djrcv_dev is this
+ * thread, as the work item is guaranteed to have finished and no more
+ * raw_event callbacks should arrive after the remove callback was
+ * triggered, so no locks are taken around the code below */
+ for (i = 0; i < (DJ_MAX_PAIRED_DEVICES + DJ_DEVICE_INDEX_MIN); i++) {
+ dj_dev = djrcv_dev->paired_dj_devices[i];
+ if (dj_dev != NULL) {
+ hid_destroy_device(dj_dev->hdev);
+ kfree(dj_dev);
+ djrcv_dev->paired_dj_devices[i] = NULL;
+ }
+ }
+
+ kfifo_free(&djrcv_dev->notif_fifo);
+ kfree(djrcv_dev);
+ hid_set_drvdata(hdev, NULL);
+}
+
+static int logi_djdevice_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int ret;
+ struct dj_device *dj_dev = hdev->driver_data;
+
+ if (!is_dj_device(dj_dev))
+ return -ENODEV;
+
+ ret = hid_parse(hdev);
+ if (!ret)
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+
+ return ret;
+}
+
+static const struct hid_device_id logi_dj_receivers[] = {
+ {HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER)},
+ {HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2)},
+ {}
+};
+
+MODULE_DEVICE_TABLE(hid, logi_dj_receivers);
+
+static struct hid_driver logi_djreceiver_driver = {
+ .name = "logitech-djreceiver",
+ .id_table = logi_dj_receivers,
+ .probe = logi_dj_probe,
+ .remove = logi_dj_remove,
+ .raw_event = logi_dj_raw_event,
+#ifdef CONFIG_PM
+ .reset_resume = logi_dj_reset_resume,
+#endif
+};
+
+
+static const struct hid_device_id logi_dj_devices[] = {
+ {HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER)},
+ {HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2)},
+ {}
+};
+
+static struct hid_driver logi_djdevice_driver = {
+ .name = "logitech-djdevice",
+ .id_table = logi_dj_devices,
+ .probe = logi_djdevice_probe,
+};
+
+
+static int __init logi_dj_init(void)
+{
+ int retval;
+
+ dbg_hid("Logitech-DJ:%s\n", __func__);
+
+ retval = hid_register_driver(&logi_djreceiver_driver);
+ if (retval)
+ return retval;
+
+ retval = hid_register_driver(&logi_djdevice_driver);
+ if (retval)
+ hid_unregister_driver(&logi_djreceiver_driver);
+
+ return retval;
+
+}
+
+static void __exit logi_dj_exit(void)
+{
+ dbg_hid("Logitech-DJ:%s\n", __func__);
+
+ hid_unregister_driver(&logi_djdevice_driver);
+ hid_unregister_driver(&logi_djreceiver_driver);
+
+}
+
+module_init(logi_dj_init);
+module_exit(logi_dj_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Logitech");
+MODULE_AUTHOR("Nestor Lopez Casado");
+MODULE_AUTHOR("nlopezcasad@logitech.com");
diff --git a/drivers/hid/hid-logitech-dj.h b/drivers/hid/hid-logitech-dj.h
new file mode 100644
index 00000000000..fd28a5e0ca3
--- /dev/null
+++ b/drivers/hid/hid-logitech-dj.h
@@ -0,0 +1,123 @@
+#ifndef __HID_LOGITECH_DJ_H
+#define __HID_LOGITECH_DJ_H
+
+/*
+ * HID driver for Logitech Unifying receivers
+ *
+ * Copyright (c) 2011 Logitech
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kfifo.h>
+
+#define DJ_MAX_PAIRED_DEVICES 6
+#define DJ_MAX_NUMBER_NOTIFICATIONS 8
+#define DJ_DEVICE_INDEX_MIN 1
+#define DJ_DEVICE_INDEX_MAX 6
+
+#define DJREPORT_SHORT_LENGTH 15
+#define DJREPORT_LONG_LENGTH 32
+
+#define REPORT_ID_DJ_SHORT 0x20
+#define REPORT_ID_DJ_LONG 0x21
+
+#define REPORT_TYPE_RFREPORT_FIRST 0x01
+#define REPORT_TYPE_RFREPORT_LAST 0x1F
+
+/* Command Switch to DJ mode */
+#define REPORT_TYPE_CMD_SWITCH 0x80
+#define CMD_SWITCH_PARAM_DEVBITFIELD 0x00
+#define CMD_SWITCH_PARAM_TIMEOUT_SECONDS 0x01
+#define TIMEOUT_NO_KEEPALIVE 0x00
+
+/* Command to Get the list of Paired devices */
+#define REPORT_TYPE_CMD_GET_PAIRED_DEVICES 0x81
+
+/* Device Paired Notification */
+#define REPORT_TYPE_NOTIF_DEVICE_PAIRED 0x41
+#define SPFUNCTION_MORE_NOTIF_EXPECTED 0x01
+#define SPFUNCTION_DEVICE_LIST_EMPTY 0x02
+#define DEVICE_PAIRED_PARAM_SPFUNCTION 0x00
+#define DEVICE_PAIRED_PARAM_EQUAD_ID_LSB 0x01
+#define DEVICE_PAIRED_PARAM_EQUAD_ID_MSB 0x02
+#define DEVICE_PAIRED_RF_REPORT_TYPE 0x03
+
+/* Device Un-Paired Notification */
+#define REPORT_TYPE_NOTIF_DEVICE_UNPAIRED 0x40
+
+
+/* Connection Status Notification */
+#define REPORT_TYPE_NOTIF_CONNECTION_STATUS 0x42
+#define CONNECTION_STATUS_PARAM_STATUS 0x00
+#define STATUS_LINKLOSS 0x01
+
+/* Error Notification */
+#define REPORT_TYPE_NOTIF_ERROR 0x7F
+#define NOTIF_ERROR_PARAM_ETYPE 0x00
+#define ETYPE_KEEPALIVE_TIMEOUT 0x01
+
+/* supported DJ HID && RF report types */
+#define REPORT_TYPE_KEYBOARD 0x01
+#define REPORT_TYPE_MOUSE 0x02
+#define REPORT_TYPE_CONSUMER_CONTROL 0x03
+#define REPORT_TYPE_SYSTEM_CONTROL 0x04
+#define REPORT_TYPE_MEDIA_CENTER 0x08
+#define REPORT_TYPE_LEDS 0x0E
+
+/* RF Report types bitfield */
+#define STD_KEYBOARD 0x00000002
+#define STD_MOUSE 0x00000004
+#define MULTIMEDIA 0x00000008
+#define POWER_KEYS 0x00000010
+#define MEDIA_CENTER 0x00000100
+#define KBD_LEDS 0x00004000
+
+struct dj_report {
+ u8 report_id;
+ u8 device_index;
+ u8 report_type;
+ u8 report_params[DJREPORT_SHORT_LENGTH - 3];
+};
+
+struct dj_receiver_dev {
+ struct hid_device *hdev;
+ struct dj_device *paired_dj_devices[DJ_MAX_PAIRED_DEVICES +
+ DJ_DEVICE_INDEX_MIN];
+ struct work_struct work;
+ struct kfifo notif_fifo;
+ spinlock_t lock;
+};
+
+struct dj_device {
+ struct hid_device *hdev;
+ struct dj_receiver_dev *dj_receiver_dev;
+ u32 reports_supported;
+ u8 device_index;
+};
+
+/**
+ * is_dj_device - tell whether the given dj_device is not the receiver.
+ * @dj_dev: the dj device to test
+ *
+ * This macro tests if a struct dj_device pointer is a device created
+ * by the bus enumerator.
+ */
+#define is_dj_device(dj_dev) \
+ (&(dj_dev)->dj_receiver_dev->hdev->dev == (dj_dev)->hdev->dev.parent)
+
+#endif
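
For orientation only, and not part of the patch, the short DJ report layout defined above can be exercised with a small helper; the function below mirrors what logi_dj_recv_query_paired_devices() builds in hid-logitech-dj.c, while the helper name itself is invented:

/* Hypothetical kernel-side helper, not part of this patch: fill a short
 * DJ report that asks the receiver to enumerate its paired devices,
 * using the constants defined in this header. */
#include <linux/string.h>
#include "hid-logitech-dj.h"

static void build_get_paired_devices(struct dj_report *report)
{
	memset(report, 0, sizeof(*report));
	report->report_id = REPORT_ID_DJ_SHORT;			/* 0x20 */
	report->device_index = 0xFF;				/* addressed to the receiver itself */
	report->report_type = REPORT_TYPE_CMD_GET_PAIRED_DEVICES;	/* 0x81 */
}
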
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index f0fbd7bd239..2ab71758e2e 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -405,6 +405,13 @@ static void magicmouse_setup_input(struct input_dev *input, struct hid_device *h
__set_bit(REL_HWHEEL, input->relbit);
}
} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
+ /* input->keybit is initialized with incorrect button info
+ * for Magic Trackpad. There really is only one physical
+ * button (BTN_LEFT == BTN_MOUSE). Make sure we don't
+ * advertise buttons that don't exist...
+ */
+ __clear_bit(BTN_RIGHT, input->keybit);
+ __clear_bit(BTN_MIDDLE, input->keybit);
__set_bit(BTN_MOUSE, input->keybit);
__set_bit(BTN_TOOL_FINGER, input->keybit);
__set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 58d0e7aaf08..fa5d7a1ffa9 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -47,10 +47,11 @@ MODULE_LICENSE("GPL");
#define MT_QUIRK_SLOT_IS_CONTACTID (1 << 1)
#define MT_QUIRK_CYPRESS (1 << 2)
#define MT_QUIRK_SLOT_IS_CONTACTNUMBER (1 << 3)
-#define MT_QUIRK_VALID_IS_INRANGE (1 << 4)
-#define MT_QUIRK_VALID_IS_CONFIDENCE (1 << 5)
-#define MT_QUIRK_EGALAX_XYZ_FIXUP (1 << 6)
-#define MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE (1 << 7)
+#define MT_QUIRK_ALWAYS_VALID (1 << 4)
+#define MT_QUIRK_VALID_IS_INRANGE (1 << 5)
+#define MT_QUIRK_VALID_IS_CONFIDENCE (1 << 6)
+#define MT_QUIRK_EGALAX_XYZ_FIXUP (1 << 7)
+#define MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE (1 << 8)
struct mt_slot {
__s32 x, y, p, w, h;
@@ -86,11 +87,12 @@ struct mt_class {
/* classes of device behavior */
#define MT_CLS_DEFAULT 0x0001
-#define MT_CLS_CONFIDENCE 0x0002
-#define MT_CLS_CONFIDENCE_MINUS_ONE 0x0003
-#define MT_CLS_DUAL_INRANGE_CONTACTID 0x0004
-#define MT_CLS_DUAL_INRANGE_CONTACTNUMBER 0x0005
-#define MT_CLS_DUAL_NSMU_CONTACTID 0x0006
+#define MT_CLS_SERIAL 0x0002
+#define MT_CLS_CONFIDENCE 0x0003
+#define MT_CLS_CONFIDENCE_MINUS_ONE 0x0004
+#define MT_CLS_DUAL_INRANGE_CONTACTID 0x0005
+#define MT_CLS_DUAL_INRANGE_CONTACTNUMBER 0x0006
+#define MT_CLS_DUAL_NSMU_CONTACTID 0x0007
/* vendor specific classes */
#define MT_CLS_3M 0x0101
@@ -134,6 +136,8 @@ static int find_slot_from_contactid(struct mt_device *td)
struct mt_class mt_classes[] = {
{ .name = MT_CLS_DEFAULT,
.quirks = MT_QUIRK_NOT_SEEN_MEANS_UP },
+ { .name = MT_CLS_SERIAL,
+ .quirks = MT_QUIRK_ALWAYS_VALID},
{ .name = MT_CLS_CONFIDENCE,
.quirks = MT_QUIRK_VALID_IS_CONFIDENCE },
{ .name = MT_CLS_CONFIDENCE_MINUS_ONE,
@@ -213,6 +217,16 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct mt_class *cls = td->mtclass;
__s32 quirks = cls->quirks;
+ /* Only map fields from TouchScreen or TouchPad collections.
+ * We need to ignore fields that belong to other collections
+ * such as Mouse that might have the same GenericDesktop usages. */
+ if (field->application == HID_DG_TOUCHSCREEN)
+ set_bit(INPUT_PROP_DIRECT, hi->input->propbit);
+ else if (field->application == HID_DG_TOUCHPAD)
+ set_bit(INPUT_PROP_POINTER, hi->input->propbit);
+ else
+ return 0;
+
switch (usage->hid & HID_USAGE_PAGE) {
case HID_UP_GENDESK:
@@ -277,6 +291,7 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
td->last_slot_field = usage->hid;
td->last_field_index = field->index;
td->last_mt_collection = usage->collection_index;
+ hdev->quirks &= ~HID_QUIRK_MULTITOUCH;
return 1;
case HID_DG_WIDTH:
hid_map_usage(hi, usage, bit, max,
@@ -435,7 +450,9 @@ static int mt_event(struct hid_device *hid, struct hid_field *field,
if (hid->claimed & HID_CLAIMED_INPUT && td->slots) {
switch (usage->hid) {
case HID_DG_INRANGE:
- if (quirks & MT_QUIRK_VALID_IS_INRANGE)
+ if (quirks & MT_QUIRK_ALWAYS_VALID)
+ td->curvalid = true;
+ else if (quirks & MT_QUIRK_VALID_IS_INRANGE)
td->curvalid = value;
break;
case HID_DG_TIPSWITCH:
@@ -513,12 +530,44 @@ static void mt_set_input_mode(struct hid_device *hdev)
}
}
+/* a list of devices for which there is a specialized multitouch driver */
+static const struct hid_device_id mt_have_special_driver[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, 0x0001) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, 0x0006) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA,
+ USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA,
+ USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) },
+ { }
+};
+
+static bool mt_match_one_id(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ return id->bus == hdev->bus &&
+ (id->vendor == HID_ANY_ID || id->vendor == hdev->vendor) &&
+ (id->product == HID_ANY_ID || id->product == hdev->product);
+}
+
+static const struct hid_device_id *mt_match_id(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ for (; id->bus; id++)
+ if (mt_match_one_id(hdev, id))
+ return id;
+
+ return NULL;
+}
+
static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int ret, i;
struct mt_device *td;
struct mt_class *mtclass = mt_classes; /* MT_CLS_DEFAULT */
+ if (mt_match_id(hdev, mt_have_special_driver))
+ return -ENODEV;
+
for (i = 0; mt_classes[i].name ; i++) {
if (id->driver_data == mt_classes[i].name) {
mtclass = &(mt_classes[i]);
@@ -526,10 +575,6 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
}
}
- /* This allows the driver to correctly support devices
- * that emit events over several HID messages.
- */
- hdev->quirks |= HID_QUIRK_NO_INPUT_SYNC;
td = kzalloc(sizeof(struct mt_device), GFP_KERNEL);
if (!td) {
@@ -545,10 +590,16 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (ret != 0)
goto fail;
+ hdev->quirks |= HID_QUIRK_MULTITOUCH;
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret)
goto fail;
+ /* This allows the driver to correctly support devices
+ * that emit events over several HID messages.
+ */
+ hdev->quirks |= HID_QUIRK_NO_INPUT_SYNC;
+
td->slots = kzalloc(td->maxcontacts * sizeof(struct mt_slot),
GFP_KERNEL);
if (!td->slots) {
@@ -662,6 +713,11 @@ static const struct hid_device_id mt_devices[] = {
HID_USB_DEVICE(USB_VENDOR_ID_GOODTOUCH,
USB_DEVICE_ID_GOODTOUCH_000f) },
+ /* Ideacom panel */
+ { .driver_data = MT_CLS_SERIAL,
+ HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM,
+ USB_DEVICE_ID_IDEACOM_IDC6650) },
+
/* Ilitek dual touch panel */
{ .driver_data = MT_CLS_DEFAULT,
HID_USB_DEVICE(USB_VENDOR_ID_ILITEK,
@@ -672,6 +728,11 @@ static const struct hid_device_id mt_devices[] = {
HID_USB_DEVICE(USB_VENDOR_ID_IRTOUCHSYSTEMS,
USB_DEVICE_ID_IRTOUCH_INFRARED_USB) },
+ /* LG Display panels */
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_LG,
+ USB_DEVICE_ID_LG_MULTITOUCH) },
+
/* Lumio panels */
{ .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
HID_USB_DEVICE(USB_VENDOR_ID_LUMIO,
@@ -732,6 +793,10 @@ static const struct hid_device_id mt_devices[] = {
HID_USB_DEVICE(USB_VENDOR_ID_XAT,
USB_DEVICE_ID_XAT_CSR) },
+ /* Rest of the world */
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(HID_ANY_ID, HID_ANY_ID) },
+
{ }
};
MODULE_DEVICE_TABLE(hid, mt_devices);
diff --git a/drivers/hid/hid-primax.c b/drivers/hid/hid-primax.c
new file mode 100644
index 00000000000..4d3c60d8831
--- /dev/null
+++ b/drivers/hid/hid-primax.c
@@ -0,0 +1,117 @@
+/*
+ * HID driver for primax and similar keyboards with in-band modifiers
+ *
+ * Copyright 2011 Google Inc. All Rights Reserved
+ *
+ * Author:
+ * Terry Lambert <tlambert@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+static int px_raw_event(struct hid_device *hid, struct hid_report *report,
+ u8 *data, int size)
+{
+ int idx = size;
+
+ switch (report->id) {
+ case 0: /* keyboard input */
+ /*
+ * Convert in-band modifier key values into out of band
+ * modifier bits and pull the key strokes from the report.
+ * Thus a report data set which looked like:
+ *
+ * [00][00][E0][30][00][00][00][00]
+ * (no modifier bits + "Left Shift" key + "1" key)
+ *
+ * Would be converted to:
+ *
+ * [01][00][00][30][00][00][00][00]
+ * (Left Shift modifier bit + "1" key)
+ *
+ * As long as it's in the size range, the upper level
+ * drivers don't particularly care if there are in-band
+ * 0-valued keys, so they don't stop parsing.
+ */
+ while (--idx > 1) {
+ if (data[idx] < 0xE0 || data[idx] > 0xE7)
+ continue;
+ data[0] |= (1 << (data[idx] - 0xE0));
+ data[idx] = 0;
+ }
+ hid_report_raw_event(hid, HID_INPUT_REPORT, data, size, 0);
+ return 1;
+
+ default: /* unknown report */
+ /* Unknown report type; pass upstream */
+ hid_info(hid, "unknown report type %d\n", report->id);
+ break;
+ }
+
+ return 0;
+}
+
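For reference, a minimal standalone sketch of the folding loop above, applied to the example report from the comment (plain user-space C, not part of the driver):

#include <stdio.h>

int main(void)
{
	/* the report from the comment: no modifiers + Left Shift usage + "1" key */
	unsigned char data[8] = { 0x00, 0x00, 0xE0, 0x30, 0x00, 0x00, 0x00, 0x00 };
	int idx = sizeof(data);

	while (--idx > 1) {
		if (data[idx] < 0xE0 || data[idx] > 0xE7)
			continue;
		data[0] |= 1 << (data[idx] - 0xE0);	/* 0xE0 maps to bit 0 (Left Shift) */
		data[idx] = 0;
	}

	for (idx = 0; idx < 8; idx++)
		printf("[%02X]", data[idx]);
	printf("\n");	/* prints [01][00][00][30][00][00][00][00] */
	return 0;
}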
+static int px_probe(struct hid_device *hid, const struct hid_device_id *id)
+{
+ int ret;
+
+ ret = hid_parse(hid);
+ if (ret) {
+ hid_err(hid, "parse failed\n");
+ goto fail;
+ }
+
+ ret = hid_hw_start(hid, HID_CONNECT_DEFAULT);
+ if (ret)
+ hid_err(hid, "hw start failed\n");
+
+fail:
+ return ret;
+}
+
+static void px_remove(struct hid_device *hid)
+{
+ hid_hw_stop(hid);
+}
+
+static const struct hid_device_id px_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, px_devices);
+
+static struct hid_driver px_driver = {
+ .name = "primax",
+ .id_table = px_devices,
+ .raw_event = px_raw_event,
+ .probe = px_probe,
+ .remove = px_remove,
+};
+
+static int __init px_init(void)
+{
+ return hid_register_driver(&px_driver);
+}
+
+static void __exit px_exit(void)
+{
+ hid_unregister_driver(&px_driver);
+}
+
+module_init(px_init);
+module_exit(px_exit);
+MODULE_AUTHOR("Terry Lambert <tlambert@google.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c
index 158b389d0fb..f779009104e 100644
--- a/drivers/hid/hid-prodikeys.c
+++ b/drivers/hid/hid-prodikeys.c
@@ -816,7 +816,7 @@ static int pk_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (pm == NULL) {
hid_err(hdev, "can't alloc descriptor\n");
ret = -ENOMEM;
- goto err_free;
+ goto err_free_pk;
}
pm->pk = pk;
@@ -849,10 +849,10 @@ static int pk_probe(struct hid_device *hdev, const struct hid_device_id *id)
err_stop:
hid_hw_stop(hdev);
err_free:
- if (pm != NULL)
- kfree(pm);
-
+ kfree(pm);
+err_free_pk:
kfree(pk);
+
return ret;
}
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
index 2b8f3a31ffb..e2072afb34b 100644
--- a/drivers/hid/hid-roccat-kone.c
+++ b/drivers/hid/hid-roccat-kone.c
@@ -37,6 +37,21 @@
static uint profile_numbers[5] = {0, 1, 2, 3, 4};
+static void kone_profile_activated(struct kone_device *kone, uint new_profile)
+{
+ kone->actual_profile = new_profile;
+ kone->actual_dpi = kone->profiles[new_profile - 1].startup_dpi;
+}
+
+static void kone_profile_report(struct kone_device *kone, uint new_profile)
+{
+ struct kone_roccat_report roccat_report;
+ roccat_report.event = kone_mouse_event_switch_profile;
+ roccat_report.value = new_profile;
+ roccat_report.key = 0;
+ roccat_report_event(kone->chrdev_minor, (uint8_t *)&roccat_report);
+}
+
static int kone_receive(struct usb_device *usb_dev, uint usb_command,
void *data, uint size)
{
@@ -283,7 +298,7 @@ static ssize_t kone_sysfs_write_settings(struct file *fp, struct kobject *kobj,
container_of(kobj, struct device, kobj)->parent->parent;
struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
- int retval = 0, difference;
+ int retval = 0, difference, old_profile;
/* I need to get my data in one piece */
if (off != 0 || count != sizeof(struct kone_settings))
@@ -294,21 +309,20 @@ static ssize_t kone_sysfs_write_settings(struct file *fp, struct kobject *kobj,
if (difference) {
retval = kone_set_settings(usb_dev,
(struct kone_settings const *)buf);
- if (!retval)
- memcpy(&kone->settings, buf,
- sizeof(struct kone_settings));
- }
- mutex_unlock(&kone->kone_lock);
+ if (retval) {
+ mutex_unlock(&kone->kone_lock);
+ return retval;
+ }
- if (retval)
- return retval;
+ old_profile = kone->settings.startup_profile;
+ memcpy(&kone->settings, buf, sizeof(struct kone_settings));
- /*
- * If we get here, treat settings as okay and update actual values
- * according to startup_profile
- */
- kone->actual_profile = kone->settings.startup_profile;
- kone->actual_dpi = kone->profiles[kone->actual_profile - 1].startup_dpi;
+ kone_profile_activated(kone, kone->settings.startup_profile);
+
+ if (kone->settings.startup_profile != old_profile)
+ kone_profile_report(kone, kone->settings.startup_profile);
+ }
+ mutex_unlock(&kone->kone_lock);
return sizeof(struct kone_settings);
}
@@ -501,6 +515,8 @@ static ssize_t kone_sysfs_set_tcu(struct device *dev,
goto exit_no_settings;
goto exit_unlock;
}
+ /* calibration resets profile */
+ kone_profile_activated(kone, kone->settings.startup_profile);
}
retval = size;
@@ -544,16 +560,16 @@ static ssize_t kone_sysfs_set_startup_profile(struct device *dev,
kone_set_settings_checksum(&kone->settings);
retval = kone_set_settings(usb_dev, &kone->settings);
-
- mutex_unlock(&kone->kone_lock);
-
- if (retval)
+ if (retval) {
+ mutex_unlock(&kone->kone_lock);
return retval;
+ }
/* changing the startup profile immediately activates this profile */
- kone->actual_profile = new_startup_profile;
- kone->actual_dpi = kone->profiles[kone->actual_profile - 1].startup_dpi;
+ kone_profile_activated(kone, new_startup_profile);
+ kone_profile_report(kone, new_startup_profile);
+ mutex_unlock(&kone->kone_lock);
return size;
}
@@ -665,8 +681,7 @@ static int kone_init_kone_device_struct(struct usb_device *usb_dev,
if (retval)
return retval;
- kone->actual_profile = kone->settings.startup_profile;
- kone->actual_dpi = kone->profiles[kone->actual_profile].startup_dpi;
+ kone_profile_activated(kone, kone->settings.startup_profile);
return 0;
}
@@ -776,10 +791,10 @@ static void kone_keep_values_up_to_date(struct kone_device *kone,
{
switch (event->event) {
case kone_mouse_event_switch_profile:
+ kone->actual_dpi = kone->profiles[event->value - 1].
+ startup_dpi;
case kone_mouse_event_osd_profile:
kone->actual_profile = event->value;
- kone->actual_dpi = kone->profiles[kone->actual_profile - 1].
- startup_dpi;
break;
case kone_mouse_event_switch_dpi:
case kone_mouse_event_osd_dpi:
diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c
index 1f8336e3f58..112d934132c 100644
--- a/drivers/hid/hid-roccat-kovaplus.c
+++ b/drivers/hid/hid-roccat-kovaplus.c
@@ -323,6 +323,7 @@ static ssize_t kovaplus_sysfs_set_actual_profile(struct device *dev,
struct usb_device *usb_dev;
unsigned long profile;
int retval;
+ struct kovaplus_roccat_report roccat_report;
dev = dev->parent->parent;
kovaplus = hid_get_drvdata(dev_get_drvdata(dev));
@@ -337,10 +338,22 @@ static ssize_t kovaplus_sysfs_set_actual_profile(struct device *dev,
mutex_lock(&kovaplus->kovaplus_lock);
retval = kovaplus_set_actual_profile(usb_dev, profile);
+ if (retval) {
+ mutex_unlock(&kovaplus->kovaplus_lock);
+ return retval;
+ }
+
kovaplus_profile_activated(kovaplus, profile);
+
+ roccat_report.type = KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_PROFILE_1;
+ roccat_report.profile = profile + 1;
+ roccat_report.button = 0;
+ roccat_report.data1 = profile + 1;
+ roccat_report.data2 = 0;
+ roccat_report_event(kovaplus->chrdev_minor,
+ (uint8_t const *)&roccat_report);
+
mutex_unlock(&kovaplus->kovaplus_lock);
- if (retval)
- return retval;
return size;
}
diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c
index 8140776bd8c..df05c1b1064 100644
--- a/drivers/hid/hid-roccat-pyra.c
+++ b/drivers/hid/hid-roccat-pyra.c
@@ -298,6 +298,7 @@ static ssize_t pyra_sysfs_write_settings(struct file *fp,
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
int retval = 0;
int difference;
+ struct pyra_roccat_report roccat_report;
if (off != 0 || count != sizeof(struct pyra_settings))
return -EINVAL;
@@ -307,17 +308,23 @@ static ssize_t pyra_sysfs_write_settings(struct file *fp,
if (difference) {
retval = pyra_set_settings(usb_dev,
(struct pyra_settings const *)buf);
- if (!retval)
- memcpy(&pyra->settings, buf,
- sizeof(struct pyra_settings));
- }
- mutex_unlock(&pyra->pyra_lock);
+ if (retval) {
+ mutex_unlock(&pyra->pyra_lock);
+ return retval;
+ }
- if (retval)
- return retval;
+ memcpy(&pyra->settings, buf,
+ sizeof(struct pyra_settings));
- profile_activated(pyra, pyra->settings.startup_profile);
+ profile_activated(pyra, pyra->settings.startup_profile);
+ roccat_report.type = PYRA_MOUSE_EVENT_BUTTON_TYPE_PROFILE_2;
+ roccat_report.value = pyra->settings.startup_profile + 1;
+ roccat_report.key = 0;
+ roccat_report_event(pyra->chrdev_minor,
+ (uint8_t const *)&roccat_report);
+ }
+ mutex_unlock(&pyra->pyra_lock);
return sizeof(struct pyra_settings);
}
diff --git a/drivers/hid/hid-sjoy.c b/drivers/hid/hid-sjoy.c
index 16f7cafc969..670da9109f8 100644
--- a/drivers/hid/hid-sjoy.c
+++ b/drivers/hid/hid-sjoy.c
@@ -65,8 +65,7 @@ static int sjoyff_init(struct hid_device *hid)
{
struct sjoyff_device *sjoyff;
struct hid_report *report;
- struct hid_input *hidinput = list_entry(hid->inputs.next,
- struct hid_input, list);
+ struct hid_input *hidinput;
struct list_head *report_list =
&hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct list_head *report_ptr = report_list;
@@ -78,43 +77,45 @@ static int sjoyff_init(struct hid_device *hid)
return -ENODEV;
}
- report_ptr = report_ptr->next;
+ list_for_each_entry(hidinput, &hid->inputs, list) {
+ report_ptr = report_ptr->next;
- if (report_ptr == report_list) {
- hid_err(hid, "required output report is missing\n");
- return -ENODEV;
- }
+ if (report_ptr == report_list) {
+ hid_err(hid, "required output report is missing\n");
+ return -ENODEV;
+ }
- report = list_entry(report_ptr, struct hid_report, list);
- if (report->maxfield < 1) {
- hid_err(hid, "no fields in the report\n");
- return -ENODEV;
- }
+ report = list_entry(report_ptr, struct hid_report, list);
+ if (report->maxfield < 1) {
+ hid_err(hid, "no fields in the report\n");
+ return -ENODEV;
+ }
- if (report->field[0]->report_count < 3) {
- hid_err(hid, "not enough values in the field\n");
- return -ENODEV;
- }
+ if (report->field[0]->report_count < 3) {
+ hid_err(hid, "not enough values in the field\n");
+ return -ENODEV;
+ }
- sjoyff = kzalloc(sizeof(struct sjoyff_device), GFP_KERNEL);
- if (!sjoyff)
- return -ENOMEM;
+ sjoyff = kzalloc(sizeof(struct sjoyff_device), GFP_KERNEL);
+ if (!sjoyff)
+ return -ENOMEM;
- dev = hidinput->input;
+ dev = hidinput->input;
- set_bit(FF_RUMBLE, dev->ffbit);
+ set_bit(FF_RUMBLE, dev->ffbit);
- error = input_ff_create_memless(dev, sjoyff, hid_sjoyff_play);
- if (error) {
- kfree(sjoyff);
- return error;
- }
+ error = input_ff_create_memless(dev, sjoyff, hid_sjoyff_play);
+ if (error) {
+ kfree(sjoyff);
+ return error;
+ }
- sjoyff->report = report;
- sjoyff->report->field[0]->value[0] = 0x01;
- sjoyff->report->field[0]->value[1] = 0x00;
- sjoyff->report->field[0]->value[2] = 0x00;
- usbhid_submit_report(hid, sjoyff->report, USB_DIR_OUT);
+ sjoyff->report = report;
+ sjoyff->report->field[0]->value[0] = 0x01;
+ sjoyff->report->field[0]->value[1] = 0x00;
+ sjoyff->report->field[0]->value[2] = 0x00;
+ usbhid_submit_report(hid, sjoyff->report, USB_DIR_OUT);
+ }
hid_info(hid, "Force feedback for SmartJoy PLUS PS2/USB adapter\n");
@@ -131,6 +132,8 @@ static int sjoy_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int ret;
+ hdev->quirks |= id->driver_data;
+
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "parse failed\n");
@@ -151,7 +154,17 @@ err:
}
static const struct hid_device_id sjoy_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO),
+ .driver_data = HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET |
+ HID_QUIRK_SKIP_OUTPUT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO),
+ .driver_data = HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET |
+ HID_QUIRK_SKIP_OUTPUT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD),
+ .driver_data = HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET |
+ HID_QUIRK_SKIP_OUTPUT_REPORTS },
{ }
};
MODULE_DEVICE_TABLE(hid, sjoy_devices);
diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c
index a597039d075..17bb88f782b 100644
--- a/drivers/hid/hid-wacom.c
+++ b/drivers/hid/hid-wacom.c
@@ -304,11 +304,51 @@ static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
return 1;
}
+static int wacom_input_mapped(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage, unsigned long **bit,
+ int *max)
+{
+ struct input_dev *input = hi->input;
+
+ __set_bit(INPUT_PROP_POINTER, input->propbit);
+
+ /* Basics */
+ input->evbit[0] |= BIT(EV_KEY) | BIT(EV_ABS) | BIT(EV_REL);
+
+ __set_bit(REL_WHEEL, input->relbit);
+
+ __set_bit(BTN_TOOL_PEN, input->keybit);
+ __set_bit(BTN_TOUCH, input->keybit);
+ __set_bit(BTN_STYLUS, input->keybit);
+ __set_bit(BTN_STYLUS2, input->keybit);
+ __set_bit(BTN_LEFT, input->keybit);
+ __set_bit(BTN_RIGHT, input->keybit);
+ __set_bit(BTN_MIDDLE, input->keybit);
+
+ /* Pad */
+ input->evbit[0] |= BIT(EV_MSC);
+
+ __set_bit(MSC_SERIAL, input->mscbit);
+
+ __set_bit(BTN_0, input->keybit);
+ __set_bit(BTN_1, input->keybit);
+ __set_bit(BTN_TOOL_FINGER, input->keybit);
+
+ /* Distance, rubber and mouse */
+ __set_bit(BTN_TOOL_RUBBER, input->keybit);
+ __set_bit(BTN_TOOL_MOUSE, input->keybit);
+
+ input_set_abs_params(input, ABS_X, 0, 16704, 4, 0);
+ input_set_abs_params(input, ABS_Y, 0, 12064, 4, 0);
+ input_set_abs_params(input, ABS_PRESSURE, 0, 511, 0, 0);
+ input_set_abs_params(input, ABS_DISTANCE, 0, 32, 0, 0);
+
+ return 0;
+}
+
static int wacom_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
- struct hid_input *hidinput;
- struct input_dev *input;
struct wacom_data *wdata;
int ret;
@@ -370,40 +410,6 @@ static int wacom_probe(struct hid_device *hdev,
goto err_ac;
}
#endif
- hidinput = list_entry(hdev->inputs.next, struct hid_input, list);
- input = hidinput->input;
-
- /* Basics */
- input->evbit[0] |= BIT(EV_KEY) | BIT(EV_ABS) | BIT(EV_REL);
-
- __set_bit(REL_WHEEL, input->relbit);
-
- __set_bit(BTN_TOOL_PEN, input->keybit);
- __set_bit(BTN_TOUCH, input->keybit);
- __set_bit(BTN_STYLUS, input->keybit);
- __set_bit(BTN_STYLUS2, input->keybit);
- __set_bit(BTN_LEFT, input->keybit);
- __set_bit(BTN_RIGHT, input->keybit);
- __set_bit(BTN_MIDDLE, input->keybit);
-
- /* Pad */
- input->evbit[0] |= BIT(EV_MSC);
-
- __set_bit(MSC_SERIAL, input->mscbit);
-
- __set_bit(BTN_0, input->keybit);
- __set_bit(BTN_1, input->keybit);
- __set_bit(BTN_TOOL_FINGER, input->keybit);
-
- /* Distance, rubber and mouse */
- __set_bit(BTN_TOOL_RUBBER, input->keybit);
- __set_bit(BTN_TOOL_MOUSE, input->keybit);
-
- input_set_abs_params(input, ABS_X, 0, 16704, 4, 0);
- input_set_abs_params(input, ABS_Y, 0, 12064, 4, 0);
- input_set_abs_params(input, ABS_PRESSURE, 0, 511, 0, 0);
- input_set_abs_params(input, ABS_DISTANCE, 0, 32, 0, 0);
-
return 0;
#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
@@ -446,6 +452,7 @@ static struct hid_driver wacom_driver = {
.probe = wacom_probe,
.remove = wacom_remove,
.raw_event = wacom_raw_event,
+ .input_mapped = wacom_input_mapped,
};
static int __init wacom_init(void)
diff --git a/drivers/hid/hid-wiimote.c b/drivers/hid/hid-wiimote.c
index 85a02e5f9fe..76739c07fa3 100644
--- a/drivers/hid/hid-wiimote.c
+++ b/drivers/hid/hid-wiimote.c
@@ -10,15 +10,18 @@
* any later version.
*/
+#include <linux/completion.h>
#include <linux/device.h>
#include <linux/hid.h>
#include <linux/input.h>
#include <linux/leds.h>
#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/power_supply.h>
#include <linux/spinlock.h>
#include "hid-ids.h"
-#define WIIMOTE_VERSION "0.1"
+#define WIIMOTE_VERSION "0.2"
#define WIIMOTE_NAME "Nintendo Wii Remote"
#define WIIMOTE_BUFSIZE 32
@@ -30,12 +33,26 @@ struct wiimote_buf {
struct wiimote_state {
spinlock_t lock;
__u8 flags;
+ __u8 accel_split[2];
+
+ /* synchronous cmd requests */
+ struct mutex sync;
+ struct completion ready;
+ int cmd;
+ __u32 opt;
+
+ /* results of synchronous requests */
+ __u8 cmd_battery;
+ __u8 cmd_err;
};
struct wiimote_data {
struct hid_device *hdev;
struct input_dev *input;
struct led_classdev *leds[4];
+ struct input_dev *accel;
+ struct input_dev *ir;
+ struct power_supply battery;
spinlock_t qlock;
__u8 head;
@@ -46,23 +63,47 @@ struct wiimote_data {
struct wiimote_state state;
};
-#define WIIPROTO_FLAG_LED1 0x01
-#define WIIPROTO_FLAG_LED2 0x02
-#define WIIPROTO_FLAG_LED3 0x04
-#define WIIPROTO_FLAG_LED4 0x08
+#define WIIPROTO_FLAG_LED1 0x01
+#define WIIPROTO_FLAG_LED2 0x02
+#define WIIPROTO_FLAG_LED3 0x04
+#define WIIPROTO_FLAG_LED4 0x08
+#define WIIPROTO_FLAG_RUMBLE 0x10
+#define WIIPROTO_FLAG_ACCEL 0x20
+#define WIIPROTO_FLAG_IR_BASIC 0x40
+#define WIIPROTO_FLAG_IR_EXT 0x80
+#define WIIPROTO_FLAG_IR_FULL 0xc0 /* IR_BASIC | IR_EXT */
#define WIIPROTO_FLAGS_LEDS (WIIPROTO_FLAG_LED1 | WIIPROTO_FLAG_LED2 | \
WIIPROTO_FLAG_LED3 | WIIPROTO_FLAG_LED4)
+#define WIIPROTO_FLAGS_IR (WIIPROTO_FLAG_IR_BASIC | WIIPROTO_FLAG_IR_EXT | \
+ WIIPROTO_FLAG_IR_FULL)
/* return flag for led \num */
#define WIIPROTO_FLAG_LED(num) (WIIPROTO_FLAG_LED1 << (num - 1))
enum wiiproto_reqs {
WIIPROTO_REQ_NULL = 0x0,
+ WIIPROTO_REQ_RUMBLE = 0x10,
WIIPROTO_REQ_LED = 0x11,
WIIPROTO_REQ_DRM = 0x12,
+ WIIPROTO_REQ_IR1 = 0x13,
+ WIIPROTO_REQ_SREQ = 0x15,
+ WIIPROTO_REQ_WMEM = 0x16,
+ WIIPROTO_REQ_RMEM = 0x17,
+ WIIPROTO_REQ_IR2 = 0x1a,
WIIPROTO_REQ_STATUS = 0x20,
+ WIIPROTO_REQ_DATA = 0x21,
WIIPROTO_REQ_RETURN = 0x22,
WIIPROTO_REQ_DRM_K = 0x30,
+ WIIPROTO_REQ_DRM_KA = 0x31,
+ WIIPROTO_REQ_DRM_KE = 0x32,
+ WIIPROTO_REQ_DRM_KAI = 0x33,
+ WIIPROTO_REQ_DRM_KEE = 0x34,
+ WIIPROTO_REQ_DRM_KAE = 0x35,
+ WIIPROTO_REQ_DRM_KIE = 0x36,
+ WIIPROTO_REQ_DRM_KAIE = 0x37,
+ WIIPROTO_REQ_DRM_E = 0x3d,
+ WIIPROTO_REQ_DRM_SKAI1 = 0x3e,
+ WIIPROTO_REQ_DRM_SKAI2 = 0x3f,
};
enum wiiproto_keys {
@@ -94,6 +135,56 @@ static __u16 wiiproto_keymap[] = {
BTN_MODE, /* WIIPROTO_KEY_HOME */
};
+static enum power_supply_property wiimote_battery_props[] = {
+ POWER_SUPPLY_PROP_CAPACITY
+};
+
+/* requires the state.lock spinlock to be held */
+static inline bool wiimote_cmd_pending(struct wiimote_data *wdata, int cmd,
+ __u32 opt)
+{
+ return wdata->state.cmd == cmd && wdata->state.opt == opt;
+}
+
+/* requires the state.lock spinlock to be held */
+static inline void wiimote_cmd_complete(struct wiimote_data *wdata)
+{
+ wdata->state.cmd = WIIPROTO_REQ_NULL;
+ complete(&wdata->state.ready);
+}
+
+static inline int wiimote_cmd_acquire(struct wiimote_data *wdata)
+{
+ return mutex_lock_interruptible(&wdata->state.sync) ? -ERESTARTSYS : 0;
+}
+
+/* requires the state.lock spinlock to be held */
+static inline void wiimote_cmd_set(struct wiimote_data *wdata, int cmd,
+ __u32 opt)
+{
+ INIT_COMPLETION(wdata->state.ready);
+ wdata->state.cmd = cmd;
+ wdata->state.opt = opt;
+}
+
+static inline void wiimote_cmd_release(struct wiimote_data *wdata)
+{
+ mutex_unlock(&wdata->state.sync);
+}
+
+static inline int wiimote_cmd_wait(struct wiimote_data *wdata)
+{
+ int ret;
+
+ ret = wait_for_completion_interruptible_timeout(&wdata->state.ready, HZ);
+ if (ret < 0)
+ return -ERESTARTSYS;
+ else if (ret == 0)
+ return -EIO;
+ else
+ return 0;
+}
+
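Taken together, these helpers implement a simple synchronous request protocol; a minimal sketch of the intended calling sequence (hypothetical example_sync_request(), mirroring what wiimote_battery_get_property() below does) looks like:

/* Sketch only: the sync mutex serializes requesters, the state.lock
 * spinlock protects the pending-cmd fields while the request is queued,
 * and the completion is signalled from the report handlers. */
static int example_sync_request(struct wiimote_data *wdata)
{
	unsigned long flags;
	int ret;

	ret = wiimote_cmd_acquire(wdata);		/* one requester at a time */
	if (ret)
		return ret;

	spin_lock_irqsave(&wdata->state.lock, flags);
	wiimote_cmd_set(wdata, WIIPROTO_REQ_SREQ, 0);	/* mark what we wait for */
	wiiproto_req_status(wdata);			/* queue the request */
	spin_unlock_irqrestore(&wdata->state.lock, flags);

	ret = wiimote_cmd_wait(wdata);			/* completed by handler_status() */
	wiimote_cmd_release(wdata);
	return ret;
}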
static ssize_t wiimote_hid_send(struct hid_device *hdev, __u8 *buffer,
size_t count)
{
@@ -172,6 +263,39 @@ static void wiimote_queue(struct wiimote_data *wdata, const __u8 *buffer,
spin_unlock_irqrestore(&wdata->qlock, flags);
}
+/*
+ * This sets the rumble bit on the given output report if rumble is
+ * currently enabled.
+ * \cmd1 must point to the second byte in the output report => &cmd[1]
+ * This must be called on nearly every output report before passing it
+ * into the output queue!
+ */
+static inline void wiiproto_keep_rumble(struct wiimote_data *wdata, __u8 *cmd1)
+{
+ if (wdata->state.flags & WIIPROTO_FLAG_RUMBLE)
+ *cmd1 |= 0x01;
+}
+
+static void wiiproto_req_rumble(struct wiimote_data *wdata, __u8 rumble)
+{
+ __u8 cmd[2];
+
+ rumble = !!rumble;
+ if (rumble == !!(wdata->state.flags & WIIPROTO_FLAG_RUMBLE))
+ return;
+
+ if (rumble)
+ wdata->state.flags |= WIIPROTO_FLAG_RUMBLE;
+ else
+ wdata->state.flags &= ~WIIPROTO_FLAG_RUMBLE;
+
+ cmd[0] = WIIPROTO_REQ_RUMBLE;
+ cmd[1] = 0;
+
+ wiiproto_keep_rumble(wdata, &cmd[1]);
+ wiimote_queue(wdata, cmd, sizeof(cmd));
+}
+
static void wiiproto_req_leds(struct wiimote_data *wdata, int leds)
{
__u8 cmd[2];
@@ -193,6 +317,7 @@ static void wiiproto_req_leds(struct wiimote_data *wdata, int leds)
if (leds & WIIPROTO_FLAG_LED4)
cmd[1] |= 0x80;
+ wiiproto_keep_rumble(wdata, &cmd[1]);
wiimote_queue(wdata, cmd, sizeof(cmd));
}
@@ -203,7 +328,23 @@ static void wiiproto_req_leds(struct wiimote_data *wdata, int leds)
*/
static __u8 select_drm(struct wiimote_data *wdata)
{
- return WIIPROTO_REQ_DRM_K;
+ __u8 ir = wdata->state.flags & WIIPROTO_FLAGS_IR;
+
+ if (ir == WIIPROTO_FLAG_IR_BASIC) {
+ if (wdata->state.flags & WIIPROTO_FLAG_ACCEL)
+ return WIIPROTO_REQ_DRM_KAIE;
+ else
+ return WIIPROTO_REQ_DRM_KIE;
+ } else if (ir == WIIPROTO_FLAG_IR_EXT) {
+ return WIIPROTO_REQ_DRM_KAI;
+ } else if (ir == WIIPROTO_FLAG_IR_FULL) {
+ return WIIPROTO_REQ_DRM_SKAI1;
+ } else {
+ if (wdata->state.flags & WIIPROTO_FLAG_ACCEL)
+ return WIIPROTO_REQ_DRM_KA;
+ else
+ return WIIPROTO_REQ_DRM_K;
+ }
}
static void wiiproto_req_drm(struct wiimote_data *wdata, __u8 drm)
@@ -217,9 +358,256 @@ static void wiiproto_req_drm(struct wiimote_data *wdata, __u8 drm)
cmd[1] = 0;
cmd[2] = drm;
+ wiiproto_keep_rumble(wdata, &cmd[1]);
+ wiimote_queue(wdata, cmd, sizeof(cmd));
+}
+
+static void wiiproto_req_status(struct wiimote_data *wdata)
+{
+ __u8 cmd[2];
+
+ cmd[0] = WIIPROTO_REQ_SREQ;
+ cmd[1] = 0;
+
+ wiiproto_keep_rumble(wdata, &cmd[1]);
+ wiimote_queue(wdata, cmd, sizeof(cmd));
+}
+
+static void wiiproto_req_accel(struct wiimote_data *wdata, __u8 accel)
+{
+ accel = !!accel;
+ if (accel == !!(wdata->state.flags & WIIPROTO_FLAG_ACCEL))
+ return;
+
+ if (accel)
+ wdata->state.flags |= WIIPROTO_FLAG_ACCEL;
+ else
+ wdata->state.flags &= ~WIIPROTO_FLAG_ACCEL;
+
+ wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
+}
+
+static void wiiproto_req_ir1(struct wiimote_data *wdata, __u8 flags)
+{
+ __u8 cmd[2];
+
+ cmd[0] = WIIPROTO_REQ_IR1;
+ cmd[1] = flags;
+
+ wiiproto_keep_rumble(wdata, &cmd[1]);
+ wiimote_queue(wdata, cmd, sizeof(cmd));
+}
+
+static void wiiproto_req_ir2(struct wiimote_data *wdata, __u8 flags)
+{
+ __u8 cmd[2];
+
+ cmd[0] = WIIPROTO_REQ_IR2;
+ cmd[1] = flags;
+
+ wiiproto_keep_rumble(wdata, &cmd[1]);
+ wiimote_queue(wdata, cmd, sizeof(cmd));
+}
+
+#define wiiproto_req_wreg(wdata, os, buf, sz) \
+ wiiproto_req_wmem((wdata), false, (os), (buf), (sz))
+
+#define wiiproto_req_weeprom(wdata, os, buf, sz) \
+ wiiproto_req_wmem((wdata), true, (os), (buf), (sz))
+
+static void wiiproto_req_wmem(struct wiimote_data *wdata, bool eeprom,
+ __u32 offset, const __u8 *buf, __u8 size)
+{
+ __u8 cmd[22];
+
+ if (size > 16 || size == 0) {
+ hid_warn(wdata->hdev, "Invalid length %d wmem request\n", size);
+ return;
+ }
+
+ memset(cmd, 0, sizeof(cmd));
+ cmd[0] = WIIPROTO_REQ_WMEM;
+ cmd[2] = (offset >> 16) & 0xff;
+ cmd[3] = (offset >> 8) & 0xff;
+ cmd[4] = offset & 0xff;
+ cmd[5] = size;
+ memcpy(&cmd[6], buf, size);
+
+ if (!eeprom)
+ cmd[1] |= 0x04;
+
+ wiiproto_keep_rumble(wdata, &cmd[1]);
wiimote_queue(wdata, cmd, sizeof(cmd));
}
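For illustration, the 22-byte command this builds for the register write used by the IR setup later in this patch, wiiproto_req_wreg(wdata, 0xb00030, data_enable, 1), with the rumble bit clear:

/*
 * Illustration only (not part of the patch): bytes queued by
 * wiiproto_req_wreg(wdata, 0xb00030, data_enable, 1) when rumble is off:
 *
 *   cmd[0]    = 0x16        WIIPROTO_REQ_WMEM
 *   cmd[1]    = 0x04        register space (0x00 would address the EEPROM)
 *   cmd[2..4] = b0 00 30    24-bit offset, big endian
 *   cmd[5]    = 0x01        payload size
 *   cmd[6..]  = 01 00 ...   payload, zero-padded to the 16-byte data area
 */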
+/* requires the cmd-mutex to be held */
+static int wiimote_cmd_write(struct wiimote_data *wdata, __u32 offset,
+ const __u8 *wmem, __u8 size)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wiimote_cmd_set(wdata, WIIPROTO_REQ_WMEM, 0);
+ wiiproto_req_wreg(wdata, offset, wmem, size);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ ret = wiimote_cmd_wait(wdata);
+ if (!ret && wdata->state.cmd_err)
+ ret = -EIO;
+
+ return ret;
+}
+
+static int wiimote_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct wiimote_data *wdata = container_of(psy,
+ struct wiimote_data, battery);
+ int ret = 0, state;
+ unsigned long flags;
+
+ ret = wiimote_cmd_acquire(wdata);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wiimote_cmd_set(wdata, WIIPROTO_REQ_SREQ, 0);
+ wiiproto_req_status(wdata);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ ret = wiimote_cmd_wait(wdata);
+ state = wdata->state.cmd_battery;
+ wiimote_cmd_release(wdata);
+
+ if (ret)
+ return ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = state * 100 / 255;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int wiimote_init_ir(struct wiimote_data *wdata, __u16 mode)
+{
+ int ret;
+ unsigned long flags;
+ __u8 format = 0;
+ static const __u8 data_enable[] = { 0x01 };
+ static const __u8 data_sens1[] = { 0x02, 0x00, 0x00, 0x71, 0x01,
+ 0x00, 0xaa, 0x00, 0x64 };
+ static const __u8 data_sens2[] = { 0x63, 0x03 };
+ static const __u8 data_fin[] = { 0x08 };
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+
+ if (mode == (wdata->state.flags & WIIPROTO_FLAGS_IR)) {
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+ return 0;
+ }
+
+ if (mode == 0) {
+ wdata->state.flags &= ~WIIPROTO_FLAGS_IR;
+ wiiproto_req_ir1(wdata, 0);
+ wiiproto_req_ir2(wdata, 0);
+ wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+ return 0;
+ }
+
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ ret = wiimote_cmd_acquire(wdata);
+ if (ret)
+ return ret;
+
+ /* send PIXEL CLOCK ENABLE cmd first */
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wiimote_cmd_set(wdata, WIIPROTO_REQ_IR1, 0);
+ wiiproto_req_ir1(wdata, 0x06);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ ret = wiimote_cmd_wait(wdata);
+ if (ret)
+ goto unlock;
+ if (wdata->state.cmd_err) {
+ ret = -EIO;
+ goto unlock;
+ }
+
+ /* enable IR LOGIC */
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wiimote_cmd_set(wdata, WIIPROTO_REQ_IR2, 0);
+ wiiproto_req_ir2(wdata, 0x06);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ ret = wiimote_cmd_wait(wdata);
+ if (ret)
+ goto unlock;
+ if (wdata->state.cmd_err) {
+ ret = -EIO;
+ goto unlock;
+ }
+
+ /* enable IR cam but do not make it send data, yet */
+ ret = wiimote_cmd_write(wdata, 0xb00030, data_enable,
+ sizeof(data_enable));
+ if (ret)
+ goto unlock;
+
+ /* write first sensitivity block */
+ ret = wiimote_cmd_write(wdata, 0xb00000, data_sens1,
+ sizeof(data_sens1));
+ if (ret)
+ goto unlock;
+
+ /* write second sensitivity block */
+ ret = wiimote_cmd_write(wdata, 0xb0001a, data_sens2,
+ sizeof(data_sens2));
+ if (ret)
+ goto unlock;
+
+ /* put IR cam into desired state */
+ switch (mode) {
+ case WIIPROTO_FLAG_IR_FULL:
+ format = 5;
+ break;
+ case WIIPROTO_FLAG_IR_EXT:
+ format = 3;
+ break;
+ case WIIPROTO_FLAG_IR_BASIC:
+ format = 1;
+ break;
+ }
+ ret = wiimote_cmd_write(wdata, 0xb00033, &format, sizeof(format));
+ if (ret)
+ goto unlock;
+
+ /* make IR cam send data */
+ ret = wiimote_cmd_write(wdata, 0xb00030, data_fin, sizeof(data_fin));
+ if (ret)
+ goto unlock;
+
+ /* request new DRM mode compatible to IR mode */
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wdata->state.flags &= ~WIIPROTO_FLAGS_IR;
+ wdata->state.flags |= mode & WIIPROTO_FLAGS_IR;
+ wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+unlock:
+ wiimote_cmd_release(wdata);
+ return ret;
+}
+
static enum led_brightness wiimote_leds_get(struct led_classdev *led_dev)
{
struct wiimote_data *wdata;
@@ -268,9 +656,28 @@ static void wiimote_leds_set(struct led_classdev *led_dev,
}
}
-static int wiimote_input_event(struct input_dev *dev, unsigned int type,
- unsigned int code, int value)
+static int wiimote_ff_play(struct input_dev *dev, void *data,
+ struct ff_effect *eff)
{
+ struct wiimote_data *wdata = input_get_drvdata(dev);
+ __u8 value;
+ unsigned long flags;
+
+ /*
+ * The wiimote supports only a single rumble motor so if any magnitude
+ * is set to non-zero then we start the rumble motor. If both are set to
+ * zero, we stop the rumble motor.
+ */
+
+ if (eff->u.rumble.strong_magnitude || eff->u.rumble.weak_magnitude)
+ value = 1;
+ else
+ value = 0;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wiiproto_req_rumble(wdata, value);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
return 0;
}
@@ -288,6 +695,61 @@ static void wiimote_input_close(struct input_dev *dev)
hid_hw_close(wdata->hdev);
}
+static int wiimote_accel_open(struct input_dev *dev)
+{
+ struct wiimote_data *wdata = input_get_drvdata(dev);
+ int ret;
+ unsigned long flags;
+
+ ret = hid_hw_open(wdata->hdev);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wiiproto_req_accel(wdata, true);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ return 0;
+}
+
+static void wiimote_accel_close(struct input_dev *dev)
+{
+ struct wiimote_data *wdata = input_get_drvdata(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wiiproto_req_accel(wdata, false);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ hid_hw_close(wdata->hdev);
+}
+
+static int wiimote_ir_open(struct input_dev *dev)
+{
+ struct wiimote_data *wdata = input_get_drvdata(dev);
+ int ret;
+
+ ret = hid_hw_open(wdata->hdev);
+ if (ret)
+ return ret;
+
+ ret = wiimote_init_ir(wdata, WIIPROTO_FLAG_IR_BASIC);
+ if (ret) {
+ hid_hw_close(wdata->hdev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void wiimote_ir_close(struct input_dev *dev)
+{
+ struct wiimote_data *wdata = input_get_drvdata(dev);
+
+ wiimote_init_ir(wdata, 0);
+ hid_hw_close(wdata->hdev);
+}
+
static void handler_keys(struct wiimote_data *wdata, const __u8 *payload)
{
input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_LEFT],
@@ -315,12 +777,100 @@ static void handler_keys(struct wiimote_data *wdata, const __u8 *payload)
input_sync(wdata->input);
}
+static void handler_accel(struct wiimote_data *wdata, const __u8 *payload)
+{
+ __u16 x, y, z;
+
+ if (!(wdata->state.flags & WIIPROTO_FLAG_ACCEL))
+ return;
+
+ /*
+ * payload is: BB BB XX YY ZZ
+ * Accelerometer data is encoded into 3 10bit values. XX, YY and ZZ
+ * contain the upper 8 bits of each value. The lower 2 bits are
+ * contained in the buttons data BB BB.
+ * Bits 6 and 7 of the first buttons byte BB are the lower 2 bits of the
+ * X accel value. Bit 5 of the second buttons byte is the second bit of
+ * the Y accel value and bit 6 is the second bit of the Z value.
+ * The first bit of Y and Z values is not available and always set to 0.
+ * 0x200 is returned on no movement.
+ */
+
+ x = payload[2] << 2;
+ y = payload[3] << 2;
+ z = payload[4] << 2;
+
+ x |= (payload[0] >> 5) & 0x3;
+ y |= (payload[1] >> 4) & 0x2;
+ z |= (payload[1] >> 5) & 0x2;
+
+ input_report_abs(wdata->accel, ABS_RX, x - 0x200);
+ input_report_abs(wdata->accel, ABS_RY, y - 0x200);
+ input_report_abs(wdata->accel, ABS_RZ, z - 0x200);
+ input_sync(wdata->accel);
+}
+
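A minimal standalone sketch of that reconstruction with made-up payload bytes (illustration only, not a captured report):

#include <stdio.h>

int main(void)
{
	const unsigned char payload[5] = { 0x60, 0x30, 0x80, 0x81, 0x7f };
	unsigned int x, y, z;

	x = payload[2] << 2;			/* upper 8 bits of X */
	y = payload[3] << 2;			/* upper 8 bits of Y */
	z = payload[4] << 2;			/* upper 8 bits of Z */

	x |= (payload[0] >> 5) & 0x3;		/* low X bits from first buttons byte */
	y |= (payload[1] >> 4) & 0x2;		/* second Y bit from second buttons byte */
	z |= (payload[1] >> 5) & 0x2;		/* second Z bit from second buttons byte */

	/* 0x200 is the resting point, so the driver reports value - 0x200 */
	printf("x=%d y=%d z=%d\n", (int)x - 0x200, (int)y - 0x200, (int)z - 0x200);
	return 0;				/* prints x=3 y=6 z=-4 */
}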
+#define ir_to_input0(wdata, ir, packed) __ir_to_input((wdata), (ir), (packed), \
+ ABS_HAT0X, ABS_HAT0Y)
+#define ir_to_input1(wdata, ir, packed) __ir_to_input((wdata), (ir), (packed), \
+ ABS_HAT1X, ABS_HAT1Y)
+#define ir_to_input2(wdata, ir, packed) __ir_to_input((wdata), (ir), (packed), \
+ ABS_HAT2X, ABS_HAT2Y)
+#define ir_to_input3(wdata, ir, packed) __ir_to_input((wdata), (ir), (packed), \
+ ABS_HAT3X, ABS_HAT3Y)
+
+static void __ir_to_input(struct wiimote_data *wdata, const __u8 *ir,
+ bool packed, __u8 xid, __u8 yid)
+{
+ __u16 x, y;
+
+ if (!(wdata->state.flags & WIIPROTO_FLAGS_IR))
+ return;
+
+ /*
+ * Basic IR data is encoded into 3 bytes. The first two bytes are the
+ * upper 8 bit of the X/Y data, the 3rd byte contains the lower 2 bits
+ * of both.
+ * If data is packed, then the 3rd byte is put first and slightly
+ * reordered. This allows interleaving packed and non-packed data to
+ * fit two IR sets in 5 bytes instead of 6.
+ * The resulting 10bit X/Y values are passed to the ABS_HATXY input dev.
+ */
+
+ if (packed) {
+ x = ir[1] << 2;
+ y = ir[2] << 2;
+
+ x |= ir[0] & 0x3;
+ y |= (ir[0] >> 2) & 0x3;
+ } else {
+ x = ir[0] << 2;
+ y = ir[1] << 2;
+
+ x |= (ir[2] >> 4) & 0x3;
+ y |= (ir[2] >> 6) & 0x3;
+ }
+
+ input_report_abs(wdata->ir, xid, x);
+ input_report_abs(wdata->ir, yid, y);
+}
+
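A short standalone sketch of both decodings with made-up bytes (illustration only):

#include <stdio.h>

int main(void)
{
	const unsigned char plain[3]  = { 0x7f, 0x5a, 0x90 };	/* X, Y, low bits */
	const unsigned char packed[3] = { 0x0e, 0x7f, 0x5a };	/* low bits first */
	unsigned int x, y;

	/* non-packed: 3rd byte carries the low 2 bits of X (bits 4-5)
	 * and of Y (bits 6-7) */
	x = plain[0] << 2 | ((plain[2] >> 4) & 0x3);
	y = plain[1] << 2 | ((plain[2] >> 6) & 0x3);
	printf("plain:  x=%u y=%u\n", x, y);	/* x=509 y=362 */

	/* packed: low bits come first, X in bits 0-1, Y in bits 2-3 */
	x = packed[1] << 2 | (packed[0] & 0x3);
	y = packed[2] << 2 | ((packed[0] >> 2) & 0x3);
	printf("packed: x=%u y=%u\n", x, y);	/* x=510 y=363 */
	return 0;
}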
static void handler_status(struct wiimote_data *wdata, const __u8 *payload)
{
handler_keys(wdata, payload);
/* on status reports the drm is reset so we need to resend the drm */
wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
+
+ if (wiimote_cmd_pending(wdata, WIIPROTO_REQ_SREQ, 0)) {
+ wdata->state.cmd_battery = payload[5];
+ wiimote_cmd_complete(wdata);
+ }
+}
+
+static void handler_data(struct wiimote_data *wdata, const __u8 *payload)
+{
+ handler_keys(wdata, payload);
}
static void handler_return(struct wiimote_data *wdata, const __u8 *payload)
@@ -330,9 +880,105 @@ static void handler_return(struct wiimote_data *wdata, const __u8 *payload)
handler_keys(wdata, payload);
- if (err)
+ if (wiimote_cmd_pending(wdata, cmd, 0)) {
+ wdata->state.cmd_err = err;
+ wiimote_cmd_complete(wdata);
+ } else if (err) {
hid_warn(wdata->hdev, "Remote error %hhu on req %hhu\n", err,
cmd);
+ }
+}
+
+static void handler_drm_KA(struct wiimote_data *wdata, const __u8 *payload)
+{
+ handler_keys(wdata, payload);
+ handler_accel(wdata, payload);
+}
+
+static void handler_drm_KE(struct wiimote_data *wdata, const __u8 *payload)
+{
+ handler_keys(wdata, payload);
+}
+
+static void handler_drm_KAI(struct wiimote_data *wdata, const __u8 *payload)
+{
+ handler_keys(wdata, payload);
+ handler_accel(wdata, payload);
+ ir_to_input0(wdata, &payload[5], false);
+ ir_to_input1(wdata, &payload[8], false);
+ ir_to_input2(wdata, &payload[11], false);
+ ir_to_input3(wdata, &payload[14], false);
+ input_sync(wdata->ir);
+}
+
+static void handler_drm_KEE(struct wiimote_data *wdata, const __u8 *payload)
+{
+ handler_keys(wdata, payload);
+}
+
+static void handler_drm_KIE(struct wiimote_data *wdata, const __u8 *payload)
+{
+ handler_keys(wdata, payload);
+ ir_to_input0(wdata, &payload[2], false);
+ ir_to_input1(wdata, &payload[4], true);
+ ir_to_input2(wdata, &payload[7], false);
+ ir_to_input3(wdata, &payload[9], true);
+ input_sync(wdata->ir);
+}
+
+static void handler_drm_KAE(struct wiimote_data *wdata, const __u8 *payload)
+{
+ handler_keys(wdata, payload);
+ handler_accel(wdata, payload);
+}
+
+static void handler_drm_KAIE(struct wiimote_data *wdata, const __u8 *payload)
+{
+ handler_keys(wdata, payload);
+ handler_accel(wdata, payload);
+ ir_to_input0(wdata, &payload[5], false);
+ ir_to_input1(wdata, &payload[7], true);
+ ir_to_input2(wdata, &payload[10], false);
+ ir_to_input3(wdata, &payload[12], true);
+ input_sync(wdata->ir);
+}
+
+static void handler_drm_E(struct wiimote_data *wdata, const __u8 *payload)
+{
+}
+
+static void handler_drm_SKAI1(struct wiimote_data *wdata, const __u8 *payload)
+{
+ handler_keys(wdata, payload);
+
+ wdata->state.accel_split[0] = payload[2];
+ wdata->state.accel_split[1] = (payload[0] >> 1) & (0x10 | 0x20);
+ wdata->state.accel_split[1] |= (payload[1] << 1) & (0x40 | 0x80);
+
+ ir_to_input0(wdata, &payload[3], false);
+ ir_to_input1(wdata, &payload[12], false);
+ input_sync(wdata->ir);
+}
+
+static void handler_drm_SKAI2(struct wiimote_data *wdata, const __u8 *payload)
+{
+ __u8 buf[5];
+
+ handler_keys(wdata, payload);
+
+ wdata->state.accel_split[1] |= (payload[0] >> 5) & (0x01 | 0x02);
+ wdata->state.accel_split[1] |= (payload[1] >> 3) & (0x04 | 0x08);
+
+ buf[0] = 0;
+ buf[1] = 0;
+ buf[2] = wdata->state.accel_split[0];
+ buf[3] = payload[2];
+ buf[4] = wdata->state.accel_split[1];
+ handler_accel(wdata, buf);
+
+ ir_to_input2(wdata, &payload[3], false);
+ ir_to_input3(wdata, &payload[12], false);
+ input_sync(wdata->ir);
}
struct wiiproto_handler {
@@ -343,8 +989,19 @@ struct wiiproto_handler {
static struct wiiproto_handler handlers[] = {
{ .id = WIIPROTO_REQ_STATUS, .size = 6, .func = handler_status },
+ { .id = WIIPROTO_REQ_DATA, .size = 21, .func = handler_data },
{ .id = WIIPROTO_REQ_RETURN, .size = 4, .func = handler_return },
{ .id = WIIPROTO_REQ_DRM_K, .size = 2, .func = handler_keys },
+ { .id = WIIPROTO_REQ_DRM_KA, .size = 5, .func = handler_drm_KA },
+ { .id = WIIPROTO_REQ_DRM_KE, .size = 10, .func = handler_drm_KE },
+ { .id = WIIPROTO_REQ_DRM_KAI, .size = 17, .func = handler_drm_KAI },
+ { .id = WIIPROTO_REQ_DRM_KEE, .size = 21, .func = handler_drm_KEE },
+ { .id = WIIPROTO_REQ_DRM_KAE, .size = 21, .func = handler_drm_KAE },
+ { .id = WIIPROTO_REQ_DRM_KIE, .size = 21, .func = handler_drm_KIE },
+ { .id = WIIPROTO_REQ_DRM_KAIE, .size = 21, .func = handler_drm_KAIE },
+ { .id = WIIPROTO_REQ_DRM_E, .size = 21, .func = handler_drm_E },
+ { .id = WIIPROTO_REQ_DRM_SKAI1, .size = 21, .func = handler_drm_SKAI1 },
+ { .id = WIIPROTO_REQ_DRM_SKAI2, .size = 21, .func = handler_drm_SKAI2 },
{ .id = 0 }
};
@@ -355,6 +1012,7 @@ static int wiimote_hid_event(struct hid_device *hdev, struct hid_report *report,
struct wiiproto_handler *h;
int i;
unsigned long flags;
+ bool handled = false;
if (size < 1)
return -EINVAL;
@@ -363,10 +1021,16 @@ static int wiimote_hid_event(struct hid_device *hdev, struct hid_report *report,
for (i = 0; handlers[i].id; ++i) {
h = &handlers[i];
- if (h->id == raw_data[0] && h->size < size)
+ if (h->id == raw_data[0] && h->size < size) {
h->func(wdata, &raw_data[1]);
+ handled = true;
+ }
}
+ if (!handled)
+ hid_warn(hdev, "Unhandled report %hhu size %d\n", raw_data[0],
+ size);
+
spin_unlock_irqrestore(&wdata->state.lock, flags);
return 0;
@@ -434,16 +1098,13 @@ static struct wiimote_data *wiimote_create(struct hid_device *hdev)
return NULL;
wdata->input = input_allocate_device();
- if (!wdata->input) {
- kfree(wdata);
- return NULL;
- }
+ if (!wdata->input)
+ goto err;
wdata->hdev = hdev;
hid_set_drvdata(hdev, wdata);
input_set_drvdata(wdata->input, wdata);
- wdata->input->event = wiimote_input_event;
wdata->input->open = wiimote_input_open;
wdata->input->close = wiimote_input_close;
wdata->input->dev.parent = &wdata->hdev->dev;
@@ -457,18 +1118,89 @@ static struct wiimote_data *wiimote_create(struct hid_device *hdev)
for (i = 0; i < WIIPROTO_KEY_COUNT; ++i)
set_bit(wiiproto_keymap[i], wdata->input->keybit);
+ set_bit(FF_RUMBLE, wdata->input->ffbit);
+ if (input_ff_create_memless(wdata->input, NULL, wiimote_ff_play))
+ goto err_input;
+
+ wdata->accel = input_allocate_device();
+ if (!wdata->accel)
+ goto err_input;
+
+ input_set_drvdata(wdata->accel, wdata);
+ wdata->accel->open = wiimote_accel_open;
+ wdata->accel->close = wiimote_accel_close;
+ wdata->accel->dev.parent = &wdata->hdev->dev;
+ wdata->accel->id.bustype = wdata->hdev->bus;
+ wdata->accel->id.vendor = wdata->hdev->vendor;
+ wdata->accel->id.product = wdata->hdev->product;
+ wdata->accel->id.version = wdata->hdev->version;
+ wdata->accel->name = WIIMOTE_NAME " Accelerometer";
+
+ set_bit(EV_ABS, wdata->accel->evbit);
+ set_bit(ABS_RX, wdata->accel->absbit);
+ set_bit(ABS_RY, wdata->accel->absbit);
+ set_bit(ABS_RZ, wdata->accel->absbit);
+ input_set_abs_params(wdata->accel, ABS_RX, -500, 500, 2, 4);
+ input_set_abs_params(wdata->accel, ABS_RY, -500, 500, 2, 4);
+ input_set_abs_params(wdata->accel, ABS_RZ, -500, 500, 2, 4);
+
+ wdata->ir = input_allocate_device();
+ if (!wdata->ir)
+ goto err_ir;
+
+ input_set_drvdata(wdata->ir, wdata);
+ wdata->ir->open = wiimote_ir_open;
+ wdata->ir->close = wiimote_ir_close;
+ wdata->ir->dev.parent = &wdata->hdev->dev;
+ wdata->ir->id.bustype = wdata->hdev->bus;
+ wdata->ir->id.vendor = wdata->hdev->vendor;
+ wdata->ir->id.product = wdata->hdev->product;
+ wdata->ir->id.version = wdata->hdev->version;
+ wdata->ir->name = WIIMOTE_NAME " IR";
+
+ set_bit(EV_ABS, wdata->ir->evbit);
+ set_bit(ABS_HAT0X, wdata->ir->absbit);
+ set_bit(ABS_HAT0Y, wdata->ir->absbit);
+ set_bit(ABS_HAT1X, wdata->ir->absbit);
+ set_bit(ABS_HAT1Y, wdata->ir->absbit);
+ set_bit(ABS_HAT2X, wdata->ir->absbit);
+ set_bit(ABS_HAT2Y, wdata->ir->absbit);
+ set_bit(ABS_HAT3X, wdata->ir->absbit);
+ set_bit(ABS_HAT3Y, wdata->ir->absbit);
+ input_set_abs_params(wdata->ir, ABS_HAT0X, 0, 1023, 2, 4);
+ input_set_abs_params(wdata->ir, ABS_HAT0Y, 0, 767, 2, 4);
+ input_set_abs_params(wdata->ir, ABS_HAT1X, 0, 1023, 2, 4);
+ input_set_abs_params(wdata->ir, ABS_HAT1Y, 0, 767, 2, 4);
+ input_set_abs_params(wdata->ir, ABS_HAT2X, 0, 1023, 2, 4);
+ input_set_abs_params(wdata->ir, ABS_HAT2Y, 0, 767, 2, 4);
+ input_set_abs_params(wdata->ir, ABS_HAT3X, 0, 1023, 2, 4);
+ input_set_abs_params(wdata->ir, ABS_HAT3Y, 0, 767, 2, 4);
+
spin_lock_init(&wdata->qlock);
INIT_WORK(&wdata->worker, wiimote_worker);
spin_lock_init(&wdata->state.lock);
+ init_completion(&wdata->state.ready);
+ mutex_init(&wdata->state.sync);
return wdata;
+
+err_ir:
+ input_free_device(wdata->accel);
+err_input:
+ input_free_device(wdata->input);
+err:
+ kfree(wdata);
+ return NULL;
}
static void wiimote_destroy(struct wiimote_data *wdata)
{
wiimote_leds_destroy(wdata);
+ power_supply_unregister(&wdata->battery);
+ input_unregister_device(wdata->accel);
+ input_unregister_device(wdata->ir);
input_unregister_device(wdata->input);
cancel_work_sync(&wdata->worker);
hid_hw_stop(wdata->hdev);
@@ -500,12 +1232,37 @@ static int wiimote_hid_probe(struct hid_device *hdev,
goto err;
}
- ret = input_register_device(wdata->input);
+ ret = input_register_device(wdata->accel);
if (ret) {
hid_err(hdev, "Cannot register input device\n");
goto err_stop;
}
+ ret = input_register_device(wdata->ir);
+ if (ret) {
+ hid_err(hdev, "Cannot register input device\n");
+ goto err_ir;
+ }
+
+ ret = input_register_device(wdata->input);
+ if (ret) {
+ hid_err(hdev, "Cannot register input device\n");
+ goto err_input;
+ }
+
+ wdata->battery.properties = wiimote_battery_props;
+ wdata->battery.num_properties = ARRAY_SIZE(wiimote_battery_props);
+ wdata->battery.get_property = wiimote_battery_get_property;
+ wdata->battery.name = "wiimote_battery";
+ wdata->battery.type = POWER_SUPPLY_TYPE_BATTERY;
+ wdata->battery.use_for_apm = 0;
+
+ ret = power_supply_register(&wdata->hdev->dev, &wdata->battery);
+ if (ret) {
+ hid_err(hdev, "Cannot register battery device\n");
+ goto err_battery;
+ }
+
ret = wiimote_leds_create(wdata);
if (ret)
goto err_free;
@@ -523,9 +1280,20 @@ err_free:
wiimote_destroy(wdata);
return ret;
+err_battery:
+ input_unregister_device(wdata->input);
+ wdata->input = NULL;
+err_input:
+ input_unregister_device(wdata->ir);
+ wdata->ir = NULL;
+err_ir:
+ input_unregister_device(wdata->accel);
+ wdata->accel = NULL;
err_stop:
hid_hw_stop(hdev);
err:
+ input_free_device(wdata->ir);
+ input_free_device(wdata->accel);
input_free_device(wdata->input);
kfree(wdata);
return ret;
diff --git a/drivers/hid/hid-zydacron.c b/drivers/hid/hid-zydacron.c
index e90371508fd..1ad85f2257b 100644
--- a/drivers/hid/hid-zydacron.c
+++ b/drivers/hid/hid-zydacron.c
@@ -201,9 +201,7 @@ static void zc_remove(struct hid_device *hdev)
struct zc_device *zc = hid_get_drvdata(hdev);
hid_hw_stop(hdev);
-
- if (NULL != zc)
- kfree(zc);
+ kfree(zc);
}
static const struct hid_device_id zc_devices[] = {
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index c79578b5a78..cf7d6d58e79 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -259,7 +259,6 @@ static int hidraw_open(struct inode *inode, struct file *file)
mutex_lock(&minors_lock);
if (!hidraw_table[minor]) {
- kfree(list);
err = -ENODEV;
goto out_unlock;
}
@@ -272,8 +271,10 @@ static int hidraw_open(struct inode *inode, struct file *file)
dev = hidraw_table[minor];
if (!dev->open++) {
err = hid_hw_power(dev->hid, PM_HINT_FULLON);
- if (err < 0)
+ if (err < 0) {
+ dev->open--;
goto out_unlock;
+ }
err = hid_hw_open(dev->hid);
if (err < 0) {
@@ -285,6 +286,8 @@ static int hidraw_open(struct inode *inode, struct file *file)
out_unlock:
mutex_unlock(&minors_lock);
out:
+ if (err < 0)
+ kfree(list);
return err;
}
@@ -510,13 +513,12 @@ void hidraw_disconnect(struct hid_device *hid)
{
struct hidraw *hidraw = hid->hidraw;
+ mutex_lock(&minors_lock);
hidraw->exist = 0;
device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor));
- mutex_lock(&minors_lock);
hidraw_table[hidraw->minor] = NULL;
- mutex_unlock(&minors_lock);
if (hidraw->open) {
hid_hw_close(hid);
@@ -524,6 +526,7 @@ void hidraw_disconnect(struct hid_device *hid)
} else {
kfree(hidraw);
}
+ mutex_unlock(&minors_lock);
}
EXPORT_SYMBOL_GPL(hidraw_disconnect);
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index ad978f5748d..77e705c2209 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -1270,7 +1270,7 @@ static void hid_cancel_delayed_stuff(struct usbhid_device *usbhid)
static void hid_cease_io(struct usbhid_device *usbhid)
{
- del_timer(&usbhid->io_retry);
+ del_timer_sync(&usbhid->io_retry);
usb_kill_urb(usbhid->urbin);
usb_kill_urb(usbhid->urbctrl);
usb_kill_urb(usbhid->urbout);
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 3146fdcda27..4ea464151c3 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -80,10 +80,8 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
{ USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_WISEGROUP_LTD2, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_PI_ENGINEERING, USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL, HID_QUIRK_HIDINPUT_FORCE },
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 7c1188b53c3..4ef02b269a7 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -641,6 +641,8 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct usb_device *dev = hid_to_usb_dev(hid);
struct usbhid_device *usbhid = hid->driver_data;
+ memset(&dinfo, 0, sizeof(dinfo));
+
dinfo.bustype = BUS_USB;
dinfo.busnum = dev->bus->busnum;
dinfo.devnum = dev->devnum;
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 59d83e83da7..93238378664 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -36,17 +36,25 @@
#include <linux/cpu.h>
#include <linux/pci.h>
#include <linux/smp.h>
+#include <linux/moduleparam.h>
#include <asm/msr.h>
#include <asm/processor.h>
#define DRVNAME "coretemp"
+/*
+ * force_tjmax only matters when TjMax can't be read from the CPU itself.
+ * When set, it replaces the driver's suboptimal heuristic.
+ */
+static int force_tjmax;
+module_param_named(tjmax, force_tjmax, int, 0444);
+MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
+
#define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
#define NUM_REAL_CORES 16 /* Number of Real cores per cpu */
#define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */
#define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */
-#define MAX_THRESH_ATTRS 3 /* Maximum no of Threshold attrs */
-#define TOTAL_ATTRS (MAX_CORE_ATTRS + MAX_THRESH_ATTRS)
+#define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
#define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
#ifdef CONFIG_SMP
@@ -69,8 +77,6 @@
* This value is passed as "id" field to rdmsr/wrmsr functions.
* @status_reg: One of IA32_THERM_STATUS or IA32_PACKAGE_THERM_STATUS,
* from where the temperature values should be read.
- * @intrpt_reg: One of IA32_THERM_INTERRUPT or IA32_PACKAGE_THERM_INTERRUPT,
- * from where the thresholds are read.
* @attr_size: Total number of per-core attrs displayed in the sysfs.
* @is_pkg_data: If this is 1, the temp_data holds pkgtemp data.
* Otherwise, temp_data holds coretemp data.
@@ -79,13 +85,11 @@
struct temp_data {
int temp;
int ttarget;
- int tmin;
int tjmax;
unsigned long last_updated;
unsigned int cpu;
u32 cpu_core_id;
u32 status_reg;
- u32 intrpt_reg;
int attr_size;
bool is_pkg_data;
bool valid;
@@ -143,19 +147,6 @@ static ssize_t show_crit_alarm(struct device *dev,
return sprintf(buf, "%d\n", (eax >> 5) & 1);
}
-static ssize_t show_max_alarm(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- u32 eax, edx;
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct platform_data *pdata = dev_get_drvdata(dev);
- struct temp_data *tdata = pdata->core_data[attr->index];
-
- rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
-
- return sprintf(buf, "%d\n", !!(eax & THERM_STATUS_THRESHOLD1));
-}
-
static ssize_t show_tjmax(struct device *dev,
struct device_attribute *devattr, char *buf)
{
@@ -174,83 +165,6 @@ static ssize_t show_ttarget(struct device *dev,
return sprintf(buf, "%d\n", pdata->core_data[attr->index]->ttarget);
}
-static ssize_t store_ttarget(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- struct platform_data *pdata = dev_get_drvdata(dev);
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct temp_data *tdata = pdata->core_data[attr->index];
- u32 eax, edx;
- unsigned long val;
- int diff;
-
- if (strict_strtoul(buf, 10, &val))
- return -EINVAL;
-
- /*
- * THERM_MASK_THRESHOLD1 is 7 bits wide. Values are entered in terms
- * of milli degree celsius. Hence don't accept val > (127 * 1000)
- */
- if (val > tdata->tjmax || val > 127000)
- return -EINVAL;
-
- diff = (tdata->tjmax - val) / 1000;
-
- mutex_lock(&tdata->update_lock);
- rdmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, &eax, &edx);
- eax = (eax & ~THERM_MASK_THRESHOLD1) |
- (diff << THERM_SHIFT_THRESHOLD1);
- wrmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, eax, edx);
- tdata->ttarget = val;
- mutex_unlock(&tdata->update_lock);
-
- return count;
-}
-
-static ssize_t show_tmin(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct platform_data *pdata = dev_get_drvdata(dev);
-
- return sprintf(buf, "%d\n", pdata->core_data[attr->index]->tmin);
-}
-
-static ssize_t store_tmin(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- struct platform_data *pdata = dev_get_drvdata(dev);
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct temp_data *tdata = pdata->core_data[attr->index];
- u32 eax, edx;
- unsigned long val;
- int diff;
-
- if (strict_strtoul(buf, 10, &val))
- return -EINVAL;
-
- /*
- * THERM_MASK_THRESHOLD0 is 7 bits wide. Values are entered in terms
- * of milli degree celsius. Hence don't accept val > (127 * 1000)
- */
- if (val > tdata->tjmax || val > 127000)
- return -EINVAL;
-
- diff = (tdata->tjmax - val) / 1000;
-
- mutex_lock(&tdata->update_lock);
- rdmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, &eax, &edx);
- eax = (eax & ~THERM_MASK_THRESHOLD0) |
- (diff << THERM_SHIFT_THRESHOLD0);
- wrmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, eax, edx);
- tdata->tmin = val;
- mutex_unlock(&tdata->update_lock);
-
- return count;
-}
-
static ssize_t show_temp(struct device *dev,
struct device_attribute *devattr, char *buf)
{
@@ -374,7 +288,6 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
{
- /* The 100C is default for both mobile and non mobile CPUs */
int err;
u32 eax, edx;
u32 val;
@@ -385,7 +298,8 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
*/
err = rdmsr_safe_on_cpu(id, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
if (err) {
- dev_warn(dev, "Unable to read TjMax from CPU.\n");
+ if (c->x86_model > 0xe && c->x86_model != 0x1c)
+ dev_warn(dev, "Unable to read TjMax from CPU %u\n", id);
} else {
val = (eax >> 16) & 0xff;
/*
@@ -393,11 +307,17 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
* will be used
*/
if (val) {
- dev_info(dev, "TjMax is %d C.\n", val);
+ dev_dbg(dev, "TjMax is %d degrees C\n", val);
return val * 1000;
}
}
+ if (force_tjmax) {
+ dev_notice(dev, "TjMax forced to %d degrees C by user\n",
+ force_tjmax);
+ return force_tjmax * 1000;
+ }
+
/*
* An assumption is made for early CPUs and unreadable MSR.
* NOTE: the calculated value may not be correct.
@@ -414,21 +334,6 @@ static void __devinit get_ucode_rev_on_cpu(void *edx)
rdmsr(MSR_IA32_UCODE_REV, eax, *(u32 *)edx);
}
-static int get_pkg_tjmax(unsigned int cpu, struct device *dev)
-{
- int err;
- u32 eax, edx, val;
-
- err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
- if (!err) {
- val = (eax >> 16) & 0xff;
- if (val)
- return val * 1000;
- }
- dev_warn(dev, "Unable to read Pkg-TjMax from CPU:%u\n", cpu);
- return 100000; /* Default TjMax: 100 degree celsius */
-}
-
static int create_name_attr(struct platform_data *pdata, struct device *dev)
{
sysfs_attr_init(&pdata->name_attr.attr);
@@ -442,19 +347,14 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev,
int attr_no)
{
int err, i;
- static ssize_t (*rd_ptr[TOTAL_ATTRS]) (struct device *dev,
+ static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev,
struct device_attribute *devattr, char *buf) = {
show_label, show_crit_alarm, show_temp, show_tjmax,
- show_max_alarm, show_ttarget, show_tmin };
- static ssize_t (*rw_ptr[TOTAL_ATTRS]) (struct device *dev,
- struct device_attribute *devattr, const char *buf,
- size_t count) = { NULL, NULL, NULL, NULL, NULL,
- store_ttarget, store_tmin };
- static const char *names[TOTAL_ATTRS] = {
+ show_ttarget };
+ static const char *const names[TOTAL_ATTRS] = {
"temp%d_label", "temp%d_crit_alarm",
"temp%d_input", "temp%d_crit",
- "temp%d_max_alarm", "temp%d_max",
- "temp%d_max_hyst" };
+ "temp%d_max" };
for (i = 0; i < tdata->attr_size; i++) {
snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, names[i],
@@ -462,10 +362,6 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev,
sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr);
tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i];
tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO;
- if (rw_ptr[i]) {
- tdata->sd_attrs[i].dev_attr.attr.mode |= S_IWUSR;
- tdata->sd_attrs[i].dev_attr.store = rw_ptr[i];
- }
tdata->sd_attrs[i].dev_attr.show = rd_ptr[i];
tdata->sd_attrs[i].index = attr_no;
err = device_create_file(dev, &tdata->sd_attrs[i].dev_attr);
@@ -481,9 +377,9 @@ exit_free:
}
-static int __devinit chk_ucode_version(struct platform_device *pdev)
+static int __cpuinit chk_ucode_version(unsigned int cpu)
{
- struct cpuinfo_x86 *c = &cpu_data(pdev->id);
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
int err;
u32 edx;
@@ -494,17 +390,15 @@ static int __devinit chk_ucode_version(struct platform_device *pdev)
*/
if (c->x86_model == 0xe && c->x86_mask < 0xc) {
/* check for microcode update */
- err = smp_call_function_single(pdev->id, get_ucode_rev_on_cpu,
+ err = smp_call_function_single(cpu, get_ucode_rev_on_cpu,
&edx, 1);
if (err) {
- dev_err(&pdev->dev,
- "Cannot determine microcode revision of "
- "CPU#%u (%d)!\n", pdev->id, err);
+ pr_err("Cannot determine microcode revision of "
+ "CPU#%u (%d)!\n", cpu, err);
return -ENODEV;
} else if (edx < 0x39) {
- dev_err(&pdev->dev,
- "Errata AE18 not fixed, update BIOS or "
- "microcode of the CPU!\n");
+ pr_err("Errata AE18 not fixed, update BIOS or "
+ "microcode of the CPU!\n");
return -ENODEV;
}
}
@@ -538,8 +432,6 @@ static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag)
tdata->status_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_STATUS :
MSR_IA32_THERM_STATUS;
- tdata->intrpt_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_INTERRUPT :
- MSR_IA32_THERM_INTERRUPT;
tdata->is_pkg_data = pkg_flag;
tdata->cpu = cpu;
tdata->cpu_core_id = TO_CORE_ID(cpu);
@@ -548,11 +440,11 @@ static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag)
return tdata;
}
-static int create_core_data(struct platform_data *pdata,
- struct platform_device *pdev,
+static int create_core_data(struct platform_device *pdev,
unsigned int cpu, int pkg_flag)
{
struct temp_data *tdata;
+ struct platform_data *pdata = platform_get_drvdata(pdev);
struct cpuinfo_x86 *c = &cpu_data(cpu);
u32 eax, edx;
int err, attr_no;
@@ -588,20 +480,21 @@ static int create_core_data(struct platform_data *pdata,
goto exit_free;
/* We can access status register. Get Critical Temperature */
- if (pkg_flag)
- tdata->tjmax = get_pkg_tjmax(pdev->id, &pdev->dev);
- else
- tdata->tjmax = get_tjmax(c, cpu, &pdev->dev);
+ tdata->tjmax = get_tjmax(c, cpu, &pdev->dev);
/*
- * Test if we can access the intrpt register. If so, increase the
- * 'size' enough to have ttarget/tmin/max_alarm interfaces.
- * Initialize ttarget with bits 16:22 of MSR_IA32_THERM_INTERRUPT
+ * Read the still undocumented bits 8:15 of IA32_TEMPERATURE_TARGET.
+ * The target temperature is available on older CPUs but not in this
+ * register. Atoms don't have the register at all.
*/
- err = rdmsr_safe_on_cpu(cpu, tdata->intrpt_reg, &eax, &edx);
- if (!err) {
- tdata->attr_size += MAX_THRESH_ATTRS;
- tdata->ttarget = tdata->tjmax - ((eax >> 16) & 0x7f) * 1000;
+ if (c->x86_model > 0xe && c->x86_model != 0x1c) {
+ err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET,
+ &eax, &edx);
+ if (!err) {
+ tdata->ttarget
+ = tdata->tjmax - ((eax >> 8) & 0xff) * 1000;
+ tdata->attr_size++;
+ }
}
pdata->core_data[attr_no] = tdata;
@@ -613,22 +506,20 @@ static int create_core_data(struct platform_data *pdata,
return 0;
exit_free:
+ pdata->core_data[attr_no] = NULL;
kfree(tdata);
return err;
}
static void coretemp_add_core(unsigned int cpu, int pkg_flag)
{
- struct platform_data *pdata;
struct platform_device *pdev = coretemp_get_pdev(cpu);
int err;
if (!pdev)
return;
- pdata = platform_get_drvdata(pdev);
-
- err = create_core_data(pdata, pdev, cpu, pkg_flag);
+ err = create_core_data(pdev, cpu, pkg_flag);
if (err)
dev_err(&pdev->dev, "Adding Core %u failed\n", cpu);
}
@@ -652,11 +543,6 @@ static int __devinit coretemp_probe(struct platform_device *pdev)
struct platform_data *pdata;
int err;
- /* Check the microcode version of the CPU */
- err = chk_ucode_version(pdev);
- if (err)
- return err;
-
/* Initialize the per-package data structures */
pdata = kzalloc(sizeof(struct platform_data), GFP_KERNEL);
if (!pdata)
@@ -666,7 +552,7 @@ static int __devinit coretemp_probe(struct platform_device *pdev)
if (err)
goto exit_free;
- pdata->phys_proc_id = TO_PHYS_ID(pdev->id);
+ pdata->phys_proc_id = pdev->id;
platform_set_drvdata(pdev, pdata);
pdata->hwmon_dev = hwmon_device_register(&pdev->dev);
@@ -718,7 +604,7 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)
mutex_lock(&pdev_list_mutex);
- pdev = platform_device_alloc(DRVNAME, cpu);
+ pdev = platform_device_alloc(DRVNAME, TO_PHYS_ID(cpu));
if (!pdev) {
err = -ENOMEM;
pr_err("Device allocation failed\n");
@@ -738,7 +624,7 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)
}
pdev_entry->pdev = pdev;
- pdev_entry->phys_proc_id = TO_PHYS_ID(cpu);
+ pdev_entry->phys_proc_id = pdev->id;
list_add_tail(&pdev_entry->list, &pdev_list);
mutex_unlock(&pdev_list_mutex);
@@ -799,6 +685,10 @@ static void __cpuinit get_core_online(unsigned int cpu)
return;
if (!pdev) {
+ /* Check the microcode version of the CPU */
+ if (chk_ucode_version(cpu))
+ return;
+
/*
* Alright, we have DTS support.
* We are bringing the _first_ core in this pkg
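For context, a minimal user-space sketch (not the driver code) of the TjMax resolution order the patched get_tjmax() implements: bits 23:16 of MSR_IA32_TEMPERATURE_TARGET win if non-zero, the force_tjmax module parameter is used next, and a model-based default is the last resort. rdmsr_ok, eax, force_tjmax and model_default below are stand-ins for the real MSR read, module parameter and adjust_tjmax() heuristic.

/*
 * Illustrative sketch only: the order in which TjMax is resolved.
 * Values are in millidegrees Celsius.
 */
#include <stdio.h>

static int resolve_tjmax(int rdmsr_ok, unsigned int eax,
			 unsigned int force_tjmax, int model_default)
{
	if (rdmsr_ok) {
		unsigned int val = (eax >> 16) & 0xff;	/* bits 23:16 */
		if (val)
			return val * 1000;		/* MSR value wins */
	}
	if (force_tjmax)
		return force_tjmax * 1000;		/* user override */
	return model_default;				/* heuristic fallback */
}

int main(void)
{
	printf("%d\n", resolve_tjmax(1, 0x00640000, 0, 100000));	/* 100000 */
	printf("%d\n", resolve_tjmax(0, 0, 95, 100000));		/* 95000 */
	return 0;
}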
diff --git a/drivers/hwmon/ds620.c b/drivers/hwmon/ds620.c
index 257957c69d9..4f7c3fc40a8 100644
--- a/drivers/hwmon/ds620.c
+++ b/drivers/hwmon/ds620.c
@@ -72,7 +72,7 @@ struct ds620_data {
char valid; /* !=0 if following fields are valid */
unsigned long last_updated; /* In jiffies */
- u16 temp[3]; /* Register values, word */
+ s16 temp[3]; /* Register values, word */
};
/*
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index a561c3a0e91..397fc59b568 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -978,6 +978,8 @@ static void pmbus_find_max_attr(struct i2c_client *client,
struct pmbus_limit_attr {
u16 reg; /* Limit register */
bool update; /* True if register needs updates */
+ bool low; /* True if low limit; for limits with compare
+ functions only */
const char *attr; /* Attribute name */
const char *alarm; /* Alarm attribute name */
u32 sbit; /* Alarm attribute status bit */
@@ -1029,7 +1031,8 @@ static bool pmbus_add_limit_attrs(struct i2c_client *client,
if (attr->compare) {
pmbus_add_boolean_cmp(data, name,
l->alarm, index,
- cbase, cindex,
+ l->low ? cindex : cbase,
+ l->low ? cbase : cindex,
attr->sbase + page, l->sbit);
} else {
pmbus_add_boolean_reg(data, name,
@@ -1366,11 +1369,13 @@ static const struct pmbus_sensor_attr power_attributes[] = {
static const struct pmbus_limit_attr temp_limit_attrs[] = {
{
.reg = PMBUS_UT_WARN_LIMIT,
+ .low = true,
.attr = "min",
.alarm = "min_alarm",
.sbit = PB_TEMP_UT_WARNING,
}, {
.reg = PMBUS_UT_FAULT_LIMIT,
+ .low = true,
.attr = "lcrit",
.alarm = "lcrit_alarm",
.sbit = PB_TEMP_UT_FAULT,
@@ -1399,11 +1404,13 @@ static const struct pmbus_limit_attr temp_limit_attrs[] = {
static const struct pmbus_limit_attr temp_limit_attrs23[] = {
{
.reg = PMBUS_UT_WARN_LIMIT,
+ .low = true,
.attr = "min",
.alarm = "min_alarm",
.sbit = PB_TEMP_UT_WARNING,
}, {
.reg = PMBUS_UT_FAULT_LIMIT,
+ .low = true,
.attr = "lcrit",
.alarm = "lcrit_alarm",
.sbit = PB_TEMP_UT_FAULT,
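A small stand-alone sketch of the idea behind the new "low" flag: with a single "first operand exceeds second" comparator, a low-limit alarm is expressed by swapping the operands. The names below (alarm_cmp, input, limits) are illustrative and not the pmbus_core API.

#include <stdbool.h>
#include <stdio.h>

static bool alarm_cmp(int a, int b)
{
	return a > b;		/* generic "first exceeds second" check */
}

int main(void)
{
	int input = 20, max_limit = 70, min_limit = 30;

	/* high limit: alarm when the input exceeds the limit (0 here) */
	printf("max_alarm=%d\n", alarm_cmp(input, max_limit));
	/* low limit: operands swapped, alarm when the input drops below it (1 here) */
	printf("min_alarm=%d\n", alarm_cmp(min_limit, input));
	return 0;
}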
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index f2b377c56a3..36d7f270b14 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -390,7 +390,7 @@ temp_from_reg(u16 reg, s16 regval)
{
if (is_word_sized(reg))
return LM75_TEMP_FROM_REG(regval);
- return regval * 1000;
+ return ((s8)regval) * 1000;
}
static inline u16
@@ -398,7 +398,8 @@ temp_to_reg(u16 reg, long temp)
{
if (is_word_sized(reg))
return LM75_TEMP_TO_REG(temp);
- return DIV_ROUND_CLOSEST(SENSORS_LIMIT(temp, -127000, 128000), 1000);
+ return (s8)DIV_ROUND_CLOSEST(SENSORS_LIMIT(temp, -127000, 128000),
+ 1000);
}
/* Some of analog inputs have internal scaling (2x), 8mV is ADC LSB */
@@ -1715,7 +1716,8 @@ static void w83627ehf_device_remove_files(struct device *dev)
}
/* Get the monitoring functions started */
-static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data)
+static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data,
+ enum kinds kind)
{
int i;
u8 tmp, diode;
@@ -1746,10 +1748,16 @@ static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data)
w83627ehf_write_value(data, W83627EHF_REG_VBAT, tmp | 0x01);
/* Get thermal sensor types */
- diode = w83627ehf_read_value(data, W83627EHF_REG_DIODE);
+ switch (kind) {
+ case w83627ehf:
+ diode = w83627ehf_read_value(data, W83627EHF_REG_DIODE);
+ break;
+ default:
+ diode = 0x70;
+ }
for (i = 0; i < 3; i++) {
if ((tmp & (0x02 << i)))
- data->temp_type[i] = (diode & (0x10 << i)) ? 1 : 2;
+ data->temp_type[i] = (diode & (0x10 << i)) ? 1 : 3;
else
data->temp_type[i] = 4; /* thermistor */
}
@@ -2016,7 +2024,7 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
}
/* Initialize the chip */
- w83627ehf_init_device(data);
+ w83627ehf_init_device(data, sio_data->kind);
data->vrm = vid_which_vrm();
superio_enter(sio_data->sioreg);
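A stand-alone sketch of why the (s8) cast in temp_from_reg() matters: the byte-sized temperature registers are two's complement, so without sign extension a reading of 0xFF scales to 255000 mC instead of -1000 mC. This is an illustration, not the driver code.

#include <stdint.h>
#include <stdio.h>

static long temp_from_reg_buggy(uint16_t regval)
{
	return regval * 1000;		/* 0x00ff -> 255000 mC */
}

static long temp_from_reg_fixed(uint16_t regval)
{
	return ((int8_t)regval) * 1000;	/* 0x00ff -> -1000 mC */
}

int main(void)
{
	printf("%ld %ld\n", temp_from_reg_buggy(0xff), temp_from_reg_fixed(0xff));
	return 0;
}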
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
index 17cf1ab9552..8c2844e5691 100644
--- a/drivers/hwmon/w83791d.c
+++ b/drivers/hwmon/w83791d.c
@@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int w83791d_remove(struct i2c_client *client);
-static int w83791d_read(struct i2c_client *client, u8 register);
-static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
+static int w83791d_read(struct i2c_client *client, u8 reg);
+static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
static struct w83791d_data *w83791d_update_device(struct device *dev);
#ifdef DEBUG
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 274798068a5..16f69be820c 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -435,7 +435,12 @@ static int idedisk_prep_fn(struct request_queue *q, struct request *rq)
if (!(rq->cmd_flags & REQ_FLUSH))
return BLKPREP_OK;
- cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+ if (rq->special) {
+ cmd = rq->special;
+ memset(cmd, 0, sizeof(*cmd));
+ } else {
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+ }
/* FIXME: map struct ide_taskfile on rq->cmd[] */
BUG_ON(cmd == NULL);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 17bf9d95463..6cd642aaa4d 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -287,7 +287,7 @@ void __free_ep(struct kref *kref)
if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
dst_release(ep->dst);
- l2t_release(L2DATA(ep->com.tdev), ep->l2t);
+ l2t_release(ep->com.tdev, ep->l2t);
}
kfree(ep);
}
@@ -1178,7 +1178,7 @@ static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
release_tid(ep->com.tdev, GET_TID(rpl), NULL);
cxgb3_free_atid(ep->com.tdev, ep->atid);
dst_release(ep->dst);
- l2t_release(L2DATA(ep->com.tdev), ep->l2t);
+ l2t_release(ep->com.tdev, ep->l2t);
put_ep(&ep->com);
return CPL_RET_BUF_DONE;
}
@@ -1377,7 +1377,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
if (!child_ep) {
printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
__func__);
- l2t_release(L2DATA(tdev), l2t);
+ l2t_release(tdev, l2t);
dst_release(dst);
goto reject;
}
@@ -1956,7 +1956,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (!err)
goto out;
- l2t_release(L2DATA(h->rdev.t3cdev_p), ep->l2t);
+ l2t_release(h->rdev.t3cdev_p, ep->l2t);
fail4:
dst_release(ep->dst);
fail3:
@@ -2127,7 +2127,7 @@ int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
l2t);
dst_hold(new);
- l2t_release(L2DATA(ep->com.tdev), ep->l2t);
+ l2t_release(ep->com.tdev, ep->l2t);
ep->l2t = l2t;
dst_release(old);
ep->dst = new;
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index 7b404e5443e..e34eeb8ae37 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -668,4 +668,3 @@ module_exit(adp5588_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("ADP5588/87 Keypad driver");
-MODULE_ALIAS("platform:adp5588-keys");
diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c
index b09c7d12721..ab860511f01 100644
--- a/drivers/input/misc/cm109.c
+++ b/drivers/input/misc/cm109.c
@@ -475,7 +475,7 @@ static void cm109_toggle_buzzer_sync(struct cm109_dev *dev, int on)
le16_to_cpu(dev->ctl_req->wIndex),
dev->ctl_data,
USB_PKT_LEN, USB_CTRL_SET_TIMEOUT);
- if (error && error != EINTR)
+ if (error < 0 && error != -EINTR)
err("%s: usb_control_msg() failed %d", __func__, error);
}
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index da280189ef0..5ec617e28f7 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -67,6 +67,10 @@
#define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245
#define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246
#define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247
+/* MacbookAir4,1 (unibody, July 2011) */
+#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI 0x0249
+#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO 0x024a
+#define USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS 0x024b
/* MacbookAir4,2 (unibody, July 2011) */
#define USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI 0x024c
#define USB_DEVICE_ID_APPLE_WELLSPRING6_ISO 0x024d
@@ -112,6 +116,10 @@ static const struct usb_device_id bcm5974_table[] = {
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI),
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ISO),
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_JIS),
+ /* MacbookAir4,1 */
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS),
/* MacbookAir4,2 */
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI),
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_ISO),
@@ -334,6 +342,18 @@ static const struct bcm5974_config bcm5974_config_table[] = {
{ DIM_X, DIM_X / SN_COORD, -4750, 5280 },
{ DIM_Y, DIM_Y / SN_COORD, -150, 6730 }
},
+ {
+ USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI,
+ USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO,
+ USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS,
+ HAS_INTEGRATED_BUTTON,
+ 0x84, sizeof(struct bt_data),
+ 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+ { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
+ { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
+ { DIM_X, DIM_X / SN_COORD, -4620, 5140 },
+ { DIM_Y, DIM_Y / SN_COORD, -150, 6600 }
+ },
{}
};
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index d27c9d91630..958b4eb6369 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -229,13 +229,6 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
get_unaligned_le16(&report[i + 3]);
i += 4;
}
- } else if (usage == WCM_DIGITIZER) {
- /* max pressure isn't reported
- features->pressure_max = (unsigned short)
- (report[i+4] << 8 | report[i + 3]);
- */
- features->pressure_max = 255;
- i += 4;
}
break;
@@ -291,13 +284,6 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
pen = 1;
i++;
break;
-
- case HID_USAGE_UNDEFINED:
- if (usage == WCM_DESKTOP && finger) /* capacity */
- features->pressure_max =
- get_unaligned_le16(&report[i + 3]);
- i += 4;
- break;
}
break;
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index c1c2f7b28d8..9dea71849f4 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -800,25 +800,26 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
int i;
for (i = 0; i < 2; i++) {
- int p = data[9 * i + 2];
- bool touch = p && !wacom->shared->stylus_in_proximity;
+ int offset = (data[1] & 0x80) ? (8 * i) : (9 * i);
+ bool touch = data[offset + 3] & 0x80;
- input_mt_slot(input, i);
- input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
/*
* Touch events need to be disabled while stylus is
* in proximity because user's hand is resting on touchpad
* and sending unwanted events. User expects tablet buttons
* to continue working though.
*/
+ touch = touch && !wacom->shared->stylus_in_proximity;
+
+ input_mt_slot(input, i);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
if (touch) {
- int x = get_unaligned_be16(&data[9 * i + 3]) & 0x7ff;
- int y = get_unaligned_be16(&data[9 * i + 5]) & 0x7ff;
+ int x = get_unaligned_be16(&data[offset + 3]) & 0x7ff;
+ int y = get_unaligned_be16(&data[offset + 5]) & 0x7ff;
if (features->quirks & WACOM_QUIRK_BBTOUCH_LOWRES) {
x <<= 5;
y <<= 5;
}
- input_report_abs(input, ABS_MT_PRESSURE, p);
input_report_abs(input, ABS_MT_POSITION_X, x);
input_report_abs(input, ABS_MT_POSITION_Y, y);
}
@@ -1056,10 +1057,11 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
features->x_fuzz, 0);
input_set_abs_params(input_dev, ABS_Y, 0, features->y_max,
features->y_fuzz, 0);
- input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max,
- features->pressure_fuzz, 0);
if (features->device_type == BTN_TOOL_PEN) {
+ input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max,
+ features->pressure_fuzz, 0);
+
/* penabled devices have fixed resolution for each model */
input_abs_set_res(input_dev, ABS_X, features->x_resolution);
input_abs_set_res(input_dev, ABS_Y, features->y_resolution);
@@ -1098,6 +1100,8 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
__set_bit(BTN_TOOL_MOUSE, input_dev->keybit);
__set_bit(BTN_STYLUS, input_dev->keybit);
__set_bit(BTN_STYLUS2, input_dev->keybit);
+
+ __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
break;
case WACOM_21UX2:
@@ -1120,12 +1124,12 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
for (i = 0; i < 8; i++)
__set_bit(BTN_0 + i, input_dev->keybit);
- if (wacom_wac->features.type != WACOM_21UX2) {
- input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0);
- input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0);
- }
-
+ input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0);
+ input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0);
input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+
+ __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
+
wacom_setup_cintiq(wacom_wac);
break;
@@ -1150,6 +1154,8 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
/* fall through */
case INTUOS:
+ __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+
wacom_setup_intuos(wacom_wac);
break;
@@ -1165,6 +1171,8 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
wacom_setup_intuos(wacom_wac);
+
+ __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
break;
case TABLETPC2FG:
@@ -1183,26 +1191,40 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
case TABLETPC:
__clear_bit(ABS_MISC, input_dev->absbit);
+ __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
+
if (features->device_type != BTN_TOOL_PEN)
break; /* no need to process stylus stuff */
/* fall through */
case PL:
- case PTU:
case DTU:
__set_bit(BTN_TOOL_PEN, input_dev->keybit);
+ __set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
__set_bit(BTN_STYLUS, input_dev->keybit);
__set_bit(BTN_STYLUS2, input_dev->keybit);
+
+ __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
+ break;
+
+ case PTU:
+ __set_bit(BTN_STYLUS2, input_dev->keybit);
/* fall through */
case PENPARTNER:
+ __set_bit(BTN_TOOL_PEN, input_dev->keybit);
__set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
+ __set_bit(BTN_STYLUS, input_dev->keybit);
+
+ __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
break;
case BAMBOO_PT:
__clear_bit(ABS_MISC, input_dev->absbit);
+ __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+
if (features->device_type == BTN_TOOL_DOUBLETAP) {
__set_bit(BTN_LEFT, input_dev->keybit);
__set_bit(BTN_FORWARD, input_dev->keybit);
diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c
index c14412ef464..9941d39df43 100644
--- a/drivers/input/touchscreen/wacom_w8001.c
+++ b/drivers/input/touchscreen/wacom_w8001.c
@@ -383,6 +383,8 @@ static int w8001_setup(struct w8001 *w8001)
dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
strlcat(w8001->name, "Wacom Serial", sizeof(w8001->name));
+ __set_bit(INPUT_PROP_DIRECT, dev->propbit);
+
/* penabled? */
error = w8001_command(w8001, W8001_CMD_QUERY, true);
if (!error) {
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 3dc9befa5ae..6dcc7e2d54d 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -1388,7 +1388,7 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
return ret;
}
- ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
+ ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
if (ret)
printk(KERN_ERR "IOMMU: can't request irq\n");
return ret;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index c621c98c99d..a88f3cbb100 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -306,6 +306,11 @@ static inline bool dma_pte_present(struct dma_pte *pte)
return (pte->val & 3) != 0;
}
+static inline bool dma_pte_superpage(struct dma_pte *pte)
+{
+ return (pte->val & (1 << 7));
+}
+
static inline int first_pte_in_page(struct dma_pte *pte)
{
return !((unsigned long)pte & ~VTD_PAGE_MASK);
@@ -404,6 +409,9 @@ static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
+int intel_iommu_gfx_mapped;
+EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
+
#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);
@@ -577,17 +585,18 @@ static void domain_update_iommu_snooping(struct dmar_domain *domain)
static void domain_update_iommu_superpage(struct dmar_domain *domain)
{
- int i, mask = 0xf;
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu = NULL;
+ int mask = 0xf;
if (!intel_iommu_superpage) {
domain->iommu_superpage = 0;
return;
}
- domain->iommu_superpage = 4; /* 1TiB */
-
- for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
- mask |= cap_super_page_val(g_iommus[i]->cap);
+ /* set iommu_superpage to the smallest common denominator */
+ for_each_active_iommu(iommu, drhd) {
+ mask &= cap_super_page_val(iommu->cap);
if (!mask) {
break;
}
@@ -730,29 +739,23 @@ out:
}
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
- unsigned long pfn, int large_level)
+ unsigned long pfn, int target_level)
{
int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
struct dma_pte *parent, *pte = NULL;
int level = agaw_to_level(domain->agaw);
- int offset, target_level;
+ int offset;
BUG_ON(!domain->pgd);
BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
parent = domain->pgd;
- /* Search pte */
- if (!large_level)
- target_level = 1;
- else
- target_level = large_level;
-
while (level > 0) {
void *tmp_page;
offset = pfn_level_offset(pfn, level);
pte = &parent[offset];
- if (!large_level && (pte->val & DMA_PTE_LARGE_PAGE))
+ if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
break;
if (level == target_level)
break;
@@ -816,13 +819,14 @@ static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
}
/* clear last level pte, a tlb flush should be followed */
-static void dma_pte_clear_range(struct dmar_domain *domain,
+static int dma_pte_clear_range(struct dmar_domain *domain,
unsigned long start_pfn,
unsigned long last_pfn)
{
int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
unsigned int large_page = 1;
struct dma_pte *first_pte, *pte;
+ int order;
BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
@@ -846,6 +850,9 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
(void *)pte - (void *)first_pte);
} while (start_pfn && start_pfn <= last_pfn);
+
+ order = (large_page - 1) * 9;
+ return order;
}
/* free page table pages. last level pte should already be cleared */
@@ -3226,9 +3233,6 @@ static void __init init_no_remapping_devices(void)
}
}
- if (dmar_map_gfx)
- return;
-
for_each_drhd_unit(drhd) {
int i;
if (drhd->ignored || drhd->include_all)
@@ -3236,18 +3240,23 @@ static void __init init_no_remapping_devices(void)
for (i = 0; i < drhd->devices_cnt; i++)
if (drhd->devices[i] &&
- !IS_GFX_DEVICE(drhd->devices[i]))
+ !IS_GFX_DEVICE(drhd->devices[i]))
break;
if (i < drhd->devices_cnt)
continue;
- /* bypass IOMMU if it is just for gfx devices */
- drhd->ignored = 1;
- for (i = 0; i < drhd->devices_cnt; i++) {
- if (!drhd->devices[i])
- continue;
- drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
+ /* This IOMMU has *only* gfx devices. Either bypass it or
+ set the gfx_mapped flag, as appropriate */
+ if (dmar_map_gfx) {
+ intel_iommu_gfx_mapped = 1;
+ } else {
+ drhd->ignored = 1;
+ for (i = 0; i < drhd->devices_cnt; i++) {
+ if (!drhd->devices[i])
+ continue;
+ drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
+ }
}
}
}
@@ -3568,6 +3577,8 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
found = 1;
}
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+
if (found == 0) {
unsigned long tmp_flags;
spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
@@ -3584,8 +3595,6 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
spin_unlock_irqrestore(&iommu->lock, tmp_flags);
}
}
-
- spin_unlock_irqrestore(&device_domain_lock, flags);
}
static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
@@ -3739,6 +3748,7 @@ static int intel_iommu_domain_init(struct iommu_domain *domain)
vm_domain_exit(dmar_domain);
return -ENOMEM;
}
+ domain_update_iommu_cap(dmar_domain);
domain->priv = dmar_domain;
return 0;
@@ -3864,14 +3874,15 @@ static int intel_iommu_unmap(struct iommu_domain *domain,
{
struct dmar_domain *dmar_domain = domain->priv;
size_t size = PAGE_SIZE << gfp_order;
+ int order;
- dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
+ order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
(iova + size - 1) >> VTD_PAGE_SHIFT);
if (dmar_domain->max_addr == iova + size)
dmar_domain->max_addr = iova;
- return gfp_order;
+ return order;
}
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -3950,7 +3961,11 @@ static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
dmar_map_gfx = 0;
- }
+ } else if (dmar_map_gfx) {
+ /* we have to ensure the gfx device is idle before we flush */
+ printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
+ intel_iommu_strict = 1;
+ }
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
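A stand-alone sketch of the order value that the changed dma_pte_clear_range() now reports back through intel_iommu_unmap(): each VT-d page-table level covers 9 bits of the PFN (512 entries per table), so a mapping cleared at level N corresponds to an order of (N - 1) * 9 in 4KiB pages. Illustration only, not the driver code.

#include <stdio.h>

static int level_to_order(unsigned int large_page_level)
{
	return (large_page_level - 1) * 9;
}

int main(void)
{
	/* level 1 = 4KiB PTE, level 2 = 2MiB superpage, level 3 = 1GiB */
	for (unsigned int lvl = 1; lvl <= 3; lvl++)
		printf("level %u -> order %d (%lu pages)\n", lvl,
		       level_to_order(lvl), 1UL << level_to_order(lvl));
	return 0;
}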
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 49da55c1528..8c2a000cf3f 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1698,6 +1698,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
ti->num_flush_requests = 1;
+ ti->discard_zeroes_data_unsupported = 1;
+
return 0;
bad:
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 89f73ca22cf..f84c08029b2 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -81,8 +81,10 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
* corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>
*/
if (!strcasecmp(arg_name, "corrupt_bio_byte")) {
- if (!argc)
+ if (!argc) {
ti->error = "Feature corrupt_bio_byte requires parameters";
+ return -EINVAL;
+ }
r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error);
if (r)
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index f8214702963..32ac70861d6 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -628,6 +628,7 @@ void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc,
job->kc = kc;
job->fn = fn;
job->context = context;
+ job->master_job = job;
atomic_inc(&kc->nr_jobs);
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index a002dd85db1..86df8b2cf92 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -449,7 +449,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
rs->ti->error = "write_mostly option is only valid for RAID1";
return -EINVAL;
}
- if (value > rs->md.raid_disks) {
+ if (value >= rs->md.raid_disks) {
rs->ti->error = "Invalid write_mostly drive index given";
return -EINVAL;
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 986b8754bb0..bc04518e9d8 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1238,14 +1238,15 @@ static void dm_table_set_integrity(struct dm_table *t)
return;
template_disk = dm_table_get_integrity_disk(t, true);
- if (!template_disk &&
- blk_integrity_is_initialized(dm_disk(t->md))) {
+ if (template_disk)
+ blk_integrity_register(dm_disk(t->md),
+ blk_get_integrity(template_disk));
+ else if (blk_integrity_is_initialized(dm_disk(t->md)))
DMWARN("%s: device no longer has a valid integrity profile",
dm_device_name(t->md));
- return;
- }
- blk_integrity_register(dm_disk(t->md),
- blk_get_integrity(template_disk));
+ else
+ DMWARN("%s: unable to establish an integrity profile",
+ dm_device_name(t->md));
}
static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
@@ -1282,6 +1283,22 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
return 0;
}
+static bool dm_table_discard_zeroes_data(struct dm_table *t)
+{
+ struct dm_target *ti;
+ unsigned i = 0;
+
+ /* Ensure that all targets support discard_zeroes_data. */
+ while (i < dm_table_get_num_targets(t)) {
+ ti = dm_table_get_target(t, i++);
+
+ if (ti->discard_zeroes_data_unsupported)
+ return 0;
+ }
+
+ return 1;
+}
+
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *limits)
{
@@ -1304,6 +1321,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
}
blk_queue_flush(q, flush);
+ if (!dm_table_discard_zeroes_data(t))
+ q->limits.discard_zeroes_data = 0;
+
dm_table_set_integrity(t);
/*
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 5404b229582..5c95ccb5950 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -61,6 +61,11 @@
static void autostart_arrays(int part);
#endif
+/* pers_list is a list of registered personalities protected
+ * by pers_lock.
+ * pers_lock also serves to protect accesses to
+ * mddev->thread when the mutex cannot be held.
+ */
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);
@@ -739,7 +744,12 @@ static void mddev_unlock(mddev_t * mddev)
} else
mutex_unlock(&mddev->reconfig_mutex);
+ /* As we've dropped the mutex we need a spinlock to
+ * make sure the thread doesn't disappear
+ */
+ spin_lock(&pers_lock);
md_wakeup_thread(mddev->thread);
+ spin_unlock(&pers_lock);
}
static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
@@ -6429,11 +6439,18 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
return thread;
}
-void md_unregister_thread(mdk_thread_t *thread)
+void md_unregister_thread(mdk_thread_t **threadp)
{
+ mdk_thread_t *thread = *threadp;
if (!thread)
return;
dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
+ /* Locking ensures that mddev_unlock does not wake_up a
+ * non-existent thread
+ */
+ spin_lock(&pers_lock);
+ *threadp = NULL;
+ spin_unlock(&pers_lock);
kthread_stop(thread->tsk);
kfree(thread);
@@ -7340,8 +7357,7 @@ static void reap_sync_thread(mddev_t *mddev)
mdk_rdev_t *rdev;
/* resync has finished, collect result */
- md_unregister_thread(mddev->sync_thread);
- mddev->sync_thread = NULL;
+ md_unregister_thread(&mddev->sync_thread);
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
/* success...*/
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 1e586bb4452..0a309dc29b4 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -560,7 +560,7 @@ extern int register_md_personality(struct mdk_personality *p);
extern int unregister_md_personality(struct mdk_personality *p);
extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev),
mddev_t *mddev, const char *name);
-extern void md_unregister_thread(mdk_thread_t *thread);
+extern void md_unregister_thread(mdk_thread_t **threadp);
extern void md_wakeup_thread(mdk_thread_t *thread);
extern void md_check_recovery(mddev_t *mddev);
extern void md_write_start(mddev_t *mddev, struct bio *bi);
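The md_unregister_thread() signature change above exists so the caller's pointer can be cleared under pers_lock before the thread is stopped, closing the race with md_wakeup_thread() in mddev_unlock(). A minimal user-space sketch of that pattern follows; the types and helpers are placeholders, not kernel API.

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct worker { int dummy; };

static pthread_mutex_t pers_lock = PTHREAD_MUTEX_INITIALIZER;

static void worker_kick(struct worker *w) { (void)w; /* wake it up */ }
static void worker_stop(struct worker *w) { (void)w; /* stop and join */ }

/* waker side: only dereference the pointer while holding the lock */
static void wake_worker(struct worker **wp)
{
	pthread_mutex_lock(&pers_lock);
	if (*wp)
		worker_kick(*wp);
	pthread_mutex_unlock(&pers_lock);
}

/* teardown side: hide the worker before stopping it */
static void unregister_worker(struct worker **wp)
{
	struct worker *w = *wp;

	if (!w)
		return;
	pthread_mutex_lock(&pers_lock);
	*wp = NULL;			/* wakers now see NULL */
	pthread_mutex_unlock(&pers_lock);
	worker_stop(w);
}

int main(void)
{
	static struct worker w;
	struct worker *wp = &w;

	wake_worker(&wp);		/* kicks the live worker */
	unregister_worker(&wp);		/* hides it, then stops it */
	wake_worker(&wp);		/* now a no-op: *wp is NULL */
	return 0;
}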
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 3535c23af28..d5b5fb30017 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -514,8 +514,7 @@ static int multipath_stop (mddev_t *mddev)
{
multipath_conf_t *conf = mddev->private;
- md_unregister_thread(mddev->thread);
- mddev->thread = NULL;
+ md_unregister_thread(&mddev->thread);
blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
mempool_destroy(conf->pool);
kfree(conf->multipaths);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index f4622dd8fc5..d9587dffe53 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2562,8 +2562,7 @@ static int stop(mddev_t *mddev)
raise_barrier(conf);
lower_barrier(conf);
- md_unregister_thread(mddev->thread);
- mddev->thread = NULL;
+ md_unregister_thread(&mddev->thread);
if (conf->r1bio_pool)
mempool_destroy(conf->r1bio_pool);
kfree(conf->mirrors);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index d7a8468ddea..0cd9672cf9c 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2955,7 +2955,7 @@ static int run(mddev_t *mddev)
return 0;
out_free_conf:
- md_unregister_thread(mddev->thread);
+ md_unregister_thread(&mddev->thread);
if (conf->r10bio_pool)
mempool_destroy(conf->r10bio_pool);
safe_put_page(conf->tmppage);
@@ -2973,8 +2973,7 @@ static int stop(mddev_t *mddev)
raise_barrier(conf, 0);
lower_barrier(conf);
- md_unregister_thread(mddev->thread);
- mddev->thread = NULL;
+ md_unregister_thread(&mddev->thread);
blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
if (conf->r10bio_pool)
mempool_destroy(conf->r10bio_pool);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 43709fa6b6d..ac5e8b57e50 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4941,8 +4941,7 @@ static int run(mddev_t *mddev)
return 0;
abort:
- md_unregister_thread(mddev->thread);
- mddev->thread = NULL;
+ md_unregister_thread(&mddev->thread);
if (conf) {
print_raid5_conf(conf);
free_conf(conf);
@@ -4956,8 +4955,7 @@ static int stop(mddev_t *mddev)
{
raid5_conf_t *conf = mddev->private;
- md_unregister_thread(mddev->thread);
- mddev->thread = NULL;
+ md_unregister_thread(&mddev->thread);
if (mddev->queue)
mddev->queue->backing_dev_info.congested_fn = NULL;
free_conf(conf);
diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
index b5ef3622244..b3a5ecdb33a 100644
--- a/drivers/media/video/omap/omap_vout.c
+++ b/drivers/media/video/omap/omap_vout.c
@@ -2194,19 +2194,6 @@ static int __init omap_vout_probe(struct platform_device *pdev)
"'%s' Display already enabled\n",
def_display->name);
}
- /* set the update mode */
- if (def_display->caps &
- OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
- if (dssdrv->enable_te)
- dssdrv->enable_te(def_display, 0);
- if (dssdrv->set_update_mode)
- dssdrv->set_update_mode(def_display,
- OMAP_DSS_UPDATE_MANUAL);
- } else {
- if (dssdrv->set_update_mode)
- dssdrv->set_update_mode(def_display,
- OMAP_DSS_UPDATE_AUTO);
- }
}
}
diff --git a/drivers/media/video/omap3isp/ispccdc.c b/drivers/media/video/omap3isp/ispccdc.c
index 9d3459de04b..80796eb0c53 100644
--- a/drivers/media/video/omap3isp/ispccdc.c
+++ b/drivers/media/video/omap3isp/ispccdc.c
@@ -31,6 +31,7 @@
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/sched.h>
+#include <linux/slab.h>
#include <media/v4l2-event.h>
#include "isp.h"
diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
index d29f9c2d085..e4100b1f68d 100644
--- a/drivers/media/video/uvc/uvc_driver.c
+++ b/drivers/media/video/uvc/uvc_driver.c
@@ -1961,7 +1961,7 @@ static int __uvc_resume(struct usb_interface *intf, int reset)
list_for_each_entry(stream, &dev->streams, list) {
if (stream->intf == intf)
- return uvc_video_resume(stream);
+ return uvc_video_resume(stream, reset);
}
uvc_trace(UVC_TRACE_SUSPEND, "Resume: video streaming USB interface "
diff --git a/drivers/media/video/uvc/uvc_entity.c b/drivers/media/video/uvc/uvc_entity.c
index 48fea373c25..29e239911d0 100644
--- a/drivers/media/video/uvc/uvc_entity.c
+++ b/drivers/media/video/uvc/uvc_entity.c
@@ -49,7 +49,7 @@ static int uvc_mc_register_entity(struct uvc_video_chain *chain,
if (remote == NULL)
return -EINVAL;
- source = (UVC_ENTITY_TYPE(remote) != UVC_TT_STREAMING)
+ source = (UVC_ENTITY_TYPE(remote) == UVC_TT_STREAMING)
? (remote->vdev ? &remote->vdev->entity : NULL)
: &remote->subdev.entity;
if (source == NULL)
diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c
index 8244167c891..ffd1158628b 100644
--- a/drivers/media/video/uvc/uvc_video.c
+++ b/drivers/media/video/uvc/uvc_video.c
@@ -1104,10 +1104,18 @@ int uvc_video_suspend(struct uvc_streaming *stream)
* buffers, making sure userspace applications are notified of the problem
* instead of waiting forever.
*/
-int uvc_video_resume(struct uvc_streaming *stream)
+int uvc_video_resume(struct uvc_streaming *stream, int reset)
{
int ret;
+ /* If the bus has been reset on resume, set the alternate setting to 0.
+ * This should be the default value, but some devices crash or otherwise
+ * misbehave if they don't receive a SET_INTERFACE request before any
+ * other video control request.
+ */
+ if (reset)
+ usb_set_interface(stream->dev->udev, stream->intfnum, 0);
+
stream->frozen = 0;
ret = uvc_commit_video(stream, &stream->ctrl);
diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
index df32a43ca86..cbdd49bf8b6 100644
--- a/drivers/media/video/uvc/uvcvideo.h
+++ b/drivers/media/video/uvc/uvcvideo.h
@@ -638,7 +638,7 @@ extern void uvc_mc_cleanup_entity(struct uvc_entity *entity);
/* Video */
extern int uvc_video_init(struct uvc_streaming *stream);
extern int uvc_video_suspend(struct uvc_streaming *stream);
-extern int uvc_video_resume(struct uvc_streaming *stream);
+extern int uvc_video_resume(struct uvc_streaming *stream, int reset);
extern int uvc_video_enable(struct uvc_streaming *stream, int enable);
extern int uvc_probe_video(struct uvc_streaming *stream,
struct uvc_streaming_control *probe);
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c
index 06f14008b34..a5c9ed128b9 100644
--- a/drivers/media/video/v4l2-dev.c
+++ b/drivers/media/video/v4l2-dev.c
@@ -173,6 +173,17 @@ static void v4l2_device_release(struct device *cd)
media_device_unregister_entity(&vdev->entity);
#endif
+ /* Do not call v4l2_device_put if there is no release callback set.
+ * Drivers that have no v4l2_device release callback might free the
+ * v4l2_dev instance in the video_device release callback below, so we
+ * must perform this check here.
+ *
+ * TODO: In the long run all drivers that use v4l2_device should use the
+ * v4l2_device release callback. This check will then be unnecessary.
+ */
+ if (v4l2_dev && v4l2_dev->release == NULL)
+ v4l2_dev = NULL;
+
/* Release video_device and perform other
cleanups as needed. */
vdev->release(vdev);
diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
index c72856c4143..e6a2c3b302d 100644
--- a/drivers/media/video/v4l2-device.c
+++ b/drivers/media/video/v4l2-device.c
@@ -38,6 +38,7 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)
mutex_init(&v4l2_dev->ioctl_lock);
v4l2_prio_init(&v4l2_dev->prio);
kref_init(&v4l2_dev->ref);
+ get_device(dev);
v4l2_dev->dev = dev;
if (dev == NULL) {
/* If dev == NULL, then name must be filled in by the caller */
@@ -93,6 +94,7 @@ void v4l2_device_disconnect(struct v4l2_device *v4l2_dev)
if (dev_get_drvdata(v4l2_dev->dev) == v4l2_dev)
dev_set_drvdata(v4l2_dev->dev, NULL);
+ put_device(v4l2_dev->dev);
v4l2_dev->dev = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_device_disconnect);
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c
index 21131c7b0f1..563654c9b19 100644
--- a/drivers/mfd/jz4740-adc.c
+++ b/drivers/mfd/jz4740-adc.c
@@ -273,7 +273,7 @@ static int __devinit jz4740_adc_probe(struct platform_device *pdev)
ct->regs.ack = JZ_REG_ADC_STATUS;
ct->chip.irq_mask = irq_gc_mask_set_bit;
ct->chip.irq_unmask = irq_gc_mask_clr_bit;
- ct->chip.irq_ack = irq_gc_ack;
+ ct->chip.irq_ack = irq_gc_ack_set_bit;
irq_setup_generic_chip(gc, IRQ_MSK(5), 0, 0, IRQ_NOPROBE | IRQ_LEVEL);
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
index 5d1fca0277e..f83103b8970 100644
--- a/drivers/mfd/max8997.c
+++ b/drivers/mfd/max8997.c
@@ -135,10 +135,13 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
max8997->dev = &i2c->dev;
max8997->i2c = i2c;
max8997->type = id->driver_data;
+ max8997->irq = i2c->irq;
if (!pdata)
goto err;
+ max8997->irq_base = pdata->irq_base;
+ max8997->ono = pdata->ono;
max8997->wakeup = pdata->wakeup;
mutex_init(&max8997->iolock);
@@ -152,6 +155,8 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
pm_runtime_set_active(max8997->dev);
+ max8997_irq_init(max8997);
+
mfd_add_devices(max8997->dev, -1, max8997_devs,
ARRAY_SIZE(max8997_devs),
NULL, 0);
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 29601e7d606..86e14583a08 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -17,6 +17,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
@@ -676,7 +677,6 @@ static void usbhs_omap_tll_init(struct device *dev, u8 tll_channel_count)
| OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF
| OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE);
- reg |= (1 << (i + 1));
} else
continue;
diff --git a/drivers/mfd/tps65910-irq.c b/drivers/mfd/tps65910-irq.c
index 2bfad5c86cc..a56be931551 100644
--- a/drivers/mfd/tps65910-irq.c
+++ b/drivers/mfd/tps65910-irq.c
@@ -178,8 +178,10 @@ int tps65910_irq_init(struct tps65910 *tps65910, int irq,
switch (tps65910_chip_id(tps65910)) {
case TPS65910:
tps65910->irq_num = TPS65910_NUM_IRQ;
+ break;
case TPS65911:
tps65910->irq_num = TPS65911_NUM_IRQ;
+ break;
}
/* Register with genirq */
diff --git a/drivers/mfd/twl4030-madc.c b/drivers/mfd/twl4030-madc.c
index b5d598c3aa7..7cbf2aa9e64 100644
--- a/drivers/mfd/twl4030-madc.c
+++ b/drivers/mfd/twl4030-madc.c
@@ -510,8 +510,9 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req)
u8 ch_msb, ch_lsb;
int ret;
- if (!req)
+ if (!req || !twl4030_madc)
return -EINVAL;
+
mutex_lock(&twl4030_madc->lock);
if (req->method < TWL4030_MADC_RT || req->method > TWL4030_MADC_SW2) {
ret = -EINVAL;
@@ -706,6 +707,8 @@ static int __devinit twl4030_madc_probe(struct platform_device *pdev)
if (!madc)
return -ENOMEM;
+ madc->dev = &pdev->dev;
+
/*
* Phoenix provides 2 interrupt lines. The first one is connected to
* the OMAP. The other one can be connected to the other processor such
diff --git a/drivers/mfd/wm8350-gpio.c b/drivers/mfd/wm8350-gpio.c
index ebf99bef392..d584f6b4d6e 100644
--- a/drivers/mfd/wm8350-gpio.c
+++ b/drivers/mfd/wm8350-gpio.c
@@ -37,7 +37,7 @@ static int gpio_set_dir(struct wm8350 *wm8350, int gpio, int dir)
return ret;
}
-static int gpio_set_debounce(struct wm8350 *wm8350, int gpio, int db)
+static int wm8350_gpio_set_debounce(struct wm8350 *wm8350, int gpio, int db)
{
if (db == WM8350_GPIO_DEBOUNCE_ON)
return wm8350_set_bits(wm8350, WM8350_GPIO_DEBOUNCE,
@@ -210,7 +210,7 @@ int wm8350_gpio_config(struct wm8350 *wm8350, int gpio, int dir, int func,
goto err;
if (gpio_set_polarity(wm8350, gpio, pol))
goto err;
- if (gpio_set_debounce(wm8350, gpio, debounce))
+ if (wm8350_gpio_set_debounce(wm8350, gpio, debounce))
goto err;
if (gpio_set_dir(wm8350, gpio, dir))
goto err;
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
index b928bc14e97..8b51cd62d06 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
@@ -375,12 +375,14 @@ void lis3lv02d_poweron(struct lis3lv02d *lis3)
* both have been read. So the value read will always be correct.
* Set BOOT bit to refresh factory tuning values.
*/
- lis3->read(lis3, CTRL_REG2, &reg);
- if (lis3->whoami == WAI_12B)
- reg |= CTRL2_BDU | CTRL2_BOOT;
- else
- reg |= CTRL2_BOOT_8B;
- lis3->write(lis3, CTRL_REG2, reg);
+ if (lis3->pdata) {
+ lis3->read(lis3, CTRL_REG2, &reg);
+ if (lis3->whoami == WAI_12B)
+ reg |= CTRL2_BDU | CTRL2_BOOT;
+ else
+ reg |= CTRL2_BOOT_8B;
+ lis3->write(lis3, CTRL_REG2, reg);
+ }
/* LIS3 power on delay is quite long */
msleep(lis3->pwron_delay / lis3lv02d_get_odr());
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 1ff5486213f..4c1a648d00f 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -926,6 +926,9 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
/*
* Reliable writes are used to implement Forced Unit Access and
* REQ_META accesses, and are supported only on MMCs.
+ *
+ * XXX: this really needs a good explanation of why REQ_META
+ * is treated specially.
*/
bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
(req->cmd_flags & REQ_META)) &&
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 8b8efe52b22..ddd63e7cf4a 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2534,7 +2534,7 @@ config S6GMAC
source "drivers/net/stmmac/Kconfig"
config PCH_GBE
- tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GbE"
+ tristate "Intel EG20T PCH/OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE"
depends on PCI
select MII
---help---
@@ -2547,10 +2547,11 @@ config PCH_GBE
This driver enables Gigabit Ethernet function.
This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
- Output Hub), ML7223.
- ML7223 IOH is for MP(Media Phone) use.
- ML7223 is companion chip for Intel Atom E6xx series.
- ML7223 is completely compatible for Intel EG20T PCH.
+ Output Hub), ML7223/ML7831.
+ ML7223 IOH is for MP(Media Phone) use. ML7831 IOH is for general
+ purpose use.
+ ML7223/ML7831 is a companion chip for the Intel Atom E6xx series.
+ ML7223/ML7831 is completely compatible with the Intel EG20T PCH.
config FTGMAC100
tristate "Faraday FTGMAC100 Gigabit Ethernet support"
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index c423504a755..9a7eb3b36cf 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -239,13 +239,19 @@ void bnx2x_int_disable(struct bnx2x *bp);
* FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X
*
*/
-/* iSCSI L2 */
-#define BNX2X_ISCSI_ETH_CL_ID_IDX 1
-#define BNX2X_ISCSI_ETH_CID 49
+enum {
+ BNX2X_ISCSI_ETH_CL_ID_IDX,
+ BNX2X_FCOE_ETH_CL_ID_IDX,
+ BNX2X_MAX_CNIC_ETH_CL_ID_IDX,
+};
-/* FCoE L2 */
-#define BNX2X_FCOE_ETH_CL_ID_IDX 2
-#define BNX2X_FCOE_ETH_CID 50
+#define BNX2X_CNIC_START_ETH_CID 48
+enum {
+ /* iSCSI L2 */
+ BNX2X_ISCSI_ETH_CID = BNX2X_CNIC_START_ETH_CID,
+ /* FCoE L2 */
+ BNX2X_FCOE_ETH_CID,
+};
/** Additional rings budgeting */
#ifdef BCM_CNIC
@@ -315,6 +321,14 @@ union db_prod {
u32 raw;
};
+/* dropless fc FW/HW related params */
+#define BRB_SIZE(bp) (CHIP_IS_E3(bp) ? 1024 : 512)
+#define MAX_AGG_QS(bp) (CHIP_IS_E1(bp) ? \
+ ETH_MAX_AGGREGATION_QUEUES_E1 :\
+ ETH_MAX_AGGREGATION_QUEUES_E1H_E2)
+#define FW_DROP_LEVEL(bp) (3 + MAX_SPQ_PENDING + MAX_AGG_QS(bp))
+#define FW_PREFETCH_CNT 16
+#define DROPLESS_FC_HEADROOM 100
/* MC hsi */
#define BCM_PAGE_SHIFT 12
@@ -331,15 +345,35 @@ union db_prod {
/* SGE ring related macros */
#define NUM_RX_SGE_PAGES 2
#define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge))
-#define MAX_RX_SGE_CNT (RX_SGE_CNT - 2)
+#define NEXT_PAGE_SGE_DESC_CNT 2
+#define MAX_RX_SGE_CNT (RX_SGE_CNT - NEXT_PAGE_SGE_DESC_CNT)
/* RX_SGE_CNT is promised to be a power of 2 */
#define RX_SGE_MASK (RX_SGE_CNT - 1)
#define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES)
#define MAX_RX_SGE (NUM_RX_SGE - 1)
#define NEXT_SGE_IDX(x) ((((x) & RX_SGE_MASK) == \
- (MAX_RX_SGE_CNT - 1)) ? (x) + 3 : (x) + 1)
+ (MAX_RX_SGE_CNT - 1)) ? \
+ (x) + 1 + NEXT_PAGE_SGE_DESC_CNT : \
+ (x) + 1)
#define RX_SGE(x) ((x) & MAX_RX_SGE)
+/*
+ * Number of required SGEs is the sum of two:
+ * 1. Number of possible opened aggregations (next packet for
+ * these aggregations will probably consume SGE immidiatelly)
+ * 2. Rest of BRB blocks divided by 2 (block will consume new SGE only
+ * after placement on BD for new TPA aggregation)
+ *
+ * Takes into account NEXT_PAGE_SGE_DESC_CNT "next" elements on each page
+ */
+#define NUM_SGE_REQ (MAX_AGG_QS(bp) + \
+ (BRB_SIZE(bp) - MAX_AGG_QS(bp)) / 2)
+#define NUM_SGE_PG_REQ ((NUM_SGE_REQ + MAX_RX_SGE_CNT - 1) / \
+ MAX_RX_SGE_CNT)
+#define SGE_TH_LO(bp) (NUM_SGE_REQ + \
+ NUM_SGE_PG_REQ * NEXT_PAGE_SGE_DESC_CNT)
+#define SGE_TH_HI(bp) (SGE_TH_LO(bp) + DROPLESS_FC_HEADROOM)
+
/* Manipulate a bit vector defined as an array of u64 */
/* Number of bits in one sge_mask array element */
@@ -551,24 +585,43 @@ struct bnx2x_fastpath {
#define NUM_TX_RINGS 16
#define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types))
-#define MAX_TX_DESC_CNT (TX_DESC_CNT - 1)
+#define NEXT_PAGE_TX_DESC_CNT 1
+#define MAX_TX_DESC_CNT (TX_DESC_CNT - NEXT_PAGE_TX_DESC_CNT)
#define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS)
#define MAX_TX_BD (NUM_TX_BD - 1)
#define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
#define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \
- (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
+ (MAX_TX_DESC_CNT - 1)) ? \
+ (x) + 1 + NEXT_PAGE_TX_DESC_CNT : \
+ (x) + 1)
#define TX_BD(x) ((x) & MAX_TX_BD)
#define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT)
/* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */
#define NUM_RX_RINGS 8
#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
-#define MAX_RX_DESC_CNT (RX_DESC_CNT - 2)
+#define NEXT_PAGE_RX_DESC_CNT 2
+#define MAX_RX_DESC_CNT (RX_DESC_CNT - NEXT_PAGE_RX_DESC_CNT)
#define RX_DESC_MASK (RX_DESC_CNT - 1)
#define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS)
#define MAX_RX_BD (NUM_RX_BD - 1)
#define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
-#define MIN_RX_AVAIL 128
+
+/* dropless fc calculations for BDs
+ *
+ * The number of BDs should match the number of buffers in the BRB:
+ * Low threshold takes into account NEXT_PAGE_RX_DESC_CNT
+ * "next" elements on each page
+ */
+#define NUM_BD_REQ BRB_SIZE(bp)
+#define NUM_BD_PG_REQ ((NUM_BD_REQ + MAX_RX_DESC_CNT - 1) / \
+ MAX_RX_DESC_CNT)
+#define BD_TH_LO(bp) (NUM_BD_REQ + \
+ NUM_BD_PG_REQ * NEXT_PAGE_RX_DESC_CNT + \
+ FW_DROP_LEVEL(bp))
+#define BD_TH_HI(bp) (BD_TH_LO(bp) + DROPLESS_FC_HEADROOM)
+
+#define MIN_RX_AVAIL ((bp)->dropless_fc ? BD_TH_HI(bp) + 128 : 128)
#define MIN_RX_SIZE_TPA_HW (CHIP_IS_E1(bp) ? \
ETH_MIN_RX_CQES_WITH_TPA_E1 : \
@@ -579,7 +632,9 @@ struct bnx2x_fastpath {
MIN_RX_AVAIL))
#define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \
- (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1)
+ (MAX_RX_DESC_CNT - 1)) ? \
+ (x) + 1 + NEXT_PAGE_RX_DESC_CNT : \
+ (x) + 1)
#define RX_BD(x) ((x) & MAX_RX_BD)
/*
@@ -589,14 +644,31 @@ struct bnx2x_fastpath {
#define CQE_BD_REL (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd))
#define NUM_RCQ_RINGS (NUM_RX_RINGS * CQE_BD_REL)
#define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
-#define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - 1)
+#define NEXT_PAGE_RCQ_DESC_CNT 1
+#define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - NEXT_PAGE_RCQ_DESC_CNT)
#define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS)
#define MAX_RCQ_BD (NUM_RCQ_BD - 1)
#define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2)
#define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \
- (MAX_RCQ_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
+ (MAX_RCQ_DESC_CNT - 1)) ? \
+ (x) + 1 + NEXT_PAGE_RCQ_DESC_CNT : \
+ (x) + 1)
#define RCQ_BD(x) ((x) & MAX_RCQ_BD)
+/* dropless fc calculations for RCQs
+ *
+ * The number of RCQs should match the number of buffers in the BRB:
+ * Low threshold takes into account NEXT_PAGE_RCQ_DESC_CNT
+ * "next" elements on each page
+ */
+#define NUM_RCQ_REQ BRB_SIZE(bp)
+#define NUM_RCQ_PG_REQ ((NUM_BD_REQ + MAX_RCQ_DESC_CNT - 1) / \
+ MAX_RCQ_DESC_CNT)
+#define RCQ_TH_LO(bp) (NUM_RCQ_REQ + \
+ NUM_RCQ_PG_REQ * NEXT_PAGE_RCQ_DESC_CNT + \
+ FW_DROP_LEVEL(bp))
+#define RCQ_TH_HI(bp) (RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM)
+
/* This is needed for determining of last_max */
#define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b))
@@ -685,24 +757,17 @@ struct bnx2x_fastpath {
#define FP_CSB_FUNC_OFF \
offsetof(struct cstorm_status_block_c, func)
-#define HC_INDEX_TOE_RX_CQ_CONS 0 /* Formerly Ustorm TOE CQ index */
- /* (HC_INDEX_U_TOE_RX_CQ_CONS) */
-#define HC_INDEX_ETH_RX_CQ_CONS 1 /* Formerly Ustorm ETH CQ index */
- /* (HC_INDEX_U_ETH_RX_CQ_CONS) */
-#define HC_INDEX_ETH_RX_BD_CONS 2 /* Formerly Ustorm ETH BD index */
- /* (HC_INDEX_U_ETH_RX_BD_CONS) */
-
-#define HC_INDEX_TOE_TX_CQ_CONS 4 /* Formerly Cstorm TOE CQ index */
- /* (HC_INDEX_C_TOE_TX_CQ_CONS) */
-#define HC_INDEX_ETH_TX_CQ_CONS_COS0 5 /* Formerly Cstorm ETH CQ index */
- /* (HC_INDEX_C_ETH_TX_CQ_CONS) */
-#define HC_INDEX_ETH_TX_CQ_CONS_COS1 6 /* Formerly Cstorm ETH CQ index */
- /* (HC_INDEX_C_ETH_TX_CQ_CONS) */
-#define HC_INDEX_ETH_TX_CQ_CONS_COS2 7 /* Formerly Cstorm ETH CQ index */
- /* (HC_INDEX_C_ETH_TX_CQ_CONS) */
+#define HC_INDEX_ETH_RX_CQ_CONS 1
-#define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0
+#define HC_INDEX_OOO_TX_CQ_CONS 4
+
+#define HC_INDEX_ETH_TX_CQ_CONS_COS0 5
+
+#define HC_INDEX_ETH_TX_CQ_CONS_COS1 6
+#define HC_INDEX_ETH_TX_CQ_CONS_COS2 7
+
+#define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0
#define BNX2X_RX_SB_INDEX \
(&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS])
@@ -1100,11 +1165,12 @@ struct bnx2x {
#define BP_PORT(bp) (bp->pfid & 1)
#define BP_FUNC(bp) (bp->pfid)
#define BP_ABS_FUNC(bp) (bp->pf_num)
-#define BP_E1HVN(bp) (bp->pfid >> 1)
-#define BP_VN(bp) (BP_E1HVN(bp)) /*remove when approved*/
-#define BP_L_ID(bp) (BP_E1HVN(bp) << 2)
-#define BP_FW_MB_IDX(bp) (BP_PORT(bp) +\
- BP_VN(bp) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1))
+#define BP_VN(bp) ((bp)->pfid >> 1)
+#define BP_MAX_VN_NUM(bp) (CHIP_MODE_IS_4_PORT(bp) ? 2 : 4)
+#define BP_L_ID(bp) (BP_VN(bp) << 2)
+#define BP_FW_MB_IDX_VN(bp, vn) (BP_PORT(bp) +\
+ (vn) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1))
+#define BP_FW_MB_IDX(bp) BP_FW_MB_IDX_VN(bp, BP_VN(bp))
struct net_device *dev;
struct pci_dev *pdev;
@@ -1767,7 +1833,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define MAX_DMAE_C_PER_PORT 8
#define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
- BP_E1HVN(bp))
+ BP_VN(bp))
#define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
E1HVN_MAX)
@@ -1793,7 +1859,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
/* must be used on a CID before placing it on a HW ring */
#define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \
- (BP_E1HVN(bp) << BNX2X_SWCID_SHIFT) | \
+ (BP_VN(bp) << BNX2X_SWCID_SHIFT) | \
(x))
#define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe))
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 37e5790681a..c4cbf973641 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -987,8 +987,6 @@ void __bnx2x_link_report(struct bnx2x *bp)
void bnx2x_init_rx_rings(struct bnx2x *bp)
{
int func = BP_FUNC(bp);
- int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
- ETH_MAX_AGGREGATION_QUEUES_E1H_E2;
u16 ring_prod;
int i, j;
@@ -1001,7 +999,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
if (!fp->disable_tpa) {
 /* Fill the per-aggregation pool */
- for (i = 0; i < max_agg_queues; i++) {
+ for (i = 0; i < MAX_AGG_QS(bp); i++) {
struct bnx2x_agg_info *tpa_info =
&fp->tpa_info[i];
struct sw_rx_bd *first_buf =
@@ -1041,7 +1039,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
bnx2x_free_rx_sge_range(bp, fp,
ring_prod);
bnx2x_free_tpa_pool(bp, fp,
- max_agg_queues);
+ MAX_AGG_QS(bp));
fp->disable_tpa = 1;
ring_prod = 0;
break;
@@ -1137,9 +1135,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
bnx2x_free_rx_bds(fp);
if (!fp->disable_tpa)
- bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
- ETH_MAX_AGGREGATION_QUEUES_E1 :
- ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
+ bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
}
}
@@ -3095,15 +3091,20 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
struct bnx2x_fastpath *fp = &bp->fp[index];
int ring_size = 0;
u8 cos;
+ int rx_ring_size = 0;
/* if rx_ring_size specified - use it */
- int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
- MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
+ if (!bp->rx_ring_size) {
- /* allocate at least number of buffers required by FW */
- rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
- MIN_RX_SIZE_TPA,
- rx_ring_size);
+ rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
+
+ /* allocate at least number of buffers required by FW */
+ rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
+ MIN_RX_SIZE_TPA, rx_ring_size);
+
+ bp->rx_ring_size = rx_ring_size;
+ } else
+ rx_ring_size = bp->rx_ring_size;
/* Common */
sb = &bnx2x_fp(bp, index, status_blk);
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index 223bfeebc59..2dc1199239d 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -1297,7 +1297,7 @@ static inline void bnx2x_init_txdata(struct bnx2x *bp,
static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
{
return bp->cnic_base_cl_id + cl_idx +
- (bp->pf_num >> 1) * NON_ETH_CONTEXT_USE;
+ (bp->pf_num >> 1) * BNX2X_MAX_CNIC_ETH_CL_ID_IDX;
}
static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp)
diff --git a/drivers/net/bnx2x/bnx2x_dcb.c b/drivers/net/bnx2x/bnx2x_dcb.c
index a1e004a82f7..0b4acf67e0c 100644
--- a/drivers/net/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/bnx2x/bnx2x_dcb.c
@@ -2120,6 +2120,7 @@ static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
break;
case DCB_CAP_ATTR_DCBX:
*cap = BNX2X_DCBX_CAPS;
+ break;
default:
rval = -EINVAL;
break;
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index 221863059da..cf3e47914dd 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -363,13 +363,50 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
}
/* advertise the requested speed and duplex if supported */
- cmd->advertising &= bp->port.supported[cfg_idx];
+ if (cmd->advertising & ~(bp->port.supported[cfg_idx])) {
+ DP(NETIF_MSG_LINK, "Advertisement parameters "
+ "are not supported\n");
+ return -EINVAL;
+ }
bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG;
- bp->link_params.req_duplex[cfg_idx] = DUPLEX_FULL;
- bp->port.advertising[cfg_idx] |= (ADVERTISED_Autoneg |
+ bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
+ bp->port.advertising[cfg_idx] = (ADVERTISED_Autoneg |
cmd->advertising);
+ if (cmd->advertising) {
+
+ bp->link_params.speed_cap_mask[cfg_idx] = 0;
+ if (cmd->advertising & ADVERTISED_10baseT_Half) {
+ bp->link_params.speed_cap_mask[cfg_idx] |=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF;
+ }
+ if (cmd->advertising & ADVERTISED_10baseT_Full)
+ bp->link_params.speed_cap_mask[cfg_idx] |=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL;
+ if (cmd->advertising & ADVERTISED_100baseT_Full)
+ bp->link_params.speed_cap_mask[cfg_idx] |=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL;
+
+ if (cmd->advertising & ADVERTISED_100baseT_Half) {
+ bp->link_params.speed_cap_mask[cfg_idx] |=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF;
+ }
+ if (cmd->advertising & ADVERTISED_1000baseT_Half) {
+ bp->link_params.speed_cap_mask[cfg_idx] |=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
+ }
+ if (cmd->advertising & (ADVERTISED_1000baseT_Full |
+ ADVERTISED_1000baseKX_Full))
+ bp->link_params.speed_cap_mask[cfg_idx] |=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
+
+ if (cmd->advertising & (ADVERTISED_10000baseT_Full |
+ ADVERTISED_10000baseKX4_Full |
+ ADVERTISED_10000baseKR_Full))
+ bp->link_params.speed_cap_mask[cfg_idx] |=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G;
+ }
} else { /* forced speed */
/* advertise the requested speed and duplex if supported */
switch (speed) {
@@ -1310,10 +1347,7 @@ static void bnx2x_get_ringparam(struct net_device *dev,
if (bp->rx_ring_size)
ering->rx_pending = bp->rx_ring_size;
else
- if (bp->state == BNX2X_STATE_OPEN && bp->num_queues)
- ering->rx_pending = MAX_RX_AVAIL/bp->num_queues;
- else
- ering->rx_pending = MAX_RX_AVAIL;
+ ering->rx_pending = MAX_RX_AVAIL;
ering->rx_mini_pending = 0;
ering->rx_jumbo_pending = 0;
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index d45b1555a60..ba15bdc5a1a 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -778,9 +778,9 @@ static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp,
{
u32 nig_reg_adress_crd_weight = 0;
u32 pbf_reg_adress_crd_weight = 0;
- /* Calculate and set BW for this COS*/
- const u32 cos_bw_nig = (bw * min_w_val_nig) / total_bw;
- const u32 cos_bw_pbf = (bw * min_w_val_pbf) / total_bw;
+ /* Calculate and set BW for this COS - use 1 instead of 0 for BW */
+ const u32 cos_bw_nig = ((bw ? bw : 1) * min_w_val_nig) / total_bw;
+ const u32 cos_bw_pbf = ((bw ? bw : 1) * min_w_val_pbf) / total_bw;
switch (cos_entry) {
case 0:
@@ -852,18 +852,12 @@ static int bnx2x_ets_e3b0_get_total_bw(
/* Calculate total BW requested */
for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) {
-
- if (0 == ets_params->cos[cos_idx].params.bw_params.bw) {
- DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW"
- "was set to 0\n");
- return -EINVAL;
+ *total_bw +=
+ ets_params->cos[cos_idx].params.bw_params.bw;
}
- *total_bw +=
- ets_params->cos[cos_idx].params.bw_params.bw;
- }
}
- /*Check taotl BW is valid */
+ /* Check total BW is valid */
if ((100 != *total_bw) || (0 == *total_bw)) {
if (0 == *total_bw) {
 DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config total BW"
@@ -1726,7 +1720,7 @@ static int bnx2x_xmac_enable(struct link_params *params,
/* Check loopback mode */
if (lb)
- val |= XMAC_CTRL_REG_CORE_LOCAL_LPBK;
+ val |= XMAC_CTRL_REG_LINE_LOCAL_LPBK;
REG_WR(bp, xmac_base + XMAC_REG_CTRL, val);
bnx2x_set_xumac_nig(params,
((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1);
@@ -3630,6 +3624,12 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16);
+ /* Advertise and set FEC (Forward Error Correction) */
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2,
+ (MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY |
+ MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ));
+
/* Enable CL37 BAM */
if (REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region, dev_info.
@@ -5924,7 +5924,7 @@ int bnx2x_set_led(struct link_params *params,
(tmp | EMAC_LED_OVERRIDE));
/*
* return here without enabling traffic
- * LED blink andsetting rate in ON mode.
+ * LED blink and setting rate in ON mode.
* In oper mode, enabling LED blink
* and setting rate is needed.
*/
@@ -5936,7 +5936,11 @@ int bnx2x_set_led(struct link_params *params,
* This is a work-around for HW issue found when link
* is up in CL73
*/
- REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
+ if ((!CHIP_IS_E3(bp)) ||
+ (CHIP_IS_E3(bp) &&
+ mode == LED_MODE_ON))
+ REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
+
if (CHIP_IS_E1x(bp) ||
CHIP_IS_E2(bp) ||
(mode == LED_MODE_ON))
@@ -10638,8 +10642,7 @@ static struct bnx2x_phy phy_warpcore = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
.addr = 0xff,
.def_md_devad = 0,
- .flags = (FLAGS_HW_LOCK_REQUIRED |
- FLAGS_TX_ERROR_CHECK),
+ .flags = FLAGS_HW_LOCK_REQUIRED,
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0,
@@ -10765,8 +10768,7 @@ static struct bnx2x_phy phy_8706 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706,
.addr = 0xff,
.def_md_devad = 0,
- .flags = (FLAGS_INIT_XGXS_FIRST |
- FLAGS_TX_ERROR_CHECK),
+ .flags = FLAGS_INIT_XGXS_FIRST,
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0,
@@ -10797,8 +10799,7 @@ static struct bnx2x_phy phy_8726 = {
.addr = 0xff,
.def_md_devad = 0,
.flags = (FLAGS_HW_LOCK_REQUIRED |
- FLAGS_INIT_XGXS_FIRST |
- FLAGS_TX_ERROR_CHECK),
+ FLAGS_INIT_XGXS_FIRST),
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0,
@@ -10829,8 +10830,7 @@ static struct bnx2x_phy phy_8727 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
.addr = 0xff,
.def_md_devad = 0,
- .flags = (FLAGS_FAN_FAILURE_DET_REQ |
- FLAGS_TX_ERROR_CHECK),
+ .flags = FLAGS_FAN_FAILURE_DET_REQ,
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0,
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index f74582a22c6..15f800085bb 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -407,8 +407,8 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
- opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
- (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
+ opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
+ (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
#ifdef __BIG_ENDIAN
@@ -1419,7 +1419,7 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp)
if (!CHIP_IS_E1(bp)) {
/* init leading/trailing edge */
if (IS_MF(bp)) {
- val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
+ val = (0xee0f | (1 << (BP_VN(bp) + 4)));
if (bp->port.pmf)
/* enable nig and gpio3 attention */
val |= 0x1100;
@@ -1471,7 +1471,7 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp)
/* init leading/trailing edge */
if (IS_MF(bp)) {
- val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
+ val = (0xee0f | (1 << (BP_VN(bp) + 4)));
if (bp->port.pmf)
/* enable nig and gpio3 attention */
val |= 0x1100;
@@ -2287,7 +2287,7 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
int vn;
bp->vn_weight_sum = 0;
- for (vn = VN_0; vn < E1HVN_MAX; vn++) {
+ for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
u32 vn_cfg = bp->mf_config[vn];
u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
@@ -2320,12 +2320,18 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}
+/* returns the function number for a given VN on the current port */
+static inline int func_by_vn(struct bnx2x *bp, int vn)
+{
+ return 2 * vn + BP_PORT(bp);
+}
+
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
{
struct rate_shaping_vars_per_vn m_rs_vn;
struct fairness_vars_per_vn m_fair_vn;
u32 vn_cfg = bp->mf_config[vn];
- int func = 2*vn + BP_PORT(bp);
+ int func = func_by_vn(bp, vn);
u16 vn_min_rate, vn_max_rate;
int i;
@@ -2422,7 +2428,7 @@ void bnx2x_read_mf_cfg(struct bnx2x *bp)
*
* and there are 2 functions per port
*/
- for (vn = VN_0; vn < E1HVN_MAX; vn++) {
+ for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
if (func >= E1H_FUNC_MAX)
@@ -2454,7 +2460,7 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
/* calculate and set min-max rate for each vn */
if (bp->port.pmf)
- for (vn = VN_0; vn < E1HVN_MAX; vn++)
+ for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
bnx2x_init_vn_minmax(bp, vn);
/* always enable rate shaping and fairness */
@@ -2473,16 +2479,15 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
{
- int port = BP_PORT(bp);
int func;
int vn;
/* Set the attention towards other drivers on the same port */
- for (vn = VN_0; vn < E1HVN_MAX; vn++) {
- if (vn == BP_E1HVN(bp))
+ for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
+ if (vn == BP_VN(bp))
continue;
- func = ((vn << 1) | port);
+ func = func_by_vn(bp, vn);
REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
(LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
}
@@ -2577,7 +2582,7 @@ static void bnx2x_pmf_update(struct bnx2x *bp)
bnx2x_dcbx_pmf_update(bp);
/* enable nig attention */
- val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
+ val = (0xff0f | (1 << (BP_VN(bp) + 4)));
if (bp->common.int_block == INT_BLOCK_HC) {
REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
@@ -2756,8 +2761,14 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
u16 tpa_agg_size = 0;
if (!fp->disable_tpa) {
- pause->sge_th_hi = 250;
- pause->sge_th_lo = 150;
+ pause->sge_th_lo = SGE_TH_LO(bp);
+ pause->sge_th_hi = SGE_TH_HI(bp);
+
+ /* validate that the SGE ring has enough entries to cross the high threshold */
+ WARN_ON(bp->dropless_fc &&
+ pause->sge_th_hi + FW_PREFETCH_CNT >
+ MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
+
tpa_agg_size = min_t(u32,
(min_t(u32, 8, MAX_SKB_FRAGS) *
SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
@@ -2771,10 +2782,21 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
/* pause - not for e1 */
if (!CHIP_IS_E1(bp)) {
- pause->bd_th_hi = 350;
- pause->bd_th_lo = 250;
- pause->rcq_th_hi = 350;
- pause->rcq_th_lo = 250;
+ pause->bd_th_lo = BD_TH_LO(bp);
+ pause->bd_th_hi = BD_TH_HI(bp);
+
+ pause->rcq_th_lo = RCQ_TH_LO(bp);
+ pause->rcq_th_hi = RCQ_TH_HI(bp);
+ /*
+ * validate that rings have enough entries to cross
+ * high thresholds
+ */
+ WARN_ON(bp->dropless_fc &&
+ pause->bd_th_hi + FW_PREFETCH_CNT >
+ bp->rx_ring_size);
+ WARN_ON(bp->dropless_fc &&
+ pause->rcq_th_hi + FW_PREFETCH_CNT >
+ NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);
pause->pri_map = 1;
}
@@ -2802,9 +2824,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
 * For PF Clients it should be the maximum available number.
* VF driver(s) may want to define it to a smaller value.
*/
- rxq_init->max_tpa_queues =
- (CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
- ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
+ rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
rxq_init->fw_sb_id = fp->fw_sb_id;
@@ -4808,6 +4828,37 @@ void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
hc_sm->time_to_expire = 0xFFFFFFFF;
}
+
+/* allocates state machine ids. */
+static inline
+void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
+{
+ /* zero out state machine indices */
+ /* rx indices */
+ index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
+
+ /* tx indices */
+ index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
+
+ /* map indices */
+ /* rx indices */
+ index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
+ SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+
+ /* tx indices */
+ index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
+ SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
+ SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
+ SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
+ SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+}
+
static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
u8 vf_valid, int fw_sb_id, int igu_sb_id)
{
@@ -4839,6 +4890,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
hc_sm_p = sb_data_e2.common.state_machine;
sb_data_p = (u32 *)&sb_data_e2;
data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
+ bnx2x_map_sb_state_machines(sb_data_e2.index_data);
} else {
memset(&sb_data_e1x, 0,
sizeof(struct hc_status_block_data_e1x));
@@ -4853,6 +4905,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
hc_sm_p = sb_data_e1x.common.state_machine;
sb_data_p = (u32 *)&sb_data_e1x;
data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
+ bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
}
bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
@@ -4890,7 +4943,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)
int igu_seg_id;
int port = BP_PORT(bp);
int func = BP_FUNC(bp);
- int reg_offset;
+ int reg_offset, reg_offset_en5;
u64 section;
int index;
struct hc_sp_status_block_data sp_sb_data;
@@ -4913,6 +4966,8 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)
reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
+ reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
+ MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
int sindex;
/* take care of sig[0]..sig[4] */
@@ -4927,7 +4982,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)
* and not 16 between the different groups
*/
bp->attn_group[index].sig[4] = REG_RD(bp,
- reg_offset + 0x10 + 0x4*index);
+ reg_offset_en5 + 0x4*index);
else
bp->attn_group[index].sig[4] = 0;
}
@@ -5802,7 +5857,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
* take the UNDI lock to protect undi_unload flow from accessing
* registers while we're resetting the chip
*/
- bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
bnx2x_reset_common(bp);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
@@ -5814,7 +5869,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
}
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
- bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
@@ -6671,12 +6726,16 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
if (CHIP_MODE_IS_4_PORT(bp))
dsb_idx = BP_FUNC(bp);
else
- dsb_idx = BP_E1HVN(bp);
+ dsb_idx = BP_VN(bp);
prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
IGU_BC_BASE_DSB_PROD + dsb_idx :
IGU_NORM_BASE_DSB_PROD + dsb_idx);
+ /*
+ * IGU producers come in chunks of E1HVN_MAX (4),
+ * regardless of the current chip mode
+ */
for (i = 0; i < (num_segs * E1HVN_MAX);
i += E1HVN_MAX) {
addr = IGU_REG_PROD_CONS_MEMORY +
@@ -7568,9 +7627,12 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
u8 *mac_addr = bp->dev->dev_addr;
u32 val;
+ u16 pmc;
+
/* The mac address is written to entries 1-4 to
- preserve entry 0 which is used by the PMF */
- u8 entry = (BP_E1HVN(bp) + 1)*8;
+ * preserve entry 0 which is used by the PMF
+ */
+ u8 entry = (BP_VN(bp) + 1)*8;
val = (mac_addr[0] << 8) | mac_addr[1];
EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
@@ -7579,6 +7641,11 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
(mac_addr[4] << 8) | mac_addr[5];
EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
+ /* Enable the PME and clear the status */
+ pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc);
+ pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
+ pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc);
+
reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
} else
@@ -8546,10 +8613,12 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
/* Check if there is any driver already loaded */
val = REG_RD(bp, MISC_REG_UNPREPARED);
if (val == 0x1) {
- /* Check if it is the UNDI driver
+
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+ /*
+ * Check if it is the UNDI driver
* UNDI driver initializes CID offset for normal bell to 0x7
*/
- bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
if (val == 0x7) {
u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
@@ -8587,9 +8656,6 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
bnx2x_fw_command(bp, reset_code, 0);
}
- /* now it's safe to release the lock */
- bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
-
bnx2x_undi_int_disable(bp);
port = BP_PORT(bp);
@@ -8639,8 +8705,10 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
bp->fw_seq =
(SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
DRV_MSG_SEQ_NUMBER_MASK);
- } else
- bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+ }
+
+ /* now it's safe to release the lock */
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
}
}
@@ -8777,13 +8845,13 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
{
int pfid = BP_FUNC(bp);
- int vn = BP_E1HVN(bp);
int igu_sb_id;
u32 val;
u8 fid, igu_sb_cnt = 0;
bp->igu_base_sb = 0xff;
if (CHIP_INT_MODE_IS_BC(bp)) {
+ int vn = BP_VN(bp);
igu_sb_cnt = bp->igu_sb_cnt;
bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
FP_SB_MAX_E1x;
@@ -9416,6 +9484,10 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
bp->igu_base_sb = 0;
} else {
bp->common.int_block = INT_BLOCK_IGU;
+
+ /* do not allow device reset during IGU info processing */
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+
val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
@@ -9447,6 +9519,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
bnx2x_get_igu_cam_info(bp);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
}
/*
@@ -9473,7 +9546,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
bp->mf_ov = 0;
bp->mf_mode = 0;
- vn = BP_E1HVN(bp);
+ vn = BP_VN(bp);
if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
@@ -9593,13 +9666,6 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
/* port info */
bnx2x_get_port_hwinfo(bp);
- if (!BP_NOMCP(bp)) {
- bp->fw_seq =
- (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
- DRV_MSG_SEQ_NUMBER_MASK);
- BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
- }
-
/* Get MAC addresses */
bnx2x_get_mac_hwinfo(bp);
@@ -9765,6 +9831,14 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
if (!BP_NOMCP(bp))
bnx2x_undi_unload(bp);
+ /* init fw_seq after undi_unload! */
+ if (!BP_NOMCP(bp)) {
+ bp->fw_seq =
+ (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK);
+ BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
+ }
+
if (CHIP_REV_IS_FPGA(bp))
dev_err(&bp->pdev->dev, "FPGA detected\n");
@@ -10259,17 +10333,21 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
/* clean indirect addresses */
pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
PCICFG_VENDOR_ID_OFFSET);
- /* Clean the following indirect addresses for all functions since it
+ /*
+ * Clean the following indirect addresses for all functions since it
* is not used by the driver.
*/
REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
+
+ if (CHIP_IS_E1x(bp)) {
+ REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
+ }
/*
* Enable internal target-read (in case we are probed after PF FLR).
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index 40266c14e6d..fc7bd0f23c0 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -1384,6 +1384,18 @@
Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
#define MISC_REG_AEU_ENABLE4_PXP_0 0xa108
#define MISC_REG_AEU_ENABLE4_PXP_1 0xa1a8
+/* [RW 32] fifth 32b for enabling the output for function 0 output0. Mapped
+ * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC
+ * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6]
+ * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1
+ * parity; [31-10] Reserved; */
+#define MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0 0xa688
+/* [RW 32] Fifth 32b for enabling the output for function 1 output0. Mapped
+ * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC
+ * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6]
+ * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1
+ * parity; [31-10] Reserved; */
+#define MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 0xa6b0
/* [RW 1] set/clr general attention 0; this will set/clr bit 94 in the aeu
128 bit vector */
#define MISC_REG_AEU_GENERAL_ATTN_0 0xa000
@@ -5320,7 +5332,7 @@
#define XCM_REG_XX_OVFL_EVNT_ID 0x20058
#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS (0x1<<0)
#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS (0x1<<1)
-#define XMAC_CTRL_REG_CORE_LOCAL_LPBK (0x1<<3)
+#define XMAC_CTRL_REG_LINE_LOCAL_LPBK (0x1<<2)
#define XMAC_CTRL_REG_RX_EN (0x1<<1)
#define XMAC_CTRL_REG_SOFT_RESET (0x1<<6)
#define XMAC_CTRL_REG_TX_EN (0x1<<0)
@@ -5766,7 +5778,7 @@
#define HW_LOCK_RESOURCE_RECOVERY_LEADER_0 8
#define HW_LOCK_RESOURCE_RECOVERY_LEADER_1 9
#define HW_LOCK_RESOURCE_SPIO 2
-#define HW_LOCK_RESOURCE_UNDI 5
+#define HW_LOCK_RESOURCE_RESET 5
#define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4)
#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5)
#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18)
@@ -6853,6 +6865,9 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_WC_REG_IEEE0BLK_AUTONEGNP 0x7
#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0 0x10
#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1 0x11
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2 0x12
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY 0x4000
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ 0x8000
#define MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150 0x96
#define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL 0x8000
#define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1 0x800e
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index 771f6803b23..9908f2bbcf7 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -710,7 +710,8 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
break;
case MAC_TYPE_NONE: /* unreached */
- BNX2X_ERR("stats updated by DMAE but no MAC active\n");
+ DP(BNX2X_MSG_STATS,
+ "stats updated by DMAE but no MAC active\n");
return -1;
default: /* unreached */
@@ -1391,7 +1392,7 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)
static void bnx2x_func_stats_base_init(struct bnx2x *bp)
{
- int vn, vn_max = IS_MF(bp) ? E1HVN_MAX : E1VN_MAX;
+ int vn, vn_max = IS_MF(bp) ? BP_MAX_VN_NUM(bp) : E1VN_MAX;
u32 func_stx;
/* sanity */
@@ -1404,7 +1405,7 @@ static void bnx2x_func_stats_base_init(struct bnx2x *bp)
func_stx = bp->func_stx;
for (vn = VN_0; vn < vn_max; vn++) {
- int mb_idx = CHIP_IS_E1x(bp) ? 2*vn + BP_PORT(bp) : vn;
+ int mb_idx = BP_FW_MB_IDX_VN(bp, vn);
bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
bnx2x_func_stats_init(bp);
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index a047eb973e3..47b928ed08f 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2168,7 +2168,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
}
re_arm:
- queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
+ if (!bond->kill_timers)
+ queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
out:
read_unlock(&bond->lock);
}
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 7f8b20a34ee..d4fbd2e6261 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1440,7 +1440,8 @@ void bond_alb_monitor(struct work_struct *work)
}
re_arm:
- queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks);
+ if (!bond->kill_timers)
+ queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks);
out:
read_unlock(&bond->lock);
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 43f2ea54108..de3d351ccb6 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -777,6 +777,9 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
read_lock(&bond->lock);
+ if (bond->kill_timers)
+ goto out;
+
/* rejoin all groups on bond device */
__bond_resend_igmp_join_requests(bond->dev);
@@ -790,9 +793,9 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
__bond_resend_igmp_join_requests(vlan_dev);
}
- if (--bond->igmp_retrans > 0)
+ if ((--bond->igmp_retrans > 0) && !bond->kill_timers)
queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
-
+out:
read_unlock(&bond->lock);
}
@@ -1432,6 +1435,8 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
struct sk_buff *skb = *pskb;
struct slave *slave;
struct bonding *bond;
+ void (*recv_probe)(struct sk_buff *, struct bonding *,
+ struct slave *);
skb = skb_share_check(skb, GFP_ATOMIC);
if (unlikely(!skb))
@@ -1445,11 +1450,12 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
if (bond->params.arp_interval)
slave->dev->last_rx = jiffies;
- if (bond->recv_probe) {
+ recv_probe = ACCESS_ONCE(bond->recv_probe);
+ if (recv_probe) {
struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
if (likely(nskb)) {
- bond->recv_probe(nskb, bond, slave);
+ recv_probe(nskb, bond, slave);
dev_kfree_skb(nskb);
}
}
@@ -2538,7 +2544,7 @@ void bond_mii_monitor(struct work_struct *work)
}
re_arm:
- if (bond->params.miimon)
+ if (bond->params.miimon && !bond->kill_timers)
queue_delayed_work(bond->wq, &bond->mii_work,
msecs_to_jiffies(bond->params.miimon));
out:
@@ -2886,7 +2892,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
}
re_arm:
- if (bond->params.arp_interval)
+ if (bond->params.arp_interval && !bond->kill_timers)
queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
out:
read_unlock(&bond->lock);
@@ -3154,7 +3160,7 @@ void bond_activebackup_arp_mon(struct work_struct *work)
bond_ab_arp_probe(bond);
re_arm:
- if (bond->params.arp_interval)
+ if (bond->params.arp_interval && !bond->kill_timers)
queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
out:
read_unlock(&bond->lock);
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 92feac68b66..4cc6f44c2ba 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -261,11 +261,13 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
void __iomem *data = &regs->tx.dsr1_0;
u16 *payload = (u16 *)frame->data;
- /* It is safe to write into dsr[dlc+1] */
- for (i = 0; i < (frame->can_dlc + 1) / 2; i++) {
+ for (i = 0; i < frame->can_dlc / 2; i++) {
out_be16(data, *payload++);
data += 2 + _MSCAN_RESERVED_DSR_SIZE;
}
+ /* write remaining byte if necessary */
+ if (frame->can_dlc & 1)
+ out_8(data, frame->data[frame->can_dlc - 1]);
}
out_8(&regs->tx.dlr, frame->can_dlc);
@@ -330,10 +332,13 @@ static void mscan_get_rx_frame(struct net_device *dev, struct can_frame *frame)
void __iomem *data = &regs->rx.dsr1_0;
u16 *payload = (u16 *)frame->data;
- for (i = 0; i < (frame->can_dlc + 1) / 2; i++) {
+ for (i = 0; i < frame->can_dlc / 2; i++) {
*payload++ = in_be16(data);
data += 2 + _MSCAN_RESERVED_DSR_SIZE;
}
+ /* read remaining byte if necessary */
+ if (frame->can_dlc & 1)
+ frame->data[frame->can_dlc - 1] = in_8(data);
}
out_8(&regs->canrflg, MSCAN_RXF);
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index a81249246ec..2adc294f512 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -46,6 +46,7 @@
#include <linux/skbuff.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/io.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index 805076c54f1..da5a5d9b8af 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -1146,12 +1146,14 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
if (te && te->ctx && te->client && te->client->redirect) {
update_tcb = te->client->redirect(te->ctx, old, new, e);
if (update_tcb) {
+ rcu_read_lock();
l2t_hold(L2DATA(tdev), e);
+ rcu_read_unlock();
set_l2t_ix(tdev, tid, e);
}
}
}
- l2t_release(L2DATA(tdev), e);
+ l2t_release(tdev, e);
}
/*
@@ -1264,7 +1266,7 @@ int cxgb3_offload_activate(struct adapter *adapter)
goto out_free;
err = -ENOMEM;
- L2DATA(dev) = t3_init_l2t(l2t_capacity);
+ RCU_INIT_POINTER(dev->l2opt, t3_init_l2t(l2t_capacity));
if (!L2DATA(dev))
goto out_free;
@@ -1298,16 +1300,24 @@ int cxgb3_offload_activate(struct adapter *adapter)
out_free_l2t:
t3_free_l2t(L2DATA(dev));
- L2DATA(dev) = NULL;
+ rcu_assign_pointer(dev->l2opt, NULL);
out_free:
kfree(t);
return err;
}
+static void clean_l2_data(struct rcu_head *head)
+{
+ struct l2t_data *d = container_of(head, struct l2t_data, rcu_head);
+ t3_free_l2t(d);
+}
+
+
void cxgb3_offload_deactivate(struct adapter *adapter)
{
struct t3cdev *tdev = &adapter->tdev;
struct t3c_data *t = T3C_DATA(tdev);
+ struct l2t_data *d;
remove_adapter(adapter);
if (list_empty(&adapter_list))
@@ -1315,8 +1325,11 @@ void cxgb3_offload_deactivate(struct adapter *adapter)
free_tid_maps(&t->tid_maps);
T3C_DATA(tdev) = NULL;
- t3_free_l2t(L2DATA(tdev));
- L2DATA(tdev) = NULL;
+ rcu_read_lock();
+ d = L2DATA(tdev);
+ rcu_read_unlock();
+ rcu_assign_pointer(tdev->l2opt, NULL);
+ call_rcu(&d->rcu_head, clean_l2_data);
if (t->nofail_skb)
kfree_skb(t->nofail_skb);
kfree(t);
diff --git a/drivers/net/cxgb3/l2t.c b/drivers/net/cxgb3/l2t.c
index f452c400325..41540978a17 100644
--- a/drivers/net/cxgb3/l2t.c
+++ b/drivers/net/cxgb3/l2t.c
@@ -300,14 +300,21 @@ static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
struct net_device *dev)
{
- struct l2t_entry *e;
- struct l2t_data *d = L2DATA(cdev);
+ struct l2t_entry *e = NULL;
+ struct l2t_data *d;
+ int hash;
u32 addr = *(u32 *) neigh->primary_key;
int ifidx = neigh->dev->ifindex;
- int hash = arp_hash(addr, ifidx, d);
struct port_info *p = netdev_priv(dev);
int smt_idx = p->port_id;
+ rcu_read_lock();
+ d = L2DATA(cdev);
+ if (!d)
+ goto done_rcu;
+
+ hash = arp_hash(addr, ifidx, d);
+
write_lock_bh(&d->lock);
for (e = d->l2tab[hash].first; e; e = e->next)
if (e->addr == addr && e->ifindex == ifidx &&
@@ -338,6 +345,8 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
}
done:
write_unlock_bh(&d->lock);
+done_rcu:
+ rcu_read_unlock();
return e;
}
diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
index 7a12d52ed4f..c5f54796e2c 100644
--- a/drivers/net/cxgb3/l2t.h
+++ b/drivers/net/cxgb3/l2t.h
@@ -76,6 +76,7 @@ struct l2t_data {
atomic_t nfree; /* number of free entries */
rwlock_t lock;
struct l2t_entry l2tab[0];
+ struct rcu_head rcu_head; /* to handle rcu cleanup */
};
typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
@@ -99,7 +100,7 @@ static inline void set_arp_failure_handler(struct sk_buff *skb,
/*
* Getting to the L2 data from an offload device.
*/
-#define L2DATA(dev) ((dev)->l2opt)
+#define L2DATA(cdev) (rcu_dereference((cdev)->l2opt))
#define W_TCB_L2T_IX 0
#define S_TCB_L2T_IX 7
@@ -126,15 +127,22 @@ static inline int l2t_send(struct t3cdev *dev, struct sk_buff *skb,
return t3_l2t_send_slow(dev, skb, e);
}
-static inline void l2t_release(struct l2t_data *d, struct l2t_entry *e)
+static inline void l2t_release(struct t3cdev *t, struct l2t_entry *e)
{
- if (atomic_dec_and_test(&e->refcnt))
+ struct l2t_data *d;
+
+ rcu_read_lock();
+ d = L2DATA(t);
+
+ if (atomic_dec_and_test(&e->refcnt) && d)
t3_l2e_free(d, e);
+
+ rcu_read_unlock();
}
static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
{
- if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */
+ if (d && atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */
atomic_dec(&d->nfree);
}
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index c9957b7f17b..b4efa292fd6 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -3712,6 +3712,9 @@ static int __devinit init_one(struct pci_dev *pdev,
setup_debugfs(adapter);
}
+ /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
+ pdev->needs_freset = 1;
+
if (is_offload(adapter))
attach_ulds(adapter);
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 8545c7aa93e..a5a89ecb6f3 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -4026,6 +4026,12 @@ s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
checksum += eeprom_data;
}
+#ifdef CONFIG_PARISC
+ /* This is a signature and not a checksum on HP c8000 */
+ if ((hw->subsystem_vendor_id == 0x103C) && (eeprom_data == 0x16d6))
+ return E1000_SUCCESS;
+
+#endif
if (checksum == (u16) EEPROM_SUM)
return E1000_SUCCESS;
else {
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 25a8c2adb00..0caf3c323ec 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -1669,10 +1669,10 @@ static int gfar_get_cls_all(struct gfar_private *priv,
u32 i = 0;
list_for_each_entry(comp, &priv->rx_list.list, list) {
- if (i <= cmd->rule_cnt) {
- rule_locs[i] = comp->fs.location;
- i++;
- }
+ if (i == cmd->rule_cnt)
+ return -EMSGSIZE;
+ rule_locs[i] = comp->fs.location;
+ i++;
}
cmd->data = MAX_FILER_IDX;
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
index 16ce45c1193..52a39000c42 100644
--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -428,6 +428,7 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);
status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN);
+ greth->tx_bufs_length[greth->tx_next] = skb->len & GRETH_BD_LEN;
/* Wrap around descriptor ring */
if (greth->tx_next == GRETH_TXBD_NUM_MASK) {
@@ -490,7 +491,8 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
if (nr_frags != 0)
status = GRETH_TXBD_MORE;
- status |= GRETH_TXBD_CSALL;
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ status |= GRETH_TXBD_CSALL;
status |= skb_headlen(skb) & GRETH_BD_LEN;
if (greth->tx_next == GRETH_TXBD_NUM_MASK)
status |= GRETH_BD_WR;
@@ -513,7 +515,9 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
greth->tx_skbuff[curr_tx] = NULL;
bdp = greth->tx_bd_base + curr_tx;
- status = GRETH_TXBD_CSALL | GRETH_BD_EN;
+ status = GRETH_BD_EN;
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ status |= GRETH_TXBD_CSALL;
status |= frag->size & GRETH_BD_LEN;
/* Wrap around descriptor ring */
@@ -641,6 +645,7 @@ static void greth_clean_tx(struct net_device *dev)
dev->stats.tx_fifo_errors++;
}
dev->stats.tx_packets++;
+ dev->stats.tx_bytes += greth->tx_bufs_length[greth->tx_last];
greth->tx_last = NEXT_TX(greth->tx_last);
greth->tx_free++;
}
@@ -695,6 +700,7 @@ static void greth_clean_tx_gbit(struct net_device *dev)
greth->tx_skbuff[greth->tx_last] = NULL;
greth_update_tx_stats(dev, stat);
+ dev->stats.tx_bytes += skb->len;
bdp = greth->tx_bd_base + greth->tx_last;
@@ -796,6 +802,7 @@ static int greth_rx(struct net_device *dev, int limit)
memcpy(skb_put(skb, pkt_len), phys_to_virt(dma_addr), pkt_len);
skb->protocol = eth_type_trans(skb, dev);
+ dev->stats.rx_bytes += pkt_len;
dev->stats.rx_packets++;
netif_receive_skb(skb);
}
@@ -910,6 +917,7 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
skb->protocol = eth_type_trans(skb, dev);
dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
netif_receive_skb(skb);
greth->rx_skbuff[greth->rx_cur] = newskb;
diff --git a/drivers/net/greth.h b/drivers/net/greth.h
index 9a0040dee4d..232a622a85b 100644
--- a/drivers/net/greth.h
+++ b/drivers/net/greth.h
@@ -103,6 +103,7 @@ struct greth_private {
unsigned char *tx_bufs[GRETH_TXBD_NUM];
unsigned char *rx_bufs[GRETH_RXBD_NUM];
+ u16 tx_bufs_length[GRETH_TXBD_NUM];
u16 tx_next;
u16 tx_last;
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 3e667926940..d393f1e764e 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -636,8 +636,8 @@ static int ibmveth_open(struct net_device *netdev)
netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
netdev->irq, rc);
do {
- rc = h_free_logical_lan(adapter->vdev->unit_address);
- } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
+ lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
+ } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
goto err_out;
}
@@ -757,7 +757,7 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
struct ibmveth_adapter *adapter = netdev_priv(dev);
unsigned long set_attr, clr_attr, ret_attr;
unsigned long set_attr6, clr_attr6;
- long ret, ret6;
+ long ret, ret4, ret6;
int rc1 = 0, rc2 = 0;
int restart = 0;
@@ -770,6 +770,8 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
set_attr = 0;
clr_attr = 0;
+ set_attr6 = 0;
+ clr_attr6 = 0;
if (data) {
set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
@@ -784,16 +786,20 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
!(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
(ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
- ret = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
+ ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
set_attr, &ret_attr);
- if (ret != H_SUCCESS) {
+ if (ret4 != H_SUCCESS) {
netdev_err(dev, "unable to change IPv4 checksum "
"offload settings. %d rc=%ld\n",
- data, ret);
+ data, ret4);
+
+ h_illan_attributes(adapter->vdev->unit_address,
+ set_attr, clr_attr, &ret_attr);
+
+ if (data == 1)
+ dev->features &= ~NETIF_F_IP_CSUM;
- ret = h_illan_attributes(adapter->vdev->unit_address,
- set_attr, clr_attr, &ret_attr);
} else {
adapter->fw_ipv4_csum_support = data;
}
@@ -804,15 +810,18 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
if (ret6 != H_SUCCESS) {
netdev_err(dev, "unable to change IPv6 checksum "
"offload settings. %d rc=%ld\n",
- data, ret);
+ data, ret6);
+
+ h_illan_attributes(adapter->vdev->unit_address,
+ set_attr6, clr_attr6, &ret_attr);
+
+ if (data == 1)
+ dev->features &= ~NETIF_F_IPV6_CSUM;
- ret = h_illan_attributes(adapter->vdev->unit_address,
- set_attr6, clr_attr6,
- &ret_attr);
} else
adapter->fw_ipv6_csum_support = data;
- if (ret != H_SUCCESS || ret6 != H_SUCCESS)
+ if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
adapter->rx_csum = data;
else
rc1 = -EIO;
@@ -930,6 +939,7 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
union ibmveth_buf_desc descs[6];
int last, i;
int force_bounce = 0;
+ dma_addr_t dma_addr;
/*
* veth handles a maximum of 6 segments including the header, so
@@ -994,17 +1004,16 @@ retry_bounce:
}
/* Map the header */
- descs[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
- skb_headlen(skb),
- DMA_TO_DEVICE);
- if (dma_mapping_error(&adapter->vdev->dev, descs[0].fields.address))
+ dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
goto map_failed;
descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
+ descs[0].fields.address = dma_addr;
/* Map the frags */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- unsigned long dma_addr;
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
dma_addr = dma_map_page(&adapter->vdev->dev, frag->page,
@@ -1026,7 +1035,12 @@ retry_bounce:
netdev->stats.tx_bytes += skb->len;
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags + 1; i++)
+ dma_unmap_single(&adapter->vdev->dev,
+ descs[0].fields.address,
+ descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
+ DMA_TO_DEVICE);
+
+ for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
DMA_TO_DEVICE);
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 22790394318..e1fcc958927 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -1321,8 +1321,8 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
if (ring_is_rsc_enabled(rx_ring))
pkt_is_rsc = ixgbe_get_rsc_state(rx_desc);
- /* if this is a skb from previous receive DMA will be 0 */
- if (rx_buffer_info->dma) {
+ /* linear means we are building an skb from multiple pages */
+ if (!skb_is_nonlinear(skb)) {
u16 hlen;
if (pkt_is_rsc &&
!(staterr & IXGBE_RXD_STAT_EOP) &&
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 05172c39a0c..376e3e94bae 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -239,7 +239,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
dest = macvlan_hash_lookup(port, eth->h_dest);
if (dest && dest->mode == MACVLAN_MODE_BRIDGE) {
/* send to lowerdev first for its network taps */
- vlan->forward(vlan->lowerdev, skb);
+ dev_forward_skb(vlan->lowerdev, skb);
return NET_XMIT_SUCCESS;
}
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index 6e03de034ac..f76ab6bf309 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -172,7 +172,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
memset(ring->buf, 0, ring->buf_size);
ring->qp_state = MLX4_QP_STATE_RST;
- ring->doorbell_qpn = swab32(ring->qp.qpn << 8);
+ ring->doorbell_qpn = ring->qp.qpn << 8;
mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
ring->cqn, &ring->context);
@@ -791,7 +791,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
skb_orphan(skb);
if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) {
- *(u32 *) (&tx_desc->ctrl.vlan_tag) |= ring->doorbell_qpn;
+ *(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);
op_own |= htonl((bf_index & 0xffff) << 8);
 /* Ensure new descriptor hits memory
* before setting ownership of this descriptor to HW */
@@ -812,7 +812,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
wmb();
tx_desc->ctrl.owner_opcode = op_own;
wmb();
- writel(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL);
+ iowrite32be(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL);
}
/* Poll CQ here */
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index dfc82720065..e8882023576 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -307,6 +307,11 @@ static ssize_t store_enabled(struct netconsole_target *nt,
return err;
if (enabled < 0 || enabled > 1)
return -EINVAL;
+ if (enabled == nt->enabled) {
+ printk(KERN_INFO "netconsole: network logging has already %s\n",
+ nt->enabled ? "started" : "stopped");
+ return -EINVAL;
+ }
if (enabled) { /* 1 */
@@ -799,5 +804,11 @@ static void __exit cleanup_netconsole(void)
}
}
-module_init(init_netconsole);
+/*
+ * Use late_initcall to ensure netconsole is
+ * initialized after the network device driver when built in.
+ *
+ * late_initcall() and module_init() are identical when built as a module.
+ */
+late_initcall(init_netconsole);
module_exit(cleanup_netconsole);
diff --git a/drivers/net/pch_gbe/pch_gbe.h b/drivers/net/pch_gbe/pch_gbe.h
index 59fac77d0db..a09a07197eb 100644
--- a/drivers/net/pch_gbe/pch_gbe.h
+++ b/drivers/net/pch_gbe/pch_gbe.h
@@ -127,8 +127,8 @@ struct pch_gbe_regs {
/* Reset */
#define PCH_GBE_ALL_RST 0x80000000 /* All reset */
-#define PCH_GBE_TX_RST 0x40000000 /* TX MAC, TX FIFO, TX DMA reset */
-#define PCH_GBE_RX_RST 0x04000000 /* RX MAC, RX FIFO, RX DMA reset */
+#define PCH_GBE_TX_RST 0x00008000 /* TX MAC, TX FIFO, TX DMA reset */
+#define PCH_GBE_RX_RST 0x00004000 /* RX MAC, RX FIFO, RX DMA reset */
/* TCP/IP Accelerator Control */
#define PCH_GBE_EX_LIST_EN 0x00000008 /* External List Enable */
@@ -276,6 +276,9 @@ struct pch_gbe_regs {
#define PCH_GBE_RX_DMA_EN 0x00000002 /* Enables Receive DMA */
#define PCH_GBE_TX_DMA_EN 0x00000001 /* Enables Transmission DMA */
+/* RX DMA STATUS */
+#define PCH_GBE_IDLE_CHECK 0xFFFFFFFE
+
/* Wake On LAN Status */
#define PCH_GBE_WLS_BR 0x00000008 /* Broadcast Address */
#define PCH_GBE_WLS_MLT 0x00000004 /* Multicast Address */
@@ -471,6 +474,7 @@ struct pch_gbe_tx_desc {
struct pch_gbe_buffer {
struct sk_buff *skb;
dma_addr_t dma;
+ unsigned char *rx_buffer;
unsigned long time_stamp;
u16 length;
bool mapped;
@@ -511,6 +515,9 @@ struct pch_gbe_tx_ring {
struct pch_gbe_rx_ring {
struct pch_gbe_rx_desc *desc;
dma_addr_t dma;
+ unsigned char *rx_buff_pool;
+ dma_addr_t rx_buff_pool_logic;
+ unsigned int rx_buff_pool_size;
unsigned int size;
unsigned int count;
unsigned int next_to_use;
@@ -622,6 +629,7 @@ struct pch_gbe_adapter {
unsigned long rx_buffer_len;
unsigned long tx_queue_len;
bool have_msi;
+ bool rx_stop_flag;
};
extern const char pch_driver_version[];
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
index eac3c5ca973..b8b4ba27b0e 100644
--- a/drivers/net/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -20,7 +20,6 @@
#include "pch_gbe.h"
#include "pch_gbe_api.h"
-#include <linux/prefetch.h>
#define DRV_VERSION "1.00"
const char pch_driver_version[] = DRV_VERSION;
@@ -34,11 +33,15 @@ const char pch_driver_version[] = DRV_VERSION;
#define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */
#define PCH_GBE_COPYBREAK_DEFAULT 256
#define PCH_GBE_PCI_BAR 1
+#define PCH_GBE_RESERVE_MEMORY 0x200000 /* 2MB */
/* Macros for ML7223 */
#define PCI_VENDOR_ID_ROHM 0x10db
#define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013
+/* Macros for ML7831 */
+#define PCI_DEVICE_ID_ROHM_ML7831_GBE 0x8802
+
#define PCH_GBE_TX_WEIGHT 64
#define PCH_GBE_RX_WEIGHT 64
#define PCH_GBE_RX_BUFFER_WRITE 16
@@ -52,6 +55,7 @@ const char pch_driver_version[] = DRV_VERSION;
)
/* Ethertype field values */
+#define PCH_GBE_MAX_RX_BUFFER_SIZE 0x2880
#define PCH_GBE_MAX_JUMBO_FRAME_SIZE 10318
#define PCH_GBE_FRAME_SIZE_2048 2048
#define PCH_GBE_FRAME_SIZE_4096 4096
@@ -83,10 +87,12 @@ const char pch_driver_version[] = DRV_VERSION;
#define PCH_GBE_INT_ENABLE_MASK ( \
PCH_GBE_INT_RX_DMA_CMPLT | \
PCH_GBE_INT_RX_DSC_EMP | \
+ PCH_GBE_INT_RX_FIFO_ERR | \
PCH_GBE_INT_WOL_DET | \
PCH_GBE_INT_TX_CMPLT \
)
+#define PCH_GBE_INT_DISABLE_ALL 0
static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
@@ -138,6 +144,27 @@ static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
if (!tmp)
pr_err("Error: busy bit is not cleared\n");
}
+
+/**
+ * pch_gbe_wait_clr_bit_irq - Wait to clear a bit for interrupt context
+ * @reg: Pointer to the register
+ * @bit: Busy bit
+ */
+static int pch_gbe_wait_clr_bit_irq(void *reg, u32 bit)
+{
+ u32 tmp;
+ int ret = -1;
+ /* wait busy */
+ tmp = 20;
+ while ((ioread32(reg) & bit) && --tmp)
+ udelay(5);
+ if (!tmp)
+ pr_err("Error: busy bit is not cleared\n");
+ else
+ ret = 0;
+ return ret;
+}
+
/**
* pch_gbe_mac_mar_set - Set MAC address register
* @hw: Pointer to the HW structure
@@ -189,6 +216,17 @@ static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
return;
}
+static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw)
+{
+ /* Read the MAC address and store it in the private data */
+ pch_gbe_mac_read_mac_addr(hw);
+ iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET);
+ pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST);
+ /* Setup the MAC address */
+ pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
+ return;
+}
+
/**
* pch_gbe_mac_init_rx_addrs - Initialize receive address's
* @hw: Pointer to the HW structure
@@ -671,13 +709,8 @@ static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
tcpip = ioread32(&hw->reg->TCPIP_ACC);
- if (netdev->features & NETIF_F_RXCSUM) {
- tcpip &= ~PCH_GBE_RX_TCPIPACC_OFF;
- tcpip |= PCH_GBE_RX_TCPIPACC_EN;
- } else {
- tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
- tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
- }
+ tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
+ tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
iowrite32(tcpip, &hw->reg->TCPIP_ACC);
return;
}
@@ -717,13 +750,6 @@ static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
iowrite32(rdba, &hw->reg->RX_DSC_BASE);
iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);
iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P);
-
- /* Enables Receive DMA */
- rxdma = ioread32(&hw->reg->DMA_CTRL);
- rxdma |= PCH_GBE_RX_DMA_EN;
- iowrite32(rxdma, &hw->reg->DMA_CTRL);
- /* Enables Receive */
- iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
}
/**
@@ -1097,6 +1123,48 @@ void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
spin_unlock_irqrestore(&adapter->stats_lock, flags);
}
+static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ u32 rxdma;
+ u16 value;
+ int ret;
+
+ /* Disable Receive DMA */
+ rxdma = ioread32(&hw->reg->DMA_CTRL);
+ rxdma &= ~PCH_GBE_RX_DMA_EN;
+ iowrite32(rxdma, &hw->reg->DMA_CTRL);
+ /* Wait Rx DMA BUS is IDLE */
+ ret = pch_gbe_wait_clr_bit_irq(&hw->reg->RX_DMA_ST, PCH_GBE_IDLE_CHECK);
+ if (ret) {
+ /* Disable Bus master */
+ pci_read_config_word(adapter->pdev, PCI_COMMAND, &value);
+ value &= ~PCI_COMMAND_MASTER;
+ pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
+ /* Stop Receive */
+ pch_gbe_mac_reset_rx(hw);
+ /* Enable Bus master */
+ value |= PCI_COMMAND_MASTER;
+ pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
+ } else {
+ /* Stop Receive */
+ pch_gbe_mac_reset_rx(hw);
+ }
+}
+
+static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
+{
+ u32 rxdma;
+
+ /* Enables Receive DMA */
+ rxdma = ioread32(&hw->reg->DMA_CTRL);
+ rxdma |= PCH_GBE_RX_DMA_EN;
+ iowrite32(rxdma, &hw->reg->DMA_CTRL);
+ /* Enables Receive */
+ iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
+ return;
+}
+
/**
* pch_gbe_intr - Interrupt Handler
* @irq: Interrupt number
@@ -1123,7 +1191,17 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)
if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
adapter->stats.intr_rx_frame_err_count++;
if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
- adapter->stats.intr_rx_fifo_err_count++;
+ if (!adapter->rx_stop_flag) {
+ adapter->stats.intr_rx_fifo_err_count++;
+ pr_debug("Rx FIFO overrun\n");
+ adapter->rx_stop_flag = true;
+ int_en = ioread32(&hw->reg->INT_EN);
+ iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
+ &hw->reg->INT_EN);
+ pch_gbe_stop_receive(adapter);
+ int_st |= ioread32(&hw->reg->INT_ST);
+ int_st = int_st & ioread32(&hw->reg->INT_EN);
+ }
if (int_st & PCH_GBE_INT_RX_DMA_ERR)
adapter->stats.intr_rx_dma_err_count++;
if (int_st & PCH_GBE_INT_TX_FIFO_ERR)
@@ -1135,21 +1213,18 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)
/* When Rx descriptor is empty */
if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
adapter->stats.intr_rx_dsc_empty_count++;
- pr_err("Rx descriptor is empty\n");
+ pr_debug("Rx descriptor is empty\n");
int_en = ioread32(&hw->reg->INT_EN);
iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
if (hw->mac.tx_fc_enable) {
/* Set Pause packet */
pch_gbe_mac_set_pause_packet(hw);
}
- if ((int_en & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))
- == 0) {
- return IRQ_HANDLED;
- }
}
/* When request status is Receive interruption */
- if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))) {
+ if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) ||
+ (adapter->rx_stop_flag == true)) {
if (likely(napi_schedule_prep(&adapter->napi))) {
/* Enable only Rx Descriptor empty */
atomic_inc(&adapter->irq_sem);
@@ -1185,29 +1260,23 @@ pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
unsigned int i;
unsigned int bufsz;
- bufsz = adapter->rx_buffer_len + PCH_GBE_DMA_ALIGN;
+ bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
i = rx_ring->next_to_use;
while ((cleaned_count--)) {
buffer_info = &rx_ring->buffer_info[i];
- skb = buffer_info->skb;
- if (skb) {
- skb_trim(skb, 0);
- } else {
- skb = netdev_alloc_skb(netdev, bufsz);
- if (unlikely(!skb)) {
- /* Better luck next round */
- adapter->stats.rx_alloc_buff_failed++;
- break;
- }
- /* 64byte align */
- skb_reserve(skb, PCH_GBE_DMA_ALIGN);
-
- buffer_info->skb = skb;
- buffer_info->length = adapter->rx_buffer_len;
+ skb = netdev_alloc_skb(netdev, bufsz);
+ if (unlikely(!skb)) {
+ /* Better luck next round */
+ adapter->stats.rx_alloc_buff_failed++;
+ break;
}
+ /* align */
+ skb_reserve(skb, NET_IP_ALIGN);
+ buffer_info->skb = skb;
+
buffer_info->dma = dma_map_single(&pdev->dev,
- skb->data,
+ buffer_info->rx_buffer,
buffer_info->length,
DMA_FROM_DEVICE);
if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
@@ -1240,6 +1309,36 @@ pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
return;
}
+static int
+pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct pch_gbe_buffer *buffer_info;
+ unsigned int i;
+ unsigned int bufsz;
+ unsigned int size;
+
+ bufsz = adapter->rx_buffer_len;
+
+ size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
+ rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size,
+ &rx_ring->rx_buff_pool_logic,
+ GFP_KERNEL);
+ if (!rx_ring->rx_buff_pool) {
+ pr_err("Unable to allocate memory for the receive poll buffer\n");
+ return -ENOMEM;
+ }
+ memset(rx_ring->rx_buff_pool, 0, size);
+ rx_ring->rx_buff_pool_size = size;
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i;
+ buffer_info->length = bufsz;
+ }
+ return 0;
+}
+
/**
* pch_gbe_alloc_tx_buffers - Allocate transmit buffers
* @adapter: Board private structure
@@ -1285,7 +1384,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
struct sk_buff *skb;
unsigned int i;
unsigned int cleaned_count = 0;
- bool cleaned = false;
+ bool cleaned = true;
pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
@@ -1296,7 +1395,6 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {
pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status);
- cleaned = true;
buffer_info = &tx_ring->buffer_info[i];
skb = buffer_info->skb;
@@ -1339,8 +1437,10 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
/* weight of a sort for tx, to avoid endless transmit cleanup */
- if (cleaned_count++ == PCH_GBE_TX_WEIGHT)
+ if (cleaned_count++ == PCH_GBE_TX_WEIGHT) {
+ cleaned = false;
break;
+ }
}
pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",
cleaned_count);
@@ -1380,7 +1480,7 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
unsigned int i;
unsigned int cleaned_count = 0;
bool cleaned = false;
- struct sk_buff *skb, *new_skb;
+ struct sk_buff *skb;
u8 dma_status;
u16 gbec_status;
u32 tcp_ip_status;
@@ -1401,13 +1501,12 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
rx_desc->gbec_status = DSC_INIT16;
buffer_info = &rx_ring->buffer_info[i];
skb = buffer_info->skb;
+ buffer_info->skb = NULL;
/* unmap dma */
dma_unmap_single(&pdev->dev, buffer_info->dma,
buffer_info->length, DMA_FROM_DEVICE);
buffer_info->mapped = false;
- /* Prefetch the packet */
- prefetch(skb->data);
pr_debug("RxDecNo = 0x%04x Status[DMA:0x%02x GBE:0x%04x "
"TCP:0x%08x] BufInf = 0x%p\n",
@@ -1427,70 +1526,16 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
pr_err("Receive CRC Error\n");
} else {
/* get receive length */
- /* length convert[-3] */
- length = (rx_desc->rx_words_eob) - 3;
-
- /* Decide the data conversion method */
- if (!(netdev->features & NETIF_F_RXCSUM)) {
- /* [Header:14][payload] */
- if (NET_IP_ALIGN) {
- /* Because alignment differs,
- * the new_skb is newly allocated,
- * and data is copied to new_skb.*/
- new_skb = netdev_alloc_skb(netdev,
- length + NET_IP_ALIGN);
- if (!new_skb) {
- /* dorrop error */
- pr_err("New skb allocation "
- "Error\n");
- goto dorrop;
- }
- skb_reserve(new_skb, NET_IP_ALIGN);
- memcpy(new_skb->data, skb->data,
- length);
- skb = new_skb;
- } else {
- /* DMA buffer is used as SKB as it is.*/
- buffer_info->skb = NULL;
- }
- } else {
- /* [Header:14][padding:2][payload] */
- /* The length includes padding length */
- length = length - PCH_GBE_DMA_PADDING;
- if ((length < copybreak) ||
- (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) {
- /* Because alignment differs,
- * the new_skb is newly allocated,
- * and data is copied to new_skb.
- * Padding data is deleted
- * at the time of a copy.*/
- new_skb = netdev_alloc_skb(netdev,
- length + NET_IP_ALIGN);
- if (!new_skb) {
- /* dorrop error */
- pr_err("New skb allocation "
- "Error\n");
- goto dorrop;
- }
- skb_reserve(new_skb, NET_IP_ALIGN);
- memcpy(new_skb->data, skb->data,
- ETH_HLEN);
- memcpy(&new_skb->data[ETH_HLEN],
- &skb->data[ETH_HLEN +
- PCH_GBE_DMA_PADDING],
- length - ETH_HLEN);
- skb = new_skb;
- } else {
- /* Padding data is deleted
- * by moving header data.*/
- memmove(&skb->data[PCH_GBE_DMA_PADDING],
- &skb->data[0], ETH_HLEN);
- skb_reserve(skb, NET_IP_ALIGN);
- buffer_info->skb = NULL;
- }
- }
- /* The length includes FCS length */
- length = length - ETH_FCS_LEN;
+ /* length conversion [-3]; length includes the FCS */
+ length = (rx_desc->rx_words_eob) - 3 - ETH_FCS_LEN;
+ if (rx_desc->rx_words_eob & 0x02)
+ length = length - 4;
+ /*
+ * buffer_info->rx_buffer: [Header:14][payload]
+ * skb->data: [Reserve:2][Header:14][payload]
+ */
+ memcpy(skb->data, buffer_info->rx_buffer, length);
+
/* update status of driver */
adapter->stats.rx_bytes += length;
adapter->stats.rx_packets++;
@@ -1509,7 +1554,6 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
pr_debug("Receive skb->ip_summed: %d length: %d\n",
skb->ip_summed, length);
}
-dorrop:
/* return some buffers to hardware, one at a time is too slow */
if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
pch_gbe_alloc_rx_buffers(adapter, rx_ring,
@@ -1714,9 +1758,15 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
pr_err("Error: can't bring device up\n");
return err;
}
+ err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);
+ if (err) {
+ pr_err("Error: can't bring device up\n");
+ return err;
+ }
pch_gbe_alloc_tx_buffers(adapter, tx_ring);
pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
adapter->tx_queue_len = netdev->tx_queue_len;
+ pch_gbe_start_receive(&adapter->hw);
mod_timer(&adapter->watchdog_timer, jiffies);
@@ -1734,6 +1784,7 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
void pch_gbe_down(struct pch_gbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
/* signal that we're down so the interrupt handler does not
* reschedule our watchdog timer */
@@ -1752,6 +1803,12 @@ void pch_gbe_down(struct pch_gbe_adapter *adapter)
pch_gbe_reset(adapter);
pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);
+
+ pci_free_consistent(adapter->pdev, rx_ring->rx_buff_pool_size,
+ rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic);
+ rx_ring->rx_buff_pool_logic = 0;
+ rx_ring->rx_buff_pool_size = 0;
+ rx_ring->rx_buff_pool = NULL;
}
/**
@@ -2004,6 +2061,8 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
{
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
int max_frame;
+ unsigned long old_rx_buffer_len = adapter->rx_buffer_len;
+ int err;
max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
@@ -2018,14 +2077,24 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)
adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;
else
- adapter->rx_buffer_len = PCH_GBE_MAX_JUMBO_FRAME_SIZE;
- netdev->mtu = new_mtu;
- adapter->hw.mac.max_frame_size = max_frame;
+ adapter->rx_buffer_len = PCH_GBE_MAX_RX_BUFFER_SIZE;
- if (netif_running(netdev))
- pch_gbe_reinit_locked(adapter);
- else
+ if (netif_running(netdev)) {
+ pch_gbe_down(adapter);
+ err = pch_gbe_up(adapter);
+ if (err) {
+ adapter->rx_buffer_len = old_rx_buffer_len;
+ pch_gbe_up(adapter);
+ return -ENOMEM;
+ } else {
+ netdev->mtu = new_mtu;
+ adapter->hw.mac.max_frame_size = max_frame;
+ }
+ } else {
pch_gbe_reset(adapter);
+ netdev->mtu = new_mtu;
+ adapter->hw.mac.max_frame_size = max_frame;
+ }
pr_debug("max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n",
max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
@@ -2099,33 +2168,39 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
{
struct pch_gbe_adapter *adapter =
container_of(napi, struct pch_gbe_adapter, napi);
- struct net_device *netdev = adapter->netdev;
int work_done = 0;
bool poll_end_flag = false;
bool cleaned = false;
+ u32 int_en;
pr_debug("budget : %d\n", budget);
- /* Keep link state information with original netdev */
- if (!netif_carrier_ok(netdev)) {
- poll_end_flag = true;
- } else {
- cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
- pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
+ pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
+ cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
- if (cleaned)
- work_done = budget;
- /* If no Tx and not enough Rx work done,
- * exit the polling mode
- */
- if ((work_done < budget) || !netif_running(netdev))
- poll_end_flag = true;
- }
+ if (!cleaned)
+ work_done = budget;
+ /* If no Tx and not enough Rx work done,
+ * exit the polling mode
+ */
+ if (work_done < budget)
+ poll_end_flag = true;
if (poll_end_flag) {
napi_complete(napi);
+ if (adapter->rx_stop_flag) {
+ adapter->rx_stop_flag = false;
+ pch_gbe_start_receive(&adapter->hw);
+ }
pch_gbe_irq_enable(adapter);
- }
+ } else
+ if (adapter->rx_stop_flag) {
+ adapter->rx_stop_flag = false;
+ pch_gbe_start_receive(&adapter->hw);
+ int_en = ioread32(&adapter->hw.reg->INT_EN);
+ iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR),
+ &adapter->hw.reg->INT_EN);
+ }
pr_debug("poll_end_flag : %d work_done : %d budget : %d\n",
poll_end_flag, work_done, budget);
@@ -2452,6 +2527,13 @@ static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {
.class = (PCI_CLASS_NETWORK_ETHERNET << 8),
.class_mask = (0xFFFF00)
},
+ {.vendor = PCI_VENDOR_ID_ROHM,
+ .device = PCI_DEVICE_ID_ROHM_ML7831_GBE,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
+ .class_mask = (0xFFFF00)
+ },
/* required last entry */
{0}
};
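
The pch_gbe hunks above move the receive path from per-skb streaming mappings to a single coherent pool sliced into fixed-size slots, with each received frame copied into a small freshly allocated skb. A minimal sketch of that slot carve-up, using simplified stand-in types rather than the real pch_gbe structures:

    /* Sketch only: slice one coherent allocation into per-descriptor
     * receive buffers. 'struct rx_slot' and the helper are made-up
     * placeholders, not pch_gbe definitions.
     */
    #include <linux/dma-mapping.h>

    struct rx_slot {
            void *buf;              /* CPU address of this slot */
            dma_addr_t dma;         /* bus address for the NIC */
            unsigned int len;
    };

    static int alloc_rx_pool(struct device *dev, struct rx_slot *slots,
                             unsigned int count, unsigned int bufsz,
                             void **pool, dma_addr_t *pool_dma)
    {
            size_t size = (size_t)count * bufsz;
            unsigned int i;

            *pool = dma_alloc_coherent(dev, size, pool_dma, GFP_KERNEL);
            if (!*pool)
                    return -ENOMEM;

            for (i = 0; i < count; i++) {
                    slots[i].buf = *pool + bufsz * i;
                    slots[i].dma = *pool_dma + bufsz * i;
                    slots[i].len = bufsz;
            }
            return 0;
    }

On receive the driver then memcpy()s the frame from its slot into an skb reserved with NET_IP_ALIGN, trading one copy for a receive ring whose DMA buffers stay allocated for the lifetime of the interface.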
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index cb6e0b486b1..edd7304773e 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -589,7 +589,7 @@ static void decode_rxts(struct dp83640_private *dp83640,
prune_rx_ts(dp83640);
if (list_empty(&dp83640->rxpool)) {
- pr_warning("dp83640: rx timestamp pool is empty\n");
+ pr_debug("dp83640: rx timestamp pool is empty\n");
goto out;
}
rxts = list_first_entry(&dp83640->rxpool, struct rxts, list);
@@ -612,7 +612,7 @@ static void decode_txts(struct dp83640_private *dp83640,
skb = skb_dequeue(&dp83640->tx_queue);
if (!skb) {
- pr_warning("dp83640: have timestamp but tx_queue empty\n");
+ pr_debug("dp83640: have timestamp but tx_queue empty\n");
return;
}
ns = phy2txts(phy_txts);
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 10e5d985afa..edfa15d2e79 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -1465,7 +1465,12 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
continue;
}
- mtu = pch->chan->mtu - hdrlen;
+ /*
+ * hdrlen includes the 2-byte PPP protocol field, but the
+ * MTU counts only the payload excluding the protocol field.
+ * (RFC1661 Section 2)
+ */
+ mtu = pch->chan->mtu - (hdrlen - 2);
if (mtu < 4)
mtu = 4;
if (flen > mtu)
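
The RFC 1661 note above is easy to sanity-check with concrete numbers; a tiny stand-alone example (the hdrlen value of 6 is illustrative only: 4 bytes of multilink header plus the 2-byte protocol field):

    #include <stdio.h>

    int main(void)
    {
            int chan_mtu = 1500, hdrlen = 6;

            /* old: subtracts the protocol field even though the MTU
             * already excludes it */
            printf("old fragment payload limit: %d\n", chan_mtu - hdrlen);
            /* fixed: only the bytes actually counted against the MTU
             * are subtracted */
            printf("new fragment payload limit: %d\n",
                   chan_mtu - (hdrlen - 2));
            return 0;
    }

With these numbers the corrected code allows 1496 bytes of payload per fragment instead of 1494, so the same data needs fewer fragments.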
diff --git a/drivers/net/pptp.c b/drivers/net/pptp.c
index eae542a7e98..89f829f5f72 100644
--- a/drivers/net/pptp.c
+++ b/drivers/net/pptp.c
@@ -285,8 +285,10 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
ip_send_check(iph);
ip_local_out(skb);
+ return 1;
tx_error:
+ kfree_skb(skb);
return 1;
}
@@ -305,11 +307,18 @@ static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb)
}
header = (struct pptp_gre_header *)(skb->data);
+ headersize = sizeof(*header);
/* test if acknowledgement present */
if (PPTP_GRE_IS_A(header->ver)) {
- __u32 ack = (PPTP_GRE_IS_S(header->flags)) ?
- header->ack : header->seq; /* ack in different place if S = 0 */
+ __u32 ack;
+
+ if (!pskb_may_pull(skb, headersize))
+ goto drop;
+ header = (struct pptp_gre_header *)(skb->data);
+
+ /* ack in different place if S = 0 */
+ ack = PPTP_GRE_IS_S(header->flags) ? header->ack : header->seq;
ack = ntohl(ack);
@@ -318,21 +327,18 @@ static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb)
/* also handle sequence number wrap-around */
if (WRAPPED(ack, opt->ack_recv))
opt->ack_recv = ack;
+ } else {
+ headersize -= sizeof(header->ack);
}
-
/* test if payload present */
if (!PPTP_GRE_IS_S(header->flags))
goto drop;
- headersize = sizeof(*header);
payload_len = ntohs(header->payload_len);
seq = ntohl(header->seq);
- /* no ack present? */
- if (!PPTP_GRE_IS_A(header->ver))
- headersize -= sizeof(header->ack);
/* check for incomplete packet (length smaller than expected) */
- if (skb->len - headersize < payload_len)
+ if (!pskb_may_pull(skb, headersize + payload_len))
goto drop;
payload = skb->data + headersize;
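
The pptp change above validates the variable-length GRE header with pskb_may_pull() before any field behind the optional ack word is read. A simplified sketch of that pattern, using a hypothetical header layout rather than struct pptp_gre_header:

    #include <linux/errno.h>
    #include <linux/skbuff.h>

    struct demo_hdr {
            __u8   flags;
            __u8   ver;
            __be16 payload_len;
            __be32 seq;
            __be32 ack;     /* present only when the A bit is set */
    };

    static int demo_parse(struct sk_buff *skb)
    {
            struct demo_hdr *hdr;
            unsigned int hdrsize = sizeof(*hdr) - sizeof(hdr->ack);

            if (!pskb_may_pull(skb, hdrsize))
                    return -EINVAL;
            hdr = (struct demo_hdr *)skb->data;

            if (hdr->ver & 0x80) {          /* ack word follows */
                    hdrsize += sizeof(hdr->ack);
                    if (!pskb_may_pull(skb, hdrsize))
                            return -EINVAL;
                    hdr = (struct demo_hdr *)skb->data; /* head may have moved */
            }

            /* hdr->seq (and hdr->ack when present) are now safe to use */
            return hdrsize;
    }

The second pskb_may_pull() is what the patch adds: the header pointer is re-read afterwards because pulling can reallocate the skb head.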
diff --git a/drivers/net/pxa168_eth.c b/drivers/net/pxa168_eth.c
index 1a3033d8e7e..d17d0624c5e 100644
--- a/drivers/net/pxa168_eth.c
+++ b/drivers/net/pxa168_eth.c
@@ -40,6 +40,7 @@
#include <linux/clk.h>
#include <linux/phy.h>
#include <linux/io.h>
+#include <linux/interrupt.h>
#include <linux/types.h>
#include <asm/pgtable.h>
#include <asm/system.h>
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 02339b3352e..6d657cabb95 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -407,6 +407,7 @@ enum rtl_register_content {
RxOK = 0x0001,
/* RxStatusDesc */
+ RxBOVF = (1 << 24),
RxFOVF = (1 << 23),
RxRWT = (1 << 22),
RxRES = (1 << 21),
@@ -682,6 +683,7 @@ struct rtl8169_private {
struct mii_if_info mii;
struct rtl8169_counters counters;
u32 saved_wolopts;
+ u32 opts1_mask;
struct rtl_fw {
const struct firmware *fw;
@@ -710,6 +712,7 @@ MODULE_FIRMWARE(FIRMWARE_8168D_1);
MODULE_FIRMWARE(FIRMWARE_8168D_2);
MODULE_FIRMWARE(FIRMWARE_8168E_1);
MODULE_FIRMWARE(FIRMWARE_8168E_2);
+MODULE_FIRMWARE(FIRMWARE_8168E_3);
MODULE_FIRMWARE(FIRMWARE_8105E_1);
static int rtl8169_open(struct net_device *dev);
@@ -2856,7 +2859,7 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0004);
rtl_writephy(tp, 0x1f, 0x0007);
rtl_writephy(tp, 0x1e, 0x0020);
- rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
+ rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
rtl_writephy(tp, 0x1f, 0x0002);
rtl_writephy(tp, 0x1f, 0x0000);
rtl_writephy(tp, 0x0d, 0x0007);
@@ -3077,6 +3080,14 @@ static void rtl8169_phy_reset(struct net_device *dev,
netif_err(tp, link, dev, "PHY reset failed\n");
}
+static bool rtl_tbi_enabled(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
+ (RTL_R8(PHYstatus) & TBI_Enable);
+}
+
static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
@@ -3109,7 +3120,7 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
ADVERTISED_1000baseT_Half |
ADVERTISED_1000baseT_Full : 0));
- if (RTL_R8(PHYstatus) & TBI_Enable)
+ if (rtl_tbi_enabled(tp))
netif_info(tp, link, dev, "TBI auto-negotiating\n");
}
@@ -3305,6 +3316,37 @@ static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp)
}
}
+static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_29:
+ case RTL_GIGA_MAC_VER_30:
+ case RTL_GIGA_MAC_VER_32:
+ case RTL_GIGA_MAC_VER_33:
+ case RTL_GIGA_MAC_VER_34:
+ RTL_W32(RxConfig, RTL_R32(RxConfig) |
+ AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
+ break;
+ default:
+ break;
+ }
+}
+
+static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
+{
+ if (!(__rtl8169_get_wol(tp) & WAKE_ANY))
+ return false;
+
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, MII_BMCR, 0x0000);
+
+ rtl_wol_suspend_quirk(tp);
+
+ return true;
+}
+
static void r810x_phy_power_down(struct rtl8169_private *tp)
{
rtl_writephy(tp, 0x1f, 0x0000);
@@ -3319,11 +3361,8 @@ static void r810x_phy_power_up(struct rtl8169_private *tp)
static void r810x_pll_power_down(struct rtl8169_private *tp)
{
- if (__rtl8169_get_wol(tp) & WAKE_ANY) {
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, MII_BMCR, 0x0000);
+ if (rtl_wol_pll_power_down(tp))
return;
- }
r810x_phy_power_down(tp);
}
@@ -3412,16 +3451,8 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
tp->mac_version == RTL_GIGA_MAC_VER_33)
rtl_ephy_write(ioaddr, 0x19, 0xff64);
- if (__rtl8169_get_wol(tp) & WAKE_ANY) {
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, MII_BMCR, 0x0000);
-
- if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
- tp->mac_version == RTL_GIGA_MAC_VER_33)
- RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast |
- AcceptMulticast | AcceptMyPhys);
+ if (rtl_wol_pll_power_down(tp))
return;
- }
r8168_phy_power_down(tp);
@@ -3727,8 +3758,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->features |= rtl_try_msi(pdev, ioaddr, cfg);
RTL_W8(Cfg9346, Cfg9346_Lock);
- if ((tp->mac_version <= RTL_GIGA_MAC_VER_06) &&
- (RTL_R8(PHYstatus) & TBI_Enable)) {
+ if (rtl_tbi_enabled(tp)) {
tp->set_speed = rtl8169_set_speed_tbi;
tp->get_settings = rtl8169_gset_tbi;
tp->phy_reset_enable = rtl8169_tbi_reset_enable;
@@ -3777,6 +3807,9 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->intr_event = cfg->intr_event;
tp->napi_event = cfg->napi_event;
+ tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
+ ~(RxBOVF | RxFOVF) : ~0;
+
init_timer(&tp->timer);
tp->timer.data = (unsigned long) dev;
tp->timer.function = rtl8169_phy_timer;
@@ -3988,6 +4021,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
while (RTL_R8(TxPoll) & NPQ)
udelay(20);
} else if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
+ RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
while (!(RTL_R32(TxConfig) & TXCFG_EMPTY))
udelay(100);
} else {
@@ -5314,7 +5348,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
u32 status;
rmb();
- status = le32_to_cpu(desc->opts1);
+ status = le32_to_cpu(desc->opts1) & tp->opts1_mask;
if (status & DescOwn)
break;
@@ -5766,11 +5800,30 @@ static const struct dev_pm_ops rtl8169_pm_ops = {
#endif /* !CONFIG_PM */
+static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ /* WoL fails with 8168b when the receiver is disabled. */
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_11:
+ case RTL_GIGA_MAC_VER_12:
+ case RTL_GIGA_MAC_VER_17:
+ pci_clear_master(tp->pci_dev);
+
+ RTL_W8(ChipCmd, CmdRxEnb);
+ /* PCI commit */
+ RTL_R8(ChipCmd);
+ break;
+ default:
+ break;
+ }
+}
+
static void rtl_shutdown(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
- void __iomem *ioaddr = tp->mmio_addr;
rtl8169_net_suspend(dev);
@@ -5784,16 +5837,9 @@ static void rtl_shutdown(struct pci_dev *pdev)
spin_unlock_irq(&tp->lock);
if (system_state == SYSTEM_POWER_OFF) {
- /* WoL fails with 8168b when the receiver is disabled. */
- if ((tp->mac_version == RTL_GIGA_MAC_VER_11 ||
- tp->mac_version == RTL_GIGA_MAC_VER_12 ||
- tp->mac_version == RTL_GIGA_MAC_VER_17) &&
- (tp->features & RTL_FEATURE_WOL)) {
- pci_clear_master(pdev);
-
- RTL_W8(ChipCmd, CmdRxEnb);
- /* PCI commit */
- RTL_R8(ChipCmd);
+ if (__rtl8169_get_wol(tp) & WAKE_ANY) {
+ rtl_wol_suspend_quirk(tp);
+ rtl_wol_shutdown_quirk(tp);
}
pci_wake_from_d3(pdev, true);
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index faca764aa21..b59abc706d9 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1050,7 +1050,6 @@ static int efx_init_io(struct efx_nic *efx)
{
struct pci_dev *pci_dev = efx->pci_dev;
dma_addr_t dma_mask = efx->type->max_dma_mask;
- bool use_wc;
int rc;
netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
@@ -1101,21 +1100,8 @@ static int efx_init_io(struct efx_nic *efx)
rc = -EIO;
goto fail3;
}
-
- /* bug22643: If SR-IOV is enabled then tx push over a write combined
- * mapping is unsafe. We need to disable write combining in this case.
- * MSI is unsupported when SR-IOV is enabled, and the firmware will
- * have removed the MSI capability. So write combining is safe if
- * there is an MSI capability.
- */
- use_wc = (!EFX_WORKAROUND_22643(efx) ||
- pci_find_capability(pci_dev, PCI_CAP_ID_MSI));
- if (use_wc)
- efx->membase = ioremap_wc(efx->membase_phys,
- efx->type->mem_map_size);
- else
- efx->membase = ioremap_nocache(efx->membase_phys,
- efx->type->mem_map_size);
+ efx->membase = ioremap_nocache(efx->membase_phys,
+ efx->type->mem_map_size);
if (!efx->membase) {
netif_err(efx, probe, efx->net_dev,
"could not map memory BAR at %llx+%x\n",
diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h
index cc978803d48..751d1ec112c 100644
--- a/drivers/net/sfc/io.h
+++ b/drivers/net/sfc/io.h
@@ -103,7 +103,6 @@ static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
_efx_writed(efx, value->u32[2], reg + 8);
_efx_writed(efx, value->u32[3], reg + 12);
#endif
- wmb();
mmiowb();
spin_unlock_irqrestore(&efx->biu_lock, flags);
}
@@ -126,7 +125,6 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
__raw_writel((__force u32)value->u32[0], membase + addr);
__raw_writel((__force u32)value->u32[1], membase + addr + 4);
#endif
- wmb();
mmiowb();
spin_unlock_irqrestore(&efx->biu_lock, flags);
}
@@ -141,7 +139,6 @@ static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,
/* No lock required */
_efx_writed(efx, value->u32[0], reg);
- wmb();
}
/* Read a 128-bit CSR, locking as appropriate. */
@@ -152,7 +149,6 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
spin_lock_irqsave(&efx->biu_lock, flags);
value->u32[0] = _efx_readd(efx, reg + 0);
- rmb();
value->u32[1] = _efx_readd(efx, reg + 4);
value->u32[2] = _efx_readd(efx, reg + 8);
value->u32[3] = _efx_readd(efx, reg + 12);
@@ -175,7 +171,6 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
value->u64[0] = (__force __le64)__raw_readq(membase + addr);
#else
value->u32[0] = (__force __le32)__raw_readl(membase + addr);
- rmb();
value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
#endif
spin_unlock_irqrestore(&efx->biu_lock, flags);
@@ -249,7 +244,6 @@ static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
_efx_writed(efx, value->u32[2], reg + 8);
_efx_writed(efx, value->u32[3], reg + 12);
#endif
- wmb();
}
#define efx_writeo_page(efx, value, reg, page) \
_efx_writeo_page(efx, value, \
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index 3dd45ed61f0..81a42539746 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -50,20 +50,6 @@ static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
return &nic_data->mcdi;
}
-static inline void
-efx_mcdi_readd(struct efx_nic *efx, efx_dword_t *value, unsigned reg)
-{
- struct siena_nic_data *nic_data = efx->nic_data;
- value->u32[0] = (__force __le32)__raw_readl(nic_data->mcdi_smem + reg);
-}
-
-static inline void
-efx_mcdi_writed(struct efx_nic *efx, const efx_dword_t *value, unsigned reg)
-{
- struct siena_nic_data *nic_data = efx->nic_data;
- __raw_writel((__force u32)value->u32[0], nic_data->mcdi_smem + reg);
-}
-
void efx_mcdi_init(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi;
@@ -84,8 +70,8 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
const u8 *inbuf, size_t inlen)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
- unsigned pdu = MCDI_PDU(efx);
- unsigned doorbell = MCDI_DOORBELL(efx);
+ unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
+ unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);
unsigned int i;
efx_dword_t hdr;
u32 xflags, seqno;
@@ -106,28 +92,29 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
MCDI_HEADER_SEQ, seqno,
MCDI_HEADER_XFLAGS, xflags);
- efx_mcdi_writed(efx, &hdr, pdu);
+ efx_writed(efx, &hdr, pdu);
for (i = 0; i < inlen; i += 4)
- efx_mcdi_writed(efx, (const efx_dword_t *)(inbuf + i),
- pdu + 4 + i);
+ _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i);
+
+ /* Ensure the payload is written out before the header */
+ wmb();
/* ring the doorbell with a distinctive value */
- EFX_POPULATE_DWORD_1(hdr, EFX_DWORD_0, 0x45789abc);
- efx_mcdi_writed(efx, &hdr, doorbell);
+ _efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
}
static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
- unsigned int pdu = MCDI_PDU(efx);
+ unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
int i;
BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
BUG_ON(outlen & 3 || outlen >= 0x100);
for (i = 0; i < outlen; i += 4)
- efx_mcdi_readd(efx, (efx_dword_t *)(outbuf + i), pdu + 4 + i);
+ *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i);
}
static int efx_mcdi_poll(struct efx_nic *efx)
@@ -135,7 +122,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
unsigned int time, finish;
unsigned int respseq, respcmd, error;
- unsigned int pdu = MCDI_PDU(efx);
+ unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
unsigned int rc, spins;
efx_dword_t reg;
@@ -161,7 +148,8 @@ static int efx_mcdi_poll(struct efx_nic *efx)
time = get_seconds();
- efx_mcdi_readd(efx, &reg, pdu);
+ rmb();
+ efx_readd(efx, &reg, pdu);
/* All 1's indicates that shared memory is in reset (and is
* not a valid header). Wait for it to come out reset before
@@ -188,7 +176,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
respseq, mcdi->seqno);
rc = EIO;
} else if (error) {
- efx_mcdi_readd(efx, &reg, pdu + 4);
+ efx_readd(efx, &reg, pdu + 4);
switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) {
#define TRANSLATE_ERROR(name) \
case MC_CMD_ERR_ ## name: \
@@ -222,21 +210,21 @@ out:
/* Test and clear MC-rebooted flag for this port/function */
int efx_mcdi_poll_reboot(struct efx_nic *efx)
{
- unsigned int addr = MCDI_REBOOT_FLAG(efx);
+ unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx);
efx_dword_t reg;
uint32_t value;
if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
return false;
- efx_mcdi_readd(efx, &reg, addr);
+ efx_readd(efx, &reg, addr);
value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
if (value == 0)
return 0;
EFX_ZERO_DWORD(reg);
- efx_mcdi_writed(efx, &reg, addr);
+ efx_writed(efx, &reg, addr);
if (value == MC_STATUS_DWORD_ASSERT)
return -EINTR;
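
With the per-access barriers dropped from the sfc I/O helpers (see the io.h hunks above), ordering is enforced once, explicitly, at the point that matters: after the request body is written and before the doorbell is rung. A generic sketch of that idiom, with placeholder offsets and the distinctive kick value from the hunk above:

    #include <linux/io.h>

    #define REQ_OFFSET      0x000           /* placeholder */
    #define DOORBELL_OFFSET 0x100           /* placeholder */
    #define KICK_VALUE      0x45789abcU

    static void post_request(void __iomem *base, const u32 *req,
                             unsigned int words)
    {
            unsigned int i;

            for (i = 0; i < words; i++)
                    __raw_writel(req[i], base + REQ_OFFSET + 4 * i);

            /* make the whole request visible before the kick */
            wmb();

            __raw_writel(KICK_VALUE, base + DOORBELL_OFFSET);
    }

One wmb() here replaces a barrier per register write, which is the point of the io.h cleanup.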
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index bafa23a6874..3edfbaf5f02 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -1936,13 +1936,6 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf)
size = min_t(size_t, table->step, 16);
- if (table->offset >= efx->type->mem_map_size) {
- /* No longer mapped; return dummy data */
- memcpy(buf, "\xde\xc0\xad\xde", 4);
- buf += table->rows * size;
- continue;
- }
-
for (i = 0; i < table->rows; i++) {
switch (table->step) {
case 4: /* 32-bit register or SRAM */
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index 4bd1f2839df..7443f99c977 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -143,12 +143,10 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
/**
* struct siena_nic_data - Siena NIC state
* @mcdi: Management-Controller-to-Driver Interface
- * @mcdi_smem: MCDI shared memory mapping. The mapping is always uncacheable.
* @wol_filter_id: Wake-on-LAN packet filter id
*/
struct siena_nic_data {
struct efx_mcdi_iface mcdi;
- void __iomem *mcdi_smem;
int wol_filter_id;
};
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index 5735e84c69d..2c3bd93fab5 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -250,26 +250,12 @@ static int siena_probe_nic(struct efx_nic *efx)
efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
- /* Initialise MCDI */
- nic_data->mcdi_smem = ioremap_nocache(efx->membase_phys +
- FR_CZ_MC_TREG_SMEM,
- FR_CZ_MC_TREG_SMEM_STEP *
- FR_CZ_MC_TREG_SMEM_ROWS);
- if (!nic_data->mcdi_smem) {
- netif_err(efx, probe, efx->net_dev,
- "could not map MCDI at %llx+%x\n",
- (unsigned long long)efx->membase_phys +
- FR_CZ_MC_TREG_SMEM,
- FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS);
- rc = -ENOMEM;
- goto fail1;
- }
efx_mcdi_init(efx);
/* Recover from a failed assertion before probing */
rc = efx_mcdi_handle_assertion(efx);
if (rc)
- goto fail2;
+ goto fail1;
/* Let the BMC know that the driver is now in charge of link and
* filter settings. We must do this before we reset the NIC */
@@ -324,7 +310,6 @@ fail4:
fail3:
efx_mcdi_drv_attach(efx, false, NULL);
fail2:
- iounmap(nic_data->mcdi_smem);
fail1:
kfree(efx->nic_data);
return rc;
@@ -404,8 +389,6 @@ static int siena_init_nic(struct efx_nic *efx)
static void siena_remove_nic(struct efx_nic *efx)
{
- struct siena_nic_data *nic_data = efx->nic_data;
-
efx_nic_free_buffer(efx, &efx->irq_status);
siena_reset_hw(efx, RESET_TYPE_ALL);
@@ -415,8 +398,7 @@ static void siena_remove_nic(struct efx_nic *efx)
efx_mcdi_drv_attach(efx, false, NULL);
/* Tear down the private nic state */
- iounmap(nic_data->mcdi_smem);
- kfree(nic_data);
+ kfree(efx->nic_data);
efx->nic_data = NULL;
}
@@ -656,7 +638,8 @@ const struct efx_nic_type siena_a0_nic_type = {
.default_mac_ops = &efx_mcdi_mac_operations,
.revision = EFX_REV_SIENA_A0,
- .mem_map_size = FR_CZ_MC_TREG_SMEM, /* MC_TREG_SMEM mapped separately */
+ .mem_map_size = (FR_CZ_MC_TREG_SMEM +
+ FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS),
.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index 99ff11400ce..e4dd3a7f304 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -38,8 +38,6 @@
#define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS
/* Legacy interrupt storm when interrupt fifo fills */
#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
-/* Write combining and sriov=enabled are incompatible */
-#define EFX_WORKAROUND_22643 EFX_WORKAROUND_SIENA
/* Spurious parity errors in TSORT buffers */
#define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index b9016a30cdc..c90ddb61cc5 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -26,6 +26,7 @@
* LAN9215, LAN9216, LAN9217, LAN9218
* LAN9210, LAN9211
* LAN9220, LAN9221
+ * LAN89218
*
*/
@@ -1983,6 +1984,7 @@ static int __devinit smsc911x_init(struct net_device *dev)
case 0x01170000:
case 0x01160000:
case 0x01150000:
+ case 0x218A0000:
/* LAN911[5678] family */
pdata->generation = pdata->idrev & 0x0000FFFF;
break;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index dc3fbf61910..c11a2b8327f 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -6234,12 +6234,10 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
-#ifdef BCM_KERNEL_SUPPORTS_8021Q
if (vlan_tx_tag_present(skb)) {
base_flags |= TXD_FLAG_VLAN;
vlan = vlan_tx_tag_get(skb);
}
-#endif
if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
!mss && skb->len > VLAN_ETH_FRAME_LEN)
@@ -15579,7 +15577,7 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev)
cancel_work_sync(&tp->reset_task);
- if (!tg3_flag(tp, USE_PHYLIB)) {
+ if (tg3_flag(tp, USE_PHYLIB)) {
tg3_phy_fini(tp);
tg3_mdio_fini(tp);
}
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 15772b1b6a9..13c1f044b40 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -59,6 +59,7 @@
#define USB_PRODUCT_IPHONE_3G 0x1292
#define USB_PRODUCT_IPHONE_3GS 0x1294
#define USB_PRODUCT_IPHONE_4 0x1297
+#define USB_PRODUCT_IPHONE_4_VZW 0x129c
#define IPHETH_USBINTF_CLASS 255
#define IPHETH_USBINTF_SUBCLASS 253
@@ -98,6 +99,10 @@ static struct usb_device_id ipheth_table[] = {
USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4,
IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
IPHETH_USBINTF_PROTO) },
+ { USB_DEVICE_AND_INTERFACE_INFO(
+ USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW,
+ IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+ IPHETH_USBINTF_PROTO) },
{ }
};
MODULE_DEVICE_TABLE(usb, ipheth_table);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 2d4c0910295..2d394af8217 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -41,7 +41,8 @@ static bool ar9002_hw_is_cal_supported(struct ath_hw *ah,
case ADC_DC_CAL:
/* Run ADC Gain Cal for non-CCK & non 2GHz-HT20 only */
if (!IS_CHAN_B(chan) &&
- !(IS_CHAN_2GHZ(chan) && IS_CHAN_HT20(chan)))
+ !((IS_CHAN_2GHZ(chan) || IS_CHAN_A_FAST_CLOCK(ah, chan)) &&
+ IS_CHAN_HT20(chan)))
supported = true;
break;
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index 2339728a730..3e69c631ebb 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -1514,7 +1514,7 @@ static const u32 ar9300_2p2_mac_core[][2] = {
{0x00008258, 0x00000000},
{0x0000825c, 0x40000000},
{0x00008260, 0x00080922},
- {0x00008264, 0x9bc00010},
+ {0x00008264, 0x9d400010},
{0x00008268, 0xffffffff},
{0x0000826c, 0x0000ffff},
{0x00008270, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 1baca8e4715..fcafec0605f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -671,7 +671,7 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
REG_WRITE_ARRAY(&ah->iniModesAdditional,
modesIndex, regWrites);
- if (AR_SREV_9300(ah))
+ if (AR_SREV_9330(ah))
REG_WRITE_ARRAY(&ah->iniModesAdditional, 1, regWrites);
if (AR_SREV_9340(ah) && !ah->is_clk_25mhz)
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 6530694a59a..722967b86cf 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -2303,6 +2303,12 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
mutex_lock(&sc->mutex);
cancel_delayed_work_sync(&sc->tx_complete_work);
+ if (ah->ah_flags & AH_UNPLUGGED) {
+ ath_dbg(common, ATH_DBG_ANY, "Device has been unplugged!\n");
+ mutex_unlock(&sc->mutex);
+ return;
+ }
+
if (sc->sc_flags & SC_OP_INVALID) {
ath_dbg(common, ATH_DBG_ANY, "Device not present\n");
mutex_unlock(&sc->mutex);
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 9a4850154fb..4c21f8cbdeb 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -205,14 +205,22 @@ static void ath_rx_remove_buffer(struct ath_softc *sc,
static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
struct ath_buf *bf;
ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
list_for_each_entry(bf, &sc->rx.rxbuf, list) {
- if (bf->bf_mpdu)
+ if (bf->bf_mpdu) {
+ dma_unmap_single(sc->dev, bf->bf_buf_addr,
+ common->rx_bufsize,
+ DMA_BIDIRECTIONAL);
dev_kfree_skb_any(bf->bf_mpdu);
+ bf->bf_buf_addr = 0;
+ bf->bf_mpdu = NULL;
+ }
}
INIT_LIST_HEAD(&sc->rx.rxbuf);
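
The ath_rx_edma_cleanup() hunk above pairs every freed receive skb with a dma_unmap_single() of its buffer. A generic sketch of that teardown pattern (names and the buffer size are placeholders):

    #include <linux/dma-mapping.h>
    #include <linux/skbuff.h>

    static void demo_free_rx_buf(struct device *dev, struct sk_buff *skb,
                                 dma_addr_t addr, size_t bufsize)
    {
            if (!skb)
                    return;
            /* release the streaming mapping before freeing the skb so
             * IOMMU/swiotlb resources are not leaked */
            dma_unmap_single(dev, addr, bufsize, DMA_BIDIRECTIONAL);
            dev_kfree_skb_any(skb);
    }

Clearing the saved address and skb pointer afterwards, as the patch does, keeps a later cleanup pass from unmapping the same buffer twice.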
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 26f1ab840cc..e293a7921bf 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -1632,7 +1632,8 @@ static void handle_irq_beacon(struct b43_wldev *dev)
u32 cmd, beacon0_valid, beacon1_valid;
if (!b43_is_mode(wl, NL80211_IFTYPE_AP) &&
- !b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT))
+ !b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT) &&
+ !b43_is_mode(wl, NL80211_IFTYPE_ADHOC))
return;
/* This is the bottom half of the asynchronous beacon update. */
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 3774dd03474..ef9ad79d1bf 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -1903,15 +1903,17 @@ static void ipw2100_down(struct ipw2100_priv *priv)
static int ipw2100_net_init(struct net_device *dev)
{
struct ipw2100_priv *priv = libipw_priv(dev);
+
+ return ipw2100_up(priv, 1);
+}
+
+static int ipw2100_wdev_init(struct net_device *dev)
+{
+ struct ipw2100_priv *priv = libipw_priv(dev);
const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
struct wireless_dev *wdev = &priv->ieee->wdev;
- int ret;
int i;
- ret = ipw2100_up(priv, 1);
- if (ret)
- return ret;
-
memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
/* fill-out priv->ieee->bg_band */
@@ -6350,9 +6352,13 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
"Error calling register_netdev.\n");
goto fail;
}
+ registered = 1;
+
+ err = ipw2100_wdev_init(dev);
+ if (err)
+ goto fail;
mutex_lock(&priv->action_mutex);
- registered = 1;
IPW_DEBUG_INFO("%s: Bound to %s\n", dev->name, pci_name(pci_dev));
@@ -6389,7 +6395,8 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
fail_unlock:
mutex_unlock(&priv->action_mutex);
-
+ wiphy_unregister(priv->ieee->wdev.wiphy);
+ kfree(priv->ieee->bg_band.channels);
fail:
if (dev) {
if (registered)
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 87813c33bdc..4ffebede5e0 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -11425,16 +11425,23 @@ static void ipw_bg_down(struct work_struct *work)
/* Called by register_netdev() */
static int ipw_net_init(struct net_device *dev)
{
+ int rc = 0;
+ struct ipw_priv *priv = libipw_priv(dev);
+
+ mutex_lock(&priv->mutex);
+ if (ipw_up(priv))
+ rc = -EIO;
+ mutex_unlock(&priv->mutex);
+
+ return rc;
+}
+
+static int ipw_wdev_init(struct net_device *dev)
+{
int i, rc = 0;
struct ipw_priv *priv = libipw_priv(dev);
const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
struct wireless_dev *wdev = &priv->ieee->wdev;
- mutex_lock(&priv->mutex);
-
- if (ipw_up(priv)) {
- rc = -EIO;
- goto out;
- }
memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
@@ -11519,13 +11526,9 @@ static int ipw_net_init(struct net_device *dev)
set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
/* With that information in place, we can now register the wiphy... */
- if (wiphy_register(wdev->wiphy)) {
+ if (wiphy_register(wdev->wiphy))
rc = -EIO;
- goto out;
- }
-
out:
- mutex_unlock(&priv->mutex);
return rc;
}
@@ -11832,14 +11835,22 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
goto out_remove_sysfs;
}
+ err = ipw_wdev_init(net_dev);
+ if (err) {
+ IPW_ERROR("failed to register wireless device\n");
+ goto out_unregister_netdev;
+ }
+
#ifdef CONFIG_IPW2200_PROMISCUOUS
if (rtap_iface) {
err = ipw_prom_alloc(priv);
if (err) {
IPW_ERROR("Failed to register promiscuous network "
"device (error %d).\n", err);
- unregister_netdev(priv->net_dev);
- goto out_remove_sysfs;
+ wiphy_unregister(priv->ieee->wdev.wiphy);
+ kfree(priv->ieee->a_band.channels);
+ kfree(priv->ieee->bg_band.channels);
+ goto out_unregister_netdev;
}
}
#endif
@@ -11851,6 +11862,8 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
return 0;
+ out_unregister_netdev:
+ unregister_netdev(priv->net_dev);
out_remove_sysfs:
sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
out_release_irq:
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
index 977bd2477c6..164bcae821f 100644
--- a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
@@ -822,12 +822,15 @@ static void iwl3945_rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
out:
- rs_sta->last_txrate_idx = index;
- if (sband->band == IEEE80211_BAND_5GHZ)
- info->control.rates[0].idx = rs_sta->last_txrate_idx -
- IWL_FIRST_OFDM_RATE;
- else
+ if (sband->band == IEEE80211_BAND_5GHZ) {
+ if (WARN_ON_ONCE(index < IWL_FIRST_OFDM_RATE))
+ index = IWL_FIRST_OFDM_RATE;
+ rs_sta->last_txrate_idx = index;
+ info->control.rates[0].idx = index - IWL_FIRST_OFDM_RATE;
+ } else {
+ rs_sta->last_txrate_idx = index;
info->control.rates[0].idx = rs_sta->last_txrate_idx;
+ }
IWL_DEBUG_RATE(priv, "leave: %d\n", index);
}
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
index 35cd2537e7f..e5971fe9d16 100644
--- a/drivers/net/wireless/iwlegacy/iwl-core.c
+++ b/drivers/net/wireless/iwlegacy/iwl-core.c
@@ -937,7 +937,7 @@ void iwl_legacy_irq_handle_error(struct iwl_priv *priv)
&priv->contexts[IWL_RXON_CTX_BSS]);
#endif
- wake_up_interruptible(&priv->wait_command_queue);
+ wake_up(&priv->wait_command_queue);
/* Keep the restart process from trying to send host
* commands by clearing the INIT status bit */
@@ -1746,7 +1746,7 @@ int iwl_legacy_force_reset(struct iwl_priv *priv, bool external)
/* Set the FW error flag -- cleared on iwl_down */
set_bit(STATUS_FW_ERROR, &priv->status);
- wake_up_interruptible(&priv->wait_command_queue);
+ wake_up(&priv->wait_command_queue);
/*
* Keep the restart process from trying to send host
* commands by clearing the INIT status bit
diff --git a/drivers/net/wireless/iwlegacy/iwl-hcmd.c b/drivers/net/wireless/iwlegacy/iwl-hcmd.c
index 62b4b09122c..ce1fc9feb61 100644
--- a/drivers/net/wireless/iwlegacy/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlegacy/iwl-hcmd.c
@@ -167,7 +167,7 @@ int iwl_legacy_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
goto out;
}
- ret = wait_event_interruptible_timeout(priv->wait_command_queue,
+ ret = wait_event_timeout(priv->wait_command_queue,
!test_bit(STATUS_HCMD_ACTIVE, &priv->status),
HOST_COMPLETE_TIMEOUT);
if (!ret) {
diff --git a/drivers/net/wireless/iwlegacy/iwl-tx.c b/drivers/net/wireless/iwlegacy/iwl-tx.c
index 4fff995c6f3..ef9e268bf8a 100644
--- a/drivers/net/wireless/iwlegacy/iwl-tx.c
+++ b/drivers/net/wireless/iwlegacy/iwl-tx.c
@@ -625,6 +625,8 @@ iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
cmd = txq->cmd[cmd_index];
meta = &txq->meta[cmd_index];
+ txq->time_stamp = jiffies;
+
pci_unmap_single(priv->pci_dev,
dma_unmap_addr(meta, mapping),
dma_unmap_len(meta, len),
@@ -645,7 +647,7 @@ iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
iwl_legacy_get_cmd_string(cmd->hdr.cmd));
- wake_up_interruptible(&priv->wait_command_queue);
+ wake_up(&priv->wait_command_queue);
}
/* Mark as unmapped */
diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
index 795826a014e..66ee15629a7 100644
--- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
+++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
@@ -841,7 +841,7 @@ static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
wiphy_rfkill_set_hw_state(priv->hw->wiphy,
test_bit(STATUS_RF_KILL_HW, &priv->status));
else
- wake_up_interruptible(&priv->wait_command_queue);
+ wake_up(&priv->wait_command_queue);
}
/**
@@ -2269,7 +2269,7 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
iwl3945_reg_txpower_periodic(priv);
IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
- wake_up_interruptible(&priv->wait_command_queue);
+ wake_up(&priv->wait_command_queue);
return;
@@ -2300,7 +2300,7 @@ static void __iwl3945_down(struct iwl_priv *priv)
iwl_legacy_clear_driver_stations(priv);
/* Unblock any waiting calls */
- wake_up_interruptible_all(&priv->wait_command_queue);
+ wake_up_all(&priv->wait_command_queue);
/* Wipe out the EXIT_PENDING status bit if we are not actually
* exiting the module */
@@ -2853,7 +2853,7 @@ static int iwl3945_mac_start(struct ieee80211_hw *hw)
/* Wait for START_ALIVE from ucode. Otherwise callbacks from
* mac80211 will not be run successfully. */
- ret = wait_event_interruptible_timeout(priv->wait_command_queue,
+ ret = wait_event_timeout(priv->wait_command_queue,
test_bit(STATUS_READY, &priv->status),
UCODE_READY_TIMEOUT);
if (!ret) {
diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c
index 14334668034..aa0c2539761 100644
--- a/drivers/net/wireless/iwlegacy/iwl4965-base.c
+++ b/drivers/net/wireless/iwlegacy/iwl4965-base.c
@@ -576,7 +576,7 @@ static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
wiphy_rfkill_set_hw_state(priv->hw->wiphy,
test_bit(STATUS_RF_KILL_HW, &priv->status));
else
- wake_up_interruptible(&priv->wait_command_queue);
+ wake_up(&priv->wait_command_queue);
}
/**
@@ -926,7 +926,7 @@ static void iwl4965_irq_tasklet(struct iwl_priv *priv)
handled |= CSR_INT_BIT_FH_TX;
/* Wake up uCode load routine, now that load is complete */
priv->ucode_write_complete = 1;
- wake_up_interruptible(&priv->wait_command_queue);
+ wake_up(&priv->wait_command_queue);
}
if (inta & ~handled) {
@@ -1795,7 +1795,7 @@ static void iwl4965_alive_start(struct iwl_priv *priv)
iwl4965_rf_kill_ct_config(priv);
IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
- wake_up_interruptible(&priv->wait_command_queue);
+ wake_up(&priv->wait_command_queue);
iwl_legacy_power_update_mode(priv, true);
IWL_DEBUG_INFO(priv, "Updated power mode\n");
@@ -1828,7 +1828,7 @@ static void __iwl4965_down(struct iwl_priv *priv)
iwl_legacy_clear_driver_stations(priv);
/* Unblock any waiting calls */
- wake_up_interruptible_all(&priv->wait_command_queue);
+ wake_up_all(&priv->wait_command_queue);
/* Wipe out the EXIT_PENDING status bit if we are not actually
* exiting the module */
@@ -2266,7 +2266,7 @@ int iwl4965_mac_start(struct ieee80211_hw *hw)
/* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
* mac80211 will not be run successfully. */
- ret = wait_event_interruptible_timeout(priv->wait_command_queue,
+ ret = wait_event_timeout(priv->wait_command_queue,
test_bit(STATUS_READY, &priv->status),
UCODE_READY_TIMEOUT);
if (!ret) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
index a895a099d08..56211006a18 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
@@ -167,7 +167,7 @@ static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv)
memset(&cmd, 0, sizeof(cmd));
iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
- memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(offset_calib));
+ memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(*offset_calib));
if (!(cmd.radio_sensor_offset))
cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index b0ae4de7f08..f9c3cd95d61 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -2140,7 +2140,12 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
IEEE80211_HW_SPECTRUM_MGMT |
IEEE80211_HW_REPORTS_TX_ACK_STATUS;
+ /*
+ * Including the following line will crash some APs. This
+ * workaround removes the stimulus which causes the crash until
+ * the AP software can be fixed.
hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+ */
hw->flags |= IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index dd6937e9705..77e528f5db8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -405,31 +405,33 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
mutex_lock(&priv->mutex);
- if (test_bit(STATUS_SCANNING, &priv->status) &&
- priv->scan_type != IWL_SCAN_NORMAL) {
- IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
- ret = -EAGAIN;
- goto out_unlock;
- }
-
- /* mac80211 will only ask for one band at a time */
- priv->scan_request = req;
- priv->scan_vif = vif;
-
/*
* If an internal scan is in progress, just set
* up the scan_request as per above.
*/
if (priv->scan_type != IWL_SCAN_NORMAL) {
- IWL_DEBUG_SCAN(priv, "SCAN request during internal scan\n");
+ IWL_DEBUG_SCAN(priv,
+ "SCAN request during internal scan - defer\n");
+ priv->scan_request = req;
+ priv->scan_vif = vif;
ret = 0;
- } else
+ } else {
+ priv->scan_request = req;
+ priv->scan_vif = vif;
+ /*
+ * mac80211 will only ask for one band at a time
+ * so using channels[0] here is ok
+ */
ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL,
req->channels[0]->band);
+ if (ret) {
+ priv->scan_request = NULL;
+ priv->scan_vif = NULL;
+ }
+ }
IWL_DEBUG_MAC80211(priv, "leave\n");
-out_unlock:
mutex_unlock(&priv->mutex);
return ret;
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
index a6b2b1db0b1..222d410c586 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
@@ -771,6 +771,8 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
cmd = txq->cmd[cmd_index];
meta = &txq->meta[cmd_index];
+ txq->time_stamp = jiffies;
+
iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);
/* Input error checking is done when commands are added to queue. */
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index ef67f6786a8..0019dfd8fb0 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -3697,14 +3697,15 @@ static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i)
rt2800_regbusy_read(rt2x00dev, EFUSE_CTRL, EFUSE_CTRL_KICK, &reg);
/* Apparently the data is read from end to start */
- rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3,
- (u32 *)&rt2x00dev->eeprom[i]);
- rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2,
- (u32 *)&rt2x00dev->eeprom[i + 2]);
- rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1,
- (u32 *)&rt2x00dev->eeprom[i + 4]);
- rt2800_register_read_lock(rt2x00dev, EFUSE_DATA0,
- (u32 *)&rt2x00dev->eeprom[i + 6]);
+ rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, &reg);
+ /* The returned value is in CPU order, but eeprom is le */
+ rt2x00dev->eeprom[i] = cpu_to_le32(reg);
+ rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, &reg);
+ *(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg);
+ rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, &reg);
+ *(u32 *)&rt2x00dev->eeprom[i + 4] = cpu_to_le32(reg);
+ rt2800_register_read_lock(rt2x00dev, EFUSE_DATA0, &reg);
+ *(u32 *)&rt2x00dev->eeprom[i + 6] = cpu_to_le32(reg);
mutex_unlock(&rt2x00dev->csr_mutex);
}
@@ -3870,19 +3871,23 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
return -ENODEV;
}
- if (!rt2x00_rf(rt2x00dev, RF2820) &&
- !rt2x00_rf(rt2x00dev, RF2850) &&
- !rt2x00_rf(rt2x00dev, RF2720) &&
- !rt2x00_rf(rt2x00dev, RF2750) &&
- !rt2x00_rf(rt2x00dev, RF3020) &&
- !rt2x00_rf(rt2x00dev, RF2020) &&
- !rt2x00_rf(rt2x00dev, RF3021) &&
- !rt2x00_rf(rt2x00dev, RF3022) &&
- !rt2x00_rf(rt2x00dev, RF3052) &&
- !rt2x00_rf(rt2x00dev, RF3320) &&
- !rt2x00_rf(rt2x00dev, RF5370) &&
- !rt2x00_rf(rt2x00dev, RF5390)) {
- ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
+ switch (rt2x00dev->chip.rf) {
+ case RF2820:
+ case RF2850:
+ case RF2720:
+ case RF2750:
+ case RF3020:
+ case RF2020:
+ case RF3021:
+ case RF3022:
+ case RF3052:
+ case RF3320:
+ case RF5370:
+ case RF5390:
+ break;
+ default:
+ ERROR(rt2x00dev, "Invalid RF chipset 0x%x detected.\n",
+ rt2x00dev->chip.rf);
return -ENODEV;
}
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 1bdc1aa305c..04c4e9eb6ee 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -610,6 +610,11 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
mac->link_state = MAC80211_NOLINK;
memset(mac->bssid, 0, 6);
+
+ /* reset sec info */
+ rtl_cam_reset_sec_info(hw);
+
+ rtl_cam_reset_all_entry(hw);
mac->vendor = PEER_UNKNOWN;
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
@@ -1063,6 +1068,9 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
*or clear all entry here.
*/
rtl_cam_delete_one_entry(hw, mac_addr, key_idx);
+
+ rtl_cam_reset_sec_info(hw);
+
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index 906e7aa55bc..3e52a549622 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -549,15 +549,16 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
(tcb_desc->rts_use_shortpreamble ? 1 : 0)
: (tcb_desc->rts_use_shortgi ? 1 : 0)));
if (mac->bw_40) {
- if (tcb_desc->packet_bw) {
+ if (rate_flag & IEEE80211_TX_RC_DUP_DATA) {
SET_TX_DESC_DATA_BW(txdesc, 1);
SET_TX_DESC_DATA_SC(txdesc, 3);
+ } else if(rate_flag & IEEE80211_TX_RC_40_MHZ_WIDTH){
+ SET_TX_DESC_DATA_BW(txdesc, 1);
+ SET_TX_DESC_DATA_SC(txdesc, mac->cur_40_prime_sc);
} else {
SET_TX_DESC_DATA_BW(txdesc, 0);
- if (rate_flag & IEEE80211_TX_RC_DUP_DATA)
- SET_TX_DESC_DATA_SC(txdesc,
- mac->cur_40_prime_sc);
- }
+ SET_TX_DESC_DATA_SC(txdesc, 0);
+ }
} else {
SET_TX_DESC_DATA_BW(txdesc, 0);
SET_TX_DESC_DATA_SC(txdesc, 0);
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index 8b1cef0ffde..4bf3cf457ef 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -863,6 +863,7 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb,
u8 tid = 0;
u16 seq_number = 0;
+ memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
if (ieee80211_is_auth(fc)) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, ("MAC80211_LINKING\n"));
rtl_ips_nic_on(hw);
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 0ca86f9ec4e..182562952c7 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -327,12 +327,12 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
xenvif_get(vif);
rtnl_lock();
- if (netif_running(vif->dev))
- xenvif_up(vif);
if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
dev_set_mtu(vif->dev, ETH_DATA_LEN);
netdev_update_features(vif->dev);
netif_carrier_on(vif->dev);
+ if (netif_running(vif->dev))
+ xenvif_up(vif);
rtnl_unlock();
return 0;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 4e84fd4a431..e9651f0a881 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -77,7 +77,7 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
-enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE;
+enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
/*
* The default CLS is used if arch didn't set CLS explicitly and not
@@ -3568,10 +3568,14 @@ static int __init pci_setup(char *str)
pci_hotplug_io_size = memparse(str + 9, &str);
} else if (!strncmp(str, "hpmemsize=", 10)) {
pci_hotplug_mem_size = memparse(str + 10, &str);
+ } else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
+ pcie_bus_config = PCIE_BUS_TUNE_OFF;
} else if (!strncmp(str, "pcie_bus_safe", 13)) {
pcie_bus_config = PCIE_BUS_SAFE;
} else if (!strncmp(str, "pcie_bus_perf", 13)) {
pcie_bus_config = PCIE_BUS_PERFORMANCE;
+ } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
+ pcie_bus_config = PCIE_BUS_PEER2PEER;
} else {
printk(KERN_ERR "PCI: Unknown option `%s'\n",
str);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index f3f94a5c068..6ab6bd3df4b 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1458,12 +1458,24 @@ static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
*/
void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss)
{
- u8 smpss = mpss;
+ u8 smpss;
if (!pci_is_pcie(bus->self))
return;
+ if (pcie_bus_config == PCIE_BUS_TUNE_OFF)
+ return;
+
+ /* FIXME - Peer to peer DMA is possible, though the endpoint would need
+ * to be aware of the MPS of the destination. To work around this,
+ * simply force the MPS of the entire system to the smallest possible.
+ */
+ if (pcie_bus_config == PCIE_BUS_PEER2PEER)
+ smpss = 0;
+
if (pcie_bus_config == PCIE_BUS_SAFE) {
+ smpss = mpss;
+
pcie_find_smpss(bus->self, &smpss);
pci_walk_bus(bus, pcie_find_smpss, &smpss);
}
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index 6fa215a3861..90832a95599 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -400,9 +400,8 @@ static int pcifront_claim_resource(struct pci_dev *dev, void *data)
dev_info(&pdev->xdev->dev, "claiming resource %s/%d\n",
pci_name(dev), i);
if (pci_claim_resource(dev, i)) {
- dev_err(&pdev->xdev->dev, "Could not claim "
- "resource %s/%d! Device offline. Try "
- "giving less than 4GB to domain.\n",
+ dev_err(&pdev->xdev->dev, "Could not claim resource %s/%d! "
+ "Device offline. Try using e820_host=1 in the guest config.\n",
pci_name(dev), i);
}
}
diff --git a/drivers/pcmcia/sa1100_simpad.c b/drivers/pcmcia/sa1100_simpad.c
index c998f7aaadb..0fac9658b02 100644
--- a/drivers/pcmcia/sa1100_simpad.c
+++ b/drivers/pcmcia/sa1100_simpad.c
@@ -15,10 +15,6 @@
#include <mach/simpad.h>
#include "sa1100_generic.h"
-extern long get_cs3_shadow(void);
-extern void set_cs3_bit(int value);
-extern void clear_cs3_bit(int value);
-
static struct pcmcia_irqs irqs[] = {
{ 1, IRQ_GPIO_CF_CD, "CF_CD" },
};
@@ -26,7 +22,7 @@ static struct pcmcia_irqs irqs[] = {
static int simpad_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
- clear_cs3_bit(VCC_3V_EN|VCC_5V_EN|EN0|EN1);
+ simpad_clear_cs3_bit(VCC_3V_EN|VCC_5V_EN|EN0|EN1);
skt->socket.pci_irq = IRQ_GPIO_CF_IRQ;
@@ -38,8 +34,8 @@ static void simpad_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
soc_pcmcia_free_irqs(skt, irqs, ARRAY_SIZE(irqs));
/* Disable CF bus: */
- //set_cs3_bit(PCMCIA_BUFF_DIS);
- clear_cs3_bit(PCMCIA_RESET);
+ /*simpad_set_cs3_bit(PCMCIA_BUFF_DIS);*/
+ simpad_clear_cs3_bit(PCMCIA_RESET);
}
static void
@@ -47,15 +43,16 @@ simpad_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
struct pcmcia_state *state)
{
unsigned long levels = GPLR;
- long cs3reg = get_cs3_shadow();
+ long cs3reg = simpad_get_cs3_ro();
state->detect=((levels & GPIO_CF_CD)==0)?1:0;
state->ready=(levels & GPIO_CF_IRQ)?1:0;
- state->bvd1=1; /* Not available on Simpad. */
- state->bvd2=1; /* Not available on Simpad. */
+ state->bvd1 = 1; /* Might be cs3reg & PCMCIA_BVD1 */
+ state->bvd2 = 1; /* Might be cs3reg & PCMCIA_BVD2 */
state->wrprot=0; /* Not available on Simpad. */
-
- if((cs3reg & 0x0c) == 0x0c) {
+
+ if ((cs3reg & (PCMCIA_VS1|PCMCIA_VS2)) ==
+ (PCMCIA_VS1|PCMCIA_VS2)) {
state->vs_3v=0;
state->vs_Xv=0;
} else {
@@ -75,23 +72,23 @@ simpad_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
/* Murphy: see table of MIC2562a-1 */
switch (state->Vcc) {
case 0:
- clear_cs3_bit(VCC_3V_EN|VCC_5V_EN|EN0|EN1);
+ simpad_clear_cs3_bit(VCC_3V_EN|VCC_5V_EN|EN0|EN1);
break;
case 33:
- clear_cs3_bit(VCC_3V_EN|EN1);
- set_cs3_bit(VCC_5V_EN|EN0);
+ simpad_clear_cs3_bit(VCC_3V_EN|EN1);
+ simpad_set_cs3_bit(VCC_5V_EN|EN0);
break;
case 50:
- clear_cs3_bit(VCC_5V_EN|EN1);
- set_cs3_bit(VCC_3V_EN|EN0);
+ simpad_clear_cs3_bit(VCC_5V_EN|EN1);
+ simpad_set_cs3_bit(VCC_3V_EN|EN0);
break;
default:
printk(KERN_ERR "%s(): unrecognized Vcc %u\n",
__func__, state->Vcc);
- clear_cs3_bit(VCC_3V_EN|VCC_5V_EN|EN0|EN1);
+ simpad_clear_cs3_bit(VCC_3V_EN|VCC_5V_EN|EN0|EN1);
local_irq_restore(flags);
return -1;
}
@@ -110,7 +107,7 @@ static void simpad_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
static void simpad_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
{
soc_pcmcia_disable_irqs(skt, irqs, ARRAY_SIZE(irqs));
- set_cs3_bit(PCMCIA_RESET);
+ simpad_set_cs3_bit(PCMCIA_RESET);
}
static struct pcmcia_low_level simpad_pcmcia_ops = {
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index cbde448f994..eb3140ee821 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -654,8 +654,8 @@ static struct io_subchannel_private console_priv;
static int console_subchannel_in_use;
/*
- * Use tpi to get a pending interrupt, call the interrupt handler and
- * return a pointer to the subchannel structure.
+ * Use cio_tpi to get a pending interrupt and call the interrupt handler.
+ * Return non-zero if an interrupt was processed, zero otherwise.
*/
static int cio_tpi(void)
{
@@ -667,6 +667,10 @@ static int cio_tpi(void)
tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
if (tpi(NULL) != 1)
return 0;
+ if (tpi_info->adapter_IO) {
+ do_adapter_IO(tpi_info->isc);
+ return 1;
+ }
irb = (struct irb *)&S390_lowcore.irb;
/* Store interrupt response block to lowcore. */
if (tsch(tpi_info->schid, irb) != 0)
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index b7bd5b0cc7a..3868ab2397c 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1800,10 +1800,12 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
switch (retval) {
case SCSI_MLQUEUE_HOST_BUSY:
twa_free_request_id(tw_dev, request_id);
+ twa_unmap_scsi_data(tw_dev, request_id);
break;
case 1:
tw_dev->state[request_id] = TW_S_COMPLETED;
twa_free_request_id(tw_dev, request_id);
+ twa_unmap_scsi_data(tw_dev, request_id);
SCpnt->result = (DID_ERROR << 16);
done(SCpnt);
retval = 0;
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 8d9dae89f06..3878b739508 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -837,6 +837,7 @@ config SCSI_ISCI
# (temporary): known alpha quality driver
depends on EXPERIMENTAL
select SCSI_SAS_LIBSAS
+ select SCSI_SAS_HOST_SMP
---help---
This driver supports the 6Gb/s SAS capabilities of the storage
control unit found in the Intel(R) C600 series chipset.
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 3c08f5352b2..6153a66a8a3 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -88,7 +88,7 @@ obj-$(CONFIG_SCSI_QLOGIC_FAS) += qlogicfas408.o qlogicfas.o
obj-$(CONFIG_PCMCIA_QLOGIC) += qlogicfas408.o
obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o
obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/
-obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx/
+obj-$(CONFIG_SCSI_QLA_ISCSI) += libiscsi.o qla4xxx/
obj-$(CONFIG_SCSI_LPFC) += lpfc/
obj-$(CONFIG_SCSI_BFA_FC) += bfa/
obj-$(CONFIG_SCSI_PAS16) += pas16.o
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index e7d0d47b918..e5f2d7d9002 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -1283,6 +1283,8 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced)
kfree(aac->queues);
aac->queues = NULL;
free_irq(aac->pdev->irq, aac);
+ if (aac->msi)
+ pci_disable_msi(aac->pdev);
kfree(aac->fsa_dev);
aac->fsa_dev = NULL;
quirks = aac_get_driver_ident(index)->quirks;
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index f4aa6375339..1242c7c04a0 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -912,7 +912,7 @@ static void l2t_put(struct cxgbi_sock *csk)
struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev;
if (csk->l2t) {
- l2t_release(L2DATA(t3dev), csk->l2t);
+ l2t_release(t3dev, csk->l2t);
csk->l2t = NULL;
cxgbi_sock_put(csk);
}
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index f84084bba2f..16ad97df5ba 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -1721,7 +1721,7 @@ static int sas_find_bcast_dev(struct domain_device *dev,
list_for_each_entry(ch, &ex->children, siblings) {
if (ch->dev_type == EDGE_DEV || ch->dev_type == FANOUT_DEV) {
res = sas_find_bcast_dev(ch, src_dev);
- if (src_dev)
+ if (*src_dev)
return res;
}
}
@@ -1769,10 +1769,12 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent,
sas_disable_routing(parent, phy->attached_sas_addr);
}
memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
- sas_port_delete_phy(phy->port, phy->phy);
- if (phy->port->num_phys == 0)
- sas_port_delete(phy->port);
- phy->port = NULL;
+ if (phy->port) {
+ sas_port_delete_phy(phy->port, phy->phy);
+ if (phy->port->num_phys == 0)
+ sas_port_delete(phy->port);
+ phy->port = NULL;
+ }
}
static int sas_discover_bfs_by_root_level(struct domain_device *root,
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 646fc5263d5..8a7591f035e 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1507,8 +1507,8 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
if (k != blocks_done) {
qla_printk(KERN_WARNING, sp->fcport->vha->hw,
- "unexpected tag values tag:lba=%x:%lx)\n",
- e_ref_tag, lba_s);
+ "unexpected tag values tag:lba=%x:%llx)\n",
+ e_ref_tag, (unsigned long long)lba_s);
return 1;
}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 4cace3f20c0..1e69527f1e4 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1328,10 +1328,9 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
qla2x00_sp_compl(ha, sp);
} else {
ctx = sp->ctx;
- if (ctx->type == SRB_LOGIN_CMD ||
- ctx->type == SRB_LOGOUT_CMD) {
- ctx->u.iocb_cmd->free(sp);
- } else {
+ if (ctx->type == SRB_ELS_CMD_RPT ||
+ ctx->type == SRB_ELS_CMD_HST ||
+ ctx->type == SRB_CT_CMD) {
struct fc_bsg_job *bsg_job =
ctx->u.bsg_job;
if (bsg_job->request->msgcode
@@ -1343,6 +1342,8 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
kfree(sp->ctx);
mempool_free(sp,
ha->srb_mempool);
+ } else {
+ ctx->u.iocb_cmd->free(sp);
}
}
}
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index d2407558773..24cacff5778 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -825,6 +825,9 @@ static void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi)
{
struct device *dev = mspi->dev;
+ if (!(mspi->flags & SPI_CPM_MODE))
+ return;
+
dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE);
dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
cpm_muram_free(cpm_muram_offset(mspi->tx_bd));
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 8ac6542aedc..fa594d604ac 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -786,9 +786,11 @@ static int __devinit spi_imx_probe(struct platform_device *pdev)
int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
if (cs_gpio < 0)
cs_gpio = mxc_platform_info->chipselect[i];
+
+ spi_imx->chipselect[i] = cs_gpio;
if (cs_gpio < 0)
continue;
- spi_imx->chipselect[i] = cs_gpio;
+
ret = gpio_request(spi_imx->chipselect[i], DRIVER_NAME);
if (ret) {
while (i > 0) {
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 1d23f383186..6a80749391d 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -50,6 +50,8 @@
#define PCH_RX_THOLD 7
#define PCH_RX_THOLD_MAX 15
+#define PCH_TX_THOLD 2
+
#define PCH_MAX_BAUDRATE 5000000
#define PCH_MAX_FIFO_DEPTH 16
@@ -58,6 +60,7 @@
#define PCH_SLEEP_TIME 10
#define SSN_LOW 0x02U
+#define SSN_HIGH 0x03U
#define SSN_NO_CONTROL 0x00U
#define PCH_MAX_CS 0xFF
#define PCI_DEVICE_ID_GE_SPI 0x8816
@@ -316,16 +319,19 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
/* if transfer complete interrupt */
if (reg_spsr_val & SPSR_FI_BIT) {
- if (tx_index < bpw_len)
+ if ((tx_index == bpw_len) && (rx_index == tx_index)) {
+ /* disable interrupts */
+ pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
+
+ /* transfer is completed;
+ inform pch_spi_process_messages */
+ data->transfer_complete = true;
+ data->transfer_active = false;
+ wake_up(&data->wait);
+ } else {
dev_err(&data->master->dev,
"%s : Transfer is not completed", __func__);
- /* disable interrupts */
- pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
-
- /* transfer is completed;inform pch_spi_process_messages */
- data->transfer_complete = true;
- data->transfer_active = false;
- wake_up(&data->wait);
+ }
}
}
@@ -348,16 +354,26 @@ static irqreturn_t pch_spi_handler(int irq, void *dev_id)
"%s returning due to suspend\n", __func__);
return IRQ_NONE;
}
- if (data->use_dma)
- return IRQ_NONE;
io_remap_addr = data->io_remap_addr;
spsr = io_remap_addr + PCH_SPSR;
reg_spsr_val = ioread32(spsr);
- if (reg_spsr_val & SPSR_ORF_BIT)
- dev_err(&board_dat->pdev->dev, "%s Over run error", __func__);
+ if (reg_spsr_val & SPSR_ORF_BIT) {
+ dev_err(&board_dat->pdev->dev, "%s Over run error\n", __func__);
+ if (data->current_msg->complete != 0) {
+ data->transfer_complete = true;
+ data->current_msg->status = -EIO;
+ data->current_msg->complete(data->current_msg->context);
+ data->bcurrent_msg_processing = false;
+ data->current_msg = NULL;
+ data->cur_trans = NULL;
+ }
+ }
+
+ if (data->use_dma)
+ return IRQ_NONE;
/* Check if the interrupt is for SPI device */
if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) {
@@ -756,10 +772,6 @@ static void pch_spi_set_ir(struct pch_spi_data *data)
wait_event_interruptible(data->wait, data->transfer_complete);
- pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
- dev_dbg(&data->master->dev,
- "%s:no more control over SSN-writing 0 to SSNXCR.", __func__);
-
/* clear all interrupts */
pch_spi_writereg(data->master, PCH_SPSR,
pch_spi_readreg(data->master, PCH_SPSR));
@@ -815,10 +827,11 @@ static void pch_spi_copy_rx_data_for_dma(struct pch_spi_data *data, int bpw)
}
}
-static void pch_spi_start_transfer(struct pch_spi_data *data)
+static int pch_spi_start_transfer(struct pch_spi_data *data)
{
struct pch_spi_dma_ctrl *dma;
unsigned long flags;
+ int rtn;
dma = &data->dma;
@@ -833,19 +846,23 @@ static void pch_spi_start_transfer(struct pch_spi_data *data)
initiating the transfer. */
dev_dbg(&data->master->dev,
"%s:waiting for transfer to get over\n", __func__);
- wait_event_interruptible(data->wait, data->transfer_complete);
+ rtn = wait_event_interruptible_timeout(data->wait,
+ data->transfer_complete,
+ msecs_to_jiffies(2 * HZ));
dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent,
DMA_FROM_DEVICE);
+
+ dma_sync_sg_for_cpu(&data->master->dev, dma->sg_tx_p, dma->nent,
+ DMA_FROM_DEVICE);
+ memset(data->dma.tx_buf_virt, 0, PAGE_SIZE);
+
async_tx_ack(dma->desc_rx);
async_tx_ack(dma->desc_tx);
kfree(dma->sg_tx_p);
kfree(dma->sg_rx_p);
spin_lock_irqsave(&data->lock, flags);
- pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
- dev_dbg(&data->master->dev,
- "%s:no more control over SSN-writing 0 to SSNXCR.", __func__);
/* clear fifo threshold, disable interrupts, disable SPI transfer */
pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
@@ -858,6 +875,8 @@ static void pch_spi_start_transfer(struct pch_spi_data *data)
pch_spi_clear_fifo(data->master);
spin_unlock_irqrestore(&data->lock, flags);
+
+ return rtn;
}
static void pch_dma_rx_complete(void *arg)
@@ -1023,8 +1042,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
/* set receive fifo threshold and transmit fifo threshold */
pch_spi_setclr_reg(data->master, PCH_SPCR,
((size - 1) << SPCR_RFIC_FIELD) |
- ((PCH_MAX_FIFO_DEPTH - PCH_DMA_TRANS_SIZE) <<
- SPCR_TFIC_FIELD),
+ (PCH_TX_THOLD << SPCR_TFIC_FIELD),
MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS);
spin_unlock_irqrestore(&data->lock, flags);
@@ -1035,13 +1053,20 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
/* offset, length setting */
sg = dma->sg_rx_p;
for (i = 0; i < num; i++, sg++) {
- if (i == 0) {
- sg->offset = 0;
+ if (i == (num - 2)) {
+ sg->offset = size * i;
+ sg->offset = sg->offset * (*bpw / 8);
sg_set_page(sg, virt_to_page(dma->rx_buf_virt), rem,
sg->offset);
sg_dma_len(sg) = rem;
+ } else if (i == (num - 1)) {
+ sg->offset = size * (i - 1) + rem;
+ sg->offset = sg->offset * (*bpw / 8);
+ sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size,
+ sg->offset);
+ sg_dma_len(sg) = size;
} else {
- sg->offset = rem + size * (i - 1);
+ sg->offset = size * i;
sg->offset = sg->offset * (*bpw / 8);
sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size,
sg->offset);
@@ -1065,6 +1090,16 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
dma->desc_rx = desc_rx;
/* TX */
+ if (data->bpw_len > PCH_DMA_TRANS_SIZE) {
+ num = data->bpw_len / PCH_DMA_TRANS_SIZE;
+ size = PCH_DMA_TRANS_SIZE;
+ rem = 16;
+ } else {
+ num = 1;
+ size = data->bpw_len;
+ rem = data->bpw_len;
+ }
+
dma->sg_tx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC);
sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */
/* offset, length setting */
@@ -1162,6 +1197,7 @@ static void pch_spi_process_messages(struct work_struct *pwork)
if (data->use_dma)
pch_spi_request_dma(data,
data->current_msg->spi->bits_per_word);
+ pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
do {
/* If we are already processing a message get the next
transfer structure from the message otherwise retrieve
@@ -1184,7 +1220,8 @@ static void pch_spi_process_messages(struct work_struct *pwork)
if (data->use_dma) {
pch_spi_handle_dma(data, &bpw);
- pch_spi_start_transfer(data);
+ if (!pch_spi_start_transfer(data))
+ goto out;
pch_spi_copy_rx_data_for_dma(data, bpw);
} else {
pch_spi_set_tx(data, &bpw);
@@ -1222,6 +1259,8 @@ static void pch_spi_process_messages(struct work_struct *pwork)
} while (data->cur_trans != NULL);
+out:
+ pch_spi_writereg(data->master, PCH_SSNXCR, SSN_HIGH);
if (data->use_dma)
pch_spi_release_dma(data);
}
diff --git a/drivers/staging/comedi/drivers/ni_labpc.c b/drivers/staging/comedi/drivers/ni_labpc.c
index 6859af0778c..7611def97d0 100644
--- a/drivers/staging/comedi/drivers/ni_labpc.c
+++ b/drivers/staging/comedi/drivers/ni_labpc.c
@@ -241,8 +241,10 @@ static int labpc_eeprom_write_insn(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data);
static void labpc_adc_timing(struct comedi_device *dev, struct comedi_cmd *cmd);
-#ifdef CONFIG_COMEDI_PCI
+#ifdef CONFIG_ISA_DMA_API
static unsigned int labpc_suggest_transfer_size(struct comedi_cmd cmd);
+#endif
+#ifdef CONFIG_COMEDI_PCI
static int labpc_find_device(struct comedi_device *dev, int bus, int slot);
#endif
static int labpc_dio_mem_callback(int dir, int port, int data,
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 1a7c19ae766..8b307b42879 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -411,7 +411,8 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
skb->protocol = eth_type_trans(skb, dev);
skb->dev = dev;
- if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc || work->word2.s.L4_error))
+ if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc ||
+ work->word2.s.L4_error || !work->word2.s.tcp_or_udp))
skb->ip_summed = CHECKSUM_NONE;
else
skb->ip_summed = CHECKSUM_UNNECESSARY;
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index a3f5162bfed..462fbc20561 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -1242,7 +1242,7 @@ static int zcache_pampd_get_data_and_free(char *data, size_t *bufsize, bool raw,
int ret = 0;
BUG_ON(!is_ephemeral(pool));
- zbud_decompress(virt_to_page(data), pampd);
+ zbud_decompress((struct page *)(data), pampd);
zbud_free_and_delist((struct zbud_hdr *)pampd);
atomic_dec(&zcache_curr_eph_pampd_count);
return ret;
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
index 1060c7b7f80..62e54053bcd 100644
--- a/drivers/target/Makefile
+++ b/drivers/target/Makefile
@@ -6,7 +6,6 @@ target_core_mod-y := target_core_configfs.o \
target_core_hba.o \
target_core_pr.o \
target_core_alua.o \
- target_core_scdb.o \
target_core_tmr.o \
target_core_tpg.o \
target_core_transport.o \
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 6a4ea29c2f3..4d01768fcd9 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -765,7 +765,7 @@ static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd)
u32 iov_count = (cmd->se_cmd.t_data_nents == 0) ? 1 :
cmd->se_cmd.t_data_nents;
- iov_count += TRANSPORT_IOV_DATA_BUFFER;
+ iov_count += ISCSI_IOV_DATA_BUFFER;
cmd->iov_data = kzalloc(iov_count * sizeof(struct kvec), GFP_KERNEL);
if (!cmd->iov_data) {
@@ -3538,16 +3538,8 @@ get_immediate:
spin_lock_bh(&conn->cmd_lock);
list_del(&cmd->i_list);
spin_unlock_bh(&conn->cmd_lock);
- /*
- * Determine if a struct se_cmd is assoicated with
- * this struct iscsi_cmd.
- */
- if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) &&
- !(cmd->tmr_req))
- iscsit_release_cmd(cmd);
- else
- transport_generic_free_cmd(&cmd->se_cmd,
- 1, 0);
+
+ iscsit_free_cmd(cmd);
goto get_immediate;
case ISTATE_SEND_NOPIN_WANT_RESPONSE:
spin_unlock_bh(&cmd->istate_lock);
@@ -3940,7 +3932,6 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
{
struct iscsi_cmd *cmd = NULL, *cmd_tmp = NULL;
struct iscsi_session *sess = conn->sess;
- struct se_cmd *se_cmd;
/*
* We expect this function to only ever be called from either RX or TX
* thread context via iscsit_close_connection() once the other context
@@ -3948,35 +3939,13 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
*/
spin_lock_bh(&conn->cmd_lock);
list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) {
- if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD)) {
- list_del(&cmd->i_list);
- spin_unlock_bh(&conn->cmd_lock);
- iscsit_increment_maxcmdsn(cmd, sess);
- se_cmd = &cmd->se_cmd;
- /*
- * Special cases for active iSCSI TMR, and
- * transport_lookup_cmd_lun() failing from
- * iscsit_get_lun_for_cmd() in iscsit_handle_scsi_cmd().
- */
- if (cmd->tmr_req && se_cmd->transport_wait_for_tasks)
- se_cmd->transport_wait_for_tasks(se_cmd, 1, 1);
- else if (cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD)
- transport_release_cmd(se_cmd);
- else
- iscsit_release_cmd(cmd);
-
- spin_lock_bh(&conn->cmd_lock);
- continue;
- }
list_del(&cmd->i_list);
spin_unlock_bh(&conn->cmd_lock);
iscsit_increment_maxcmdsn(cmd, sess);
- se_cmd = &cmd->se_cmd;
- if (se_cmd->transport_wait_for_tasks)
- se_cmd->transport_wait_for_tasks(se_cmd, 1, 1);
+ iscsit_free_cmd(cmd);
spin_lock_bh(&conn->cmd_lock);
}
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index 11fd7430781..beb39469e7f 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -18,6 +18,7 @@
* GNU General Public License for more details.
******************************************************************************/
+#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/crypto.h>
#include <linux/err.h>
@@ -27,40 +28,11 @@
#include "iscsi_target_nego.h"
#include "iscsi_target_auth.h"
-static unsigned char chap_asciihex_to_binaryhex(unsigned char val[2])
-{
- unsigned char result = 0;
- /*
- * MSB
- */
- if ((val[0] >= 'a') && (val[0] <= 'f'))
- result = ((val[0] - 'a' + 10) & 0xf) << 4;
- else
- if ((val[0] >= 'A') && (val[0] <= 'F'))
- result = ((val[0] - 'A' + 10) & 0xf) << 4;
- else /* digit */
- result = ((val[0] - '0') & 0xf) << 4;
- /*
- * LSB
- */
- if ((val[1] >= 'a') && (val[1] <= 'f'))
- result |= ((val[1] - 'a' + 10) & 0xf);
- else
- if ((val[1] >= 'A') && (val[1] <= 'F'))
- result |= ((val[1] - 'A' + 10) & 0xf);
- else /* digit */
- result |= ((val[1] - '0') & 0xf);
-
- return result;
-}
-
static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
{
- int i, j = 0;
+ int j = DIV_ROUND_UP(len, 2);
- for (i = 0; i < len; i += 2) {
- dst[j++] = (unsigned char) chap_asciihex_to_binaryhex(&src[i]);
- }
+ hex2bin(dst, src, j);
dst[j] = '\0';
return j;
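The iscsi_target_auth.c hunk above drops the driver's private ASCII-hex decoder in favour of the kernel-wide hex2bin() helper, sizing the output with DIV_ROUND_UP(len, 2). For readers without the removed helper in front of them, here is a small stand-alone sketch of the equivalent conversion; it mirrors the deleted chap_asciihex_to_binaryhex() logic and is not the kernel's hex2bin() implementation.

/* Illustrative stand-alone equivalent of the conversion the hunk above
 * delegates to hex2bin(): turn an ASCII hex string into raw bytes.
 * Sketch only, not kernel code. */
#include <stdio.h>

static int nibble(unsigned char c)
{
	if (c >= '0' && c <= '9') return c - '0';
	if (c >= 'a' && c <= 'f') return c - 'a' + 10;
	if (c >= 'A' && c <= 'F') return c - 'A' + 10;
	return -1;
}

/* Convert len ASCII hex chars from src into len/2 bytes in dst. */
static int ascii_hex_to_bin(unsigned char *dst, const unsigned char *src, int len)
{
	int i, j = 0;

	for (i = 0; i + 1 < len; i += 2) {
		int hi = nibble(src[i]), lo = nibble(src[i + 1]);

		if (hi < 0 || lo < 0)
			return -1;
		dst[j++] = (hi << 4) | lo;
	}
	return j;	/* bytes written, as chap_string_to_hex() returns */
}

int main(void)
{
	unsigned char out[8];
	int n = ascii_hex_to_bin(out, (const unsigned char *)"DEADbeef", 8);

	printf("%d bytes, first byte 0x%02x\n", n, out[0]);
	return 0;
}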
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 470ed551eeb..3723d90d5ae 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -57,6 +57,9 @@
#define TA_PROD_MODE_WRITE_PROTECT 0
#define TA_CACHE_CORE_NPS 0
+
+#define ISCSI_IOV_DATA_BUFFER 5
+
enum tpg_np_network_transport_table {
ISCSI_TCP = 0,
ISCSI_SCTP_TCP = 1,
@@ -425,7 +428,6 @@ struct iscsi_cmd {
/* Number of times struct iscsi_cmd is present in immediate queue */
atomic_t immed_queue_count;
atomic_t response_queue_count;
- atomic_t transport_sent;
spinlock_t datain_lock;
spinlock_t dataout_timeout_lock;
/* spinlock for protecting struct iscsi_cmd->i_state */
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
index 91a4d170bda..0b8404c3012 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.c
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -143,12 +143,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
list_del(&cmd->i_list);
cmd->conn = NULL;
spin_unlock(&cr->conn_recovery_cmd_lock);
- if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
- !(cmd->se_cmd.transport_wait_for_tasks))
- iscsit_release_cmd(cmd);
- else
- cmd->se_cmd.transport_wait_for_tasks(
- &cmd->se_cmd, 1, 1);
+ iscsit_free_cmd(cmd);
spin_lock(&cr->conn_recovery_cmd_lock);
}
spin_unlock(&cr->conn_recovery_cmd_lock);
@@ -170,12 +165,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
list_del(&cmd->i_list);
cmd->conn = NULL;
spin_unlock(&cr->conn_recovery_cmd_lock);
- if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
- !(cmd->se_cmd.transport_wait_for_tasks))
- iscsit_release_cmd(cmd);
- else
- cmd->se_cmd.transport_wait_for_tasks(
- &cmd->se_cmd, 1, 1);
+ iscsit_free_cmd(cmd);
spin_lock(&cr->conn_recovery_cmd_lock);
}
spin_unlock(&cr->conn_recovery_cmd_lock);
@@ -260,12 +250,7 @@ void iscsit_discard_cr_cmds_by_expstatsn(
iscsit_remove_cmd_from_connection_recovery(cmd, sess);
spin_unlock(&cr->conn_recovery_cmd_lock);
- if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
- !(cmd->se_cmd.transport_wait_for_tasks))
- iscsit_release_cmd(cmd);
- else
- cmd->se_cmd.transport_wait_for_tasks(
- &cmd->se_cmd, 1, 0);
+ iscsit_free_cmd(cmd);
spin_lock(&cr->conn_recovery_cmd_lock);
}
spin_unlock(&cr->conn_recovery_cmd_lock);
@@ -319,12 +304,7 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
list_del(&cmd->i_list);
spin_unlock_bh(&conn->cmd_lock);
- if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
- !(cmd->se_cmd.transport_wait_for_tasks))
- iscsit_release_cmd(cmd);
- else
- cmd->se_cmd.transport_wait_for_tasks(
- &cmd->se_cmd, 1, 1);
+ iscsit_free_cmd(cmd);
spin_lock_bh(&conn->cmd_lock);
}
spin_unlock_bh(&conn->cmd_lock);
@@ -377,13 +357,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
list_del(&cmd->i_list);
spin_unlock_bh(&conn->cmd_lock);
-
- if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
- !(cmd->se_cmd.transport_wait_for_tasks))
- iscsit_release_cmd(cmd);
- else
- cmd->se_cmd.transport_wait_for_tasks(
- &cmd->se_cmd, 1, 0);
+ iscsit_free_cmd(cmd);
spin_lock_bh(&conn->cmd_lock);
continue;
}
@@ -403,13 +377,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
(cmd->cmd_sn >= conn->sess->exp_cmd_sn)) {
list_del(&cmd->i_list);
spin_unlock_bh(&conn->cmd_lock);
-
- if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
- !(cmd->se_cmd.transport_wait_for_tasks))
- iscsit_release_cmd(cmd);
- else
- cmd->se_cmd.transport_wait_for_tasks(
- &cmd->se_cmd, 1, 1);
+ iscsit_free_cmd(cmd);
spin_lock_bh(&conn->cmd_lock);
continue;
}
@@ -434,10 +402,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
iscsit_free_all_datain_reqs(cmd);
- if ((cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) &&
- cmd->se_cmd.transport_wait_for_tasks)
- cmd->se_cmd.transport_wait_for_tasks(&cmd->se_cmd,
- 0, 0);
+ transport_wait_for_tasks(&cmd->se_cmd);
/*
* Add the struct iscsi_cmd to the connection recovery cmd list
*/
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 497b2e718a7..5b773160200 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -1430,7 +1430,7 @@ static int iscsi_enforce_integrity_rules(
u8 DataSequenceInOrder = 0;
u8 ErrorRecoveryLevel = 0, SessionType = 0;
u8 IFMarker = 0, OFMarker = 0;
- u8 IFMarkInt_Reject = 0, OFMarkInt_Reject = 0;
+ u8 IFMarkInt_Reject = 1, OFMarkInt_Reject = 1;
u32 FirstBurstLength = 0, MaxBurstLength = 0;
struct iscsi_param *param = NULL;
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
index db1fe1ec84d..490207eacde 100644
--- a/drivers/target/iscsi/iscsi_target_tmr.c
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -250,7 +250,7 @@ static int iscsit_task_reassign_complete_write(
* so if we have received all DataOUT we can safety ignore Initiator.
*/
if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
- if (!atomic_read(&cmd->transport_sent)) {
+ if (!atomic_read(&cmd->se_cmd.t_transport_sent)) {
pr_debug("WRITE ITT: 0x%08x: t_state: %d"
" never sent to transport\n",
cmd->init_task_tag, cmd->se_cmd.t_state);
@@ -314,11 +314,11 @@ static int iscsit_task_reassign_complete_read(
cmd->acked_data_sn = (tmr_req->exp_data_sn - 1);
}
- if (!atomic_read(&cmd->transport_sent)) {
+ if (!atomic_read(&cmd->se_cmd.t_transport_sent)) {
pr_debug("READ ITT: 0x%08x: t_state: %d never sent to"
" transport\n", cmd->init_task_tag,
cmd->se_cmd.t_state);
- transport_generic_handle_cdb(se_cmd);
+ transport_handle_cdb_direct(se_cmd);
return 0;
}
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index a0d23bc0fc9..02348f727bd 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -289,7 +289,8 @@ struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(
}
se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd,
- (void *)cmd->tmr_req, tcm_function);
+ (void *)cmd->tmr_req, tcm_function,
+ GFP_KERNEL);
if (!se_cmd->se_tmr_req)
goto out;
@@ -839,6 +840,23 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd)
kmem_cache_free(lio_cmd_cache, cmd);
}
+void iscsit_free_cmd(struct iscsi_cmd *cmd)
+{
+ /*
+ * Determine if a struct se_cmd is associated with
+ * this struct iscsi_cmd.
+ */
+ switch (cmd->iscsi_opcode) {
+ case ISCSI_OP_SCSI_CMD:
+ case ISCSI_OP_SCSI_TMFUNC:
+ transport_generic_free_cmd(&cmd->se_cmd, 1);
+ break;
+ default:
+ iscsit_release_cmd(cmd);
+ break;
+ }
+}
+
int iscsit_check_session_usage_count(struct iscsi_session *sess)
{
spin_lock_bh(&sess->session_usage_lock);
@@ -875,40 +893,6 @@ void iscsit_inc_session_usage_count(struct iscsi_session *sess)
}
/*
- * Used before iscsi_do[rx,tx]_data() to determine iov and [rx,tx]_marker
- * array counts needed for sync and steering.
- */
-static int iscsit_determine_sync_and_steering_counts(
- struct iscsi_conn *conn,
- struct iscsi_data_count *count)
-{
- u32 length = count->data_length;
- u32 marker, markint;
-
- count->sync_and_steering = 1;
-
- marker = (count->type == ISCSI_RX_DATA) ?
- conn->of_marker : conn->if_marker;
- markint = (count->type == ISCSI_RX_DATA) ?
- (conn->conn_ops->OFMarkInt * 4) :
- (conn->conn_ops->IFMarkInt * 4);
- count->ss_iov_count = count->iov_count;
-
- while (length > 0) {
- if (length >= marker) {
- count->ss_iov_count += 3;
- count->ss_marker_count += 2;
-
- length -= marker;
- marker = markint;
- } else
- length = 0;
- }
-
- return 0;
-}
-
-/*
* Setup conn->if_marker and conn->of_marker values based upon
* the initial marker-less interval. (see iSCSI v19 A.2)
*/
@@ -1290,7 +1274,7 @@ int iscsit_fe_sendpage_sg(
struct kvec iov;
u32 tx_hdr_size, data_len;
u32 offset = cmd->first_data_sg_off;
- int tx_sent;
+ int tx_sent, iov_off;
send_hdr:
tx_hdr_size = ISCSI_HDR_LEN;
@@ -1310,9 +1294,19 @@ send_hdr:
}
data_len = cmd->tx_size - tx_hdr_size - cmd->padding;
- if (conn->conn_ops->DataDigest)
+ /*
+ * Set iov_off used by padding and data digest tx_data() calls below
+ * in order to determine proper offset into cmd->iov_data[]
+ */
+ if (conn->conn_ops->DataDigest) {
data_len -= ISCSI_CRC_LEN;
-
+ if (cmd->padding)
+ iov_off = (cmd->iov_data_count - 2);
+ else
+ iov_off = (cmd->iov_data_count - 1);
+ } else {
+ iov_off = (cmd->iov_data_count - 1);
+ }
/*
* Perform sendpage() for each page in the scatterlist
*/
@@ -1341,8 +1335,7 @@ send_pg:
send_padding:
if (cmd->padding) {
- struct kvec *iov_p =
- &cmd->iov_data[cmd->iov_data_count-1];
+ struct kvec *iov_p = &cmd->iov_data[iov_off++];
tx_sent = tx_data(conn, iov_p, 1, cmd->padding);
if (cmd->padding != tx_sent) {
@@ -1356,8 +1349,7 @@ send_padding:
send_datacrc:
if (conn->conn_ops->DataDigest) {
- struct kvec *iov_d =
- &cmd->iov_data[cmd->iov_data_count];
+ struct kvec *iov_d = &cmd->iov_data[iov_off];
tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN);
if (ISCSI_CRC_LEN != tx_sent) {
@@ -1431,8 +1423,7 @@ static int iscsit_do_rx_data(
struct iscsi_data_count *count)
{
int data = count->data_length, rx_loop = 0, total_rx = 0, iov_len;
- u32 rx_marker_val[count->ss_marker_count], rx_marker_iov = 0;
- struct kvec iov[count->ss_iov_count], *iov_p;
+ struct kvec *iov_p;
struct msghdr msg;
if (!conn || !conn->sock || !conn->conn_ops)
@@ -1440,93 +1431,8 @@ static int iscsit_do_rx_data(
memset(&msg, 0, sizeof(struct msghdr));
- if (count->sync_and_steering) {
- int size = 0;
- u32 i, orig_iov_count = 0;
- u32 orig_iov_len = 0, orig_iov_loc = 0;
- u32 iov_count = 0, per_iov_bytes = 0;
- u32 *rx_marker, old_rx_marker = 0;
- struct kvec *iov_record;
-
- memset(&rx_marker_val, 0,
- count->ss_marker_count * sizeof(u32));
- memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec));
-
- iov_record = count->iov;
- orig_iov_count = count->iov_count;
- rx_marker = &conn->of_marker;
-
- i = 0;
- size = data;
- orig_iov_len = iov_record[orig_iov_loc].iov_len;
- while (size > 0) {
- pr_debug("rx_data: #1 orig_iov_len %u,"
- " orig_iov_loc %u\n", orig_iov_len, orig_iov_loc);
- pr_debug("rx_data: #2 rx_marker %u, size"
- " %u\n", *rx_marker, size);
-
- if (orig_iov_len >= *rx_marker) {
- iov[iov_count].iov_len = *rx_marker;
- iov[iov_count++].iov_base =
- (iov_record[orig_iov_loc].iov_base +
- per_iov_bytes);
-
- iov[iov_count].iov_len = (MARKER_SIZE / 2);
- iov[iov_count++].iov_base =
- &rx_marker_val[rx_marker_iov++];
- iov[iov_count].iov_len = (MARKER_SIZE / 2);
- iov[iov_count++].iov_base =
- &rx_marker_val[rx_marker_iov++];
- old_rx_marker = *rx_marker;
-
- /*
- * OFMarkInt is in 32-bit words.
- */
- *rx_marker = (conn->conn_ops->OFMarkInt * 4);
- size -= old_rx_marker;
- orig_iov_len -= old_rx_marker;
- per_iov_bytes += old_rx_marker;
-
- pr_debug("rx_data: #3 new_rx_marker"
- " %u, size %u\n", *rx_marker, size);
- } else {
- iov[iov_count].iov_len = orig_iov_len;
- iov[iov_count++].iov_base =
- (iov_record[orig_iov_loc].iov_base +
- per_iov_bytes);
-
- per_iov_bytes = 0;
- *rx_marker -= orig_iov_len;
- size -= orig_iov_len;
-
- if (size)
- orig_iov_len =
- iov_record[++orig_iov_loc].iov_len;
-
- pr_debug("rx_data: #4 new_rx_marker"
- " %u, size %u\n", *rx_marker, size);
- }
- }
- data += (rx_marker_iov * (MARKER_SIZE / 2));
-
- iov_p = &iov[0];
- iov_len = iov_count;
-
- if (iov_count > count->ss_iov_count) {
- pr_err("iov_count: %d, count->ss_iov_count:"
- " %d\n", iov_count, count->ss_iov_count);
- return -1;
- }
- if (rx_marker_iov > count->ss_marker_count) {
- pr_err("rx_marker_iov: %d, count->ss_marker"
- "_count: %d\n", rx_marker_iov,
- count->ss_marker_count);
- return -1;
- }
- } else {
- iov_p = count->iov;
- iov_len = count->iov_count;
- }
+ iov_p = count->iov;
+ iov_len = count->iov_count;
while (total_rx < data) {
rx_loop = kernel_recvmsg(conn->sock, &msg, iov_p, iov_len,
@@ -1541,16 +1447,6 @@ static int iscsit_do_rx_data(
rx_loop, total_rx, data);
}
- if (count->sync_and_steering) {
- int j;
- for (j = 0; j < rx_marker_iov; j++) {
- pr_debug("rx_data: #5 j: %d, offset: %d\n",
- j, rx_marker_val[j]);
- conn->of_marker_offset = rx_marker_val[j];
- }
- total_rx -= (rx_marker_iov * (MARKER_SIZE / 2));
- }
-
return total_rx;
}
@@ -1559,8 +1455,7 @@ static int iscsit_do_tx_data(
struct iscsi_data_count *count)
{
int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len;
- u32 tx_marker_val[count->ss_marker_count], tx_marker_iov = 0;
- struct kvec iov[count->ss_iov_count], *iov_p;
+ struct kvec *iov_p;
struct msghdr msg;
if (!conn || !conn->sock || !conn->conn_ops)
@@ -1573,98 +1468,8 @@ static int iscsit_do_tx_data(
memset(&msg, 0, sizeof(struct msghdr));
- if (count->sync_and_steering) {
- int size = 0;
- u32 i, orig_iov_count = 0;
- u32 orig_iov_len = 0, orig_iov_loc = 0;
- u32 iov_count = 0, per_iov_bytes = 0;
- u32 *tx_marker, old_tx_marker = 0;
- struct kvec *iov_record;
-
- memset(&tx_marker_val, 0,
- count->ss_marker_count * sizeof(u32));
- memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec));
-
- iov_record = count->iov;
- orig_iov_count = count->iov_count;
- tx_marker = &conn->if_marker;
-
- i = 0;
- size = data;
- orig_iov_len = iov_record[orig_iov_loc].iov_len;
- while (size > 0) {
- pr_debug("tx_data: #1 orig_iov_len %u,"
- " orig_iov_loc %u\n", orig_iov_len, orig_iov_loc);
- pr_debug("tx_data: #2 tx_marker %u, size"
- " %u\n", *tx_marker, size);
-
- if (orig_iov_len >= *tx_marker) {
- iov[iov_count].iov_len = *tx_marker;
- iov[iov_count++].iov_base =
- (iov_record[orig_iov_loc].iov_base +
- per_iov_bytes);
-
- tx_marker_val[tx_marker_iov] =
- (size - *tx_marker);
- iov[iov_count].iov_len = (MARKER_SIZE / 2);
- iov[iov_count++].iov_base =
- &tx_marker_val[tx_marker_iov++];
- iov[iov_count].iov_len = (MARKER_SIZE / 2);
- iov[iov_count++].iov_base =
- &tx_marker_val[tx_marker_iov++];
- old_tx_marker = *tx_marker;
-
- /*
- * IFMarkInt is in 32-bit words.
- */
- *tx_marker = (conn->conn_ops->IFMarkInt * 4);
- size -= old_tx_marker;
- orig_iov_len -= old_tx_marker;
- per_iov_bytes += old_tx_marker;
-
- pr_debug("tx_data: #3 new_tx_marker"
- " %u, size %u\n", *tx_marker, size);
- pr_debug("tx_data: #4 offset %u\n",
- tx_marker_val[tx_marker_iov-1]);
- } else {
- iov[iov_count].iov_len = orig_iov_len;
- iov[iov_count++].iov_base
- = (iov_record[orig_iov_loc].iov_base +
- per_iov_bytes);
-
- per_iov_bytes = 0;
- *tx_marker -= orig_iov_len;
- size -= orig_iov_len;
-
- if (size)
- orig_iov_len =
- iov_record[++orig_iov_loc].iov_len;
-
- pr_debug("tx_data: #5 new_tx_marker"
- " %u, size %u\n", *tx_marker, size);
- }
- }
-
- data += (tx_marker_iov * (MARKER_SIZE / 2));
-
- iov_p = &iov[0];
- iov_len = iov_count;
-
- if (iov_count > count->ss_iov_count) {
- pr_err("iov_count: %d, count->ss_iov_count:"
- " %d\n", iov_count, count->ss_iov_count);
- return -1;
- }
- if (tx_marker_iov > count->ss_marker_count) {
- pr_err("tx_marker_iov: %d, count->ss_marker"
- "_count: %d\n", tx_marker_iov,
- count->ss_marker_count);
- return -1;
- }
- } else {
- iov_p = count->iov;
- iov_len = count->iov_count;
- }
+ iov_p = count->iov;
+ iov_len = count->iov_count;
while (total_tx < data) {
tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
@@ -1679,9 +1484,6 @@ static int iscsit_do_tx_data(
tx_loop, total_tx, data);
}
- if (count->sync_and_steering)
- total_tx -= (tx_marker_iov * (MARKER_SIZE / 2));
-
return total_tx;
}
@@ -1702,12 +1504,6 @@ int rx_data(
c.data_length = data;
c.type = ISCSI_RX_DATA;
- if (conn->conn_ops->OFMarker &&
- (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) {
- if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0)
- return -1;
- }
-
return iscsit_do_rx_data(conn, &c);
}
@@ -1728,12 +1524,6 @@ int tx_data(
c.data_length = data;
c.type = ISCSI_TX_DATA;
- if (conn->conn_ops->IFMarker &&
- (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) {
- if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0)
- return -1;
- }
-
return iscsit_do_tx_data(conn, &c);
}
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index 2cd49d607bd..835bf7de028 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -30,6 +30,7 @@ extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_c
extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *);
extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *);
extern void iscsit_release_cmd(struct iscsi_cmd *);
+extern void iscsit_free_cmd(struct iscsi_cmd *);
extern int iscsit_check_session_usage_count(struct iscsi_session *);
extern void iscsit_dec_session_usage_count(struct iscsi_session *);
extern void iscsit_inc_session_usage_count(struct iscsi_session *);
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index aa2d6799723..b15d8cbf630 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -200,7 +200,7 @@ static void tcm_loop_check_stop_free(struct se_cmd *se_cmd)
* Release the struct se_cmd, which will make a callback to release
* struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd()
*/
- transport_generic_free_cmd(se_cmd, 0, 0);
+ transport_generic_free_cmd(se_cmd, 0);
}
static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
@@ -290,6 +290,15 @@ static int tcm_loop_queuecommand(
*/
tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
+ /*
+ * Ensure that this tl_tpg reference from the incoming sc->device->id
+ * has already been configured via tcm_loop_make_naa_tpg().
+ */
+ if (!tl_tpg->tl_hba) {
+ set_host_byte(sc, DID_NO_CONNECT);
+ sc->scsi_done(sc);
+ return 0;
+ }
se_tpg = &tl_tpg->tl_se_tpg;
/*
* Determine the SAM Task Attribute and allocate tl_cmd and
@@ -366,7 +375,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
* Allocate the LUN_RESET TMR
*/
se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, tl_tmr,
- TMR_LUN_RESET);
+ TMR_LUN_RESET, GFP_KERNEL);
if (IS_ERR(se_cmd->se_tmr_req))
goto release;
/*
@@ -388,7 +397,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
SUCCESS : FAILED;
release:
if (se_cmd)
- transport_generic_free_cmd(se_cmd, 1, 0);
+ transport_generic_free_cmd(se_cmd, 1);
else
kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
kfree(tl_tmr);
@@ -1245,6 +1254,9 @@ void tcm_loop_drop_naa_tpg(
*/
core_tpg_deregister(se_tpg);
+ tl_tpg->tl_hba = NULL;
+ tl_tpg->tl_tpgt = 0;
+
pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s"
" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
config_item_name(&wwn->wwn_group.cg_item), tpgt);
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 007c6c298b8..8f4447749c7 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -67,6 +67,15 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
unsigned char *buf;
u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first
Target port group descriptor */
+ /*
+ * Need at least 4 bytes of response data or else we can't
+ * even fit the return data length.
+ */
+ if (cmd->data_length < 4) {
+ pr_warn("REPORT TARGET PORT GROUPS allocation length %u"
+ " too small\n", cmd->data_length);
+ return -EINVAL;
+ }
buf = transport_kmap_first_data_page(cmd);
@@ -74,6 +83,17 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
/*
+ * Check if the Target port group and Target port descriptor list
+ * based on tg_pt_gp_members count will fit into the response payload.
+ * Otherwise, bump rd_len to let the initiator know we have exceeded
+ * the allocation length and the response is truncated.
+ */
+ if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) >
+ cmd->data_length) {
+ rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4);
+ continue;
+ }
+ /*
* PREF: Preferred target port bit, determine if this
* bit should be set for port group.
*/
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 89ae923c5da..38535eb1392 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -24,6 +24,7 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
@@ -154,6 +155,37 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
return 0;
}
+static void
+target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf)
+{
+ unsigned char *p = &dev->se_sub_dev->t10_wwn.unit_serial[0];
+ int cnt;
+ bool next = true;
+
+ /*
+ * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on
+ * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field
+ * format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION
+ * to complete the payload. These are based on the VPD=0x80 PRODUCT SERIAL
+ * NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure
+ * per device uniqueness.
+ */
+ for (cnt = 0; *p && cnt < 13; p++) {
+ int val = hex_to_bin(*p);
+
+ if (val < 0)
+ continue;
+
+ if (next) {
+ next = false;
+ buf[cnt++] |= val;
+ } else {
+ next = true;
+ buf[cnt] = val << 4;
+ }
+ }
+}
+
/*
* Device identification VPD, for a complete list of
* DESIGNATOR TYPEs see spc4r17 Table 459.
@@ -219,8 +251,7 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
* VENDOR_SPECIFIC_IDENTIFIER and
* VENDOR_SPECIFIC_IDENTIFIER_EXTENTION
*/
- buf[off++] |= hex_to_bin(dev->se_sub_dev->t10_wwn.unit_serial[0]);
- hex2bin(&buf[off], &dev->se_sub_dev->t10_wwn.unit_serial[1], 12);
+ target_parse_naa_6h_vendor_specific(dev, &buf[off]);
len = 20;
off = (len + 4);
@@ -1235,3 +1266,52 @@ transport_emulate_control_cdb(struct se_task *task)
return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
+
+/*
+ * Write a CDB into @cdb that is based on the one the initiator sent us,
+ * but updated to only cover the sectors that the current task handles.
+ */
+void target_get_task_cdb(struct se_task *task, unsigned char *cdb)
+{
+ struct se_cmd *cmd = task->task_se_cmd;
+ unsigned int cdb_len = scsi_command_size(cmd->t_task_cdb);
+
+ memcpy(cdb, cmd->t_task_cdb, cdb_len);
+ if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
+ unsigned long long lba = task->task_lba;
+ u32 sectors = task->task_sectors;
+
+ switch (cdb_len) {
+ case 6:
+ /* 21-bit LBA and 8-bit sectors */
+ cdb[1] = (lba >> 16) & 0x1f;
+ cdb[2] = (lba >> 8) & 0xff;
+ cdb[3] = lba & 0xff;
+ cdb[4] = sectors & 0xff;
+ break;
+ case 10:
+ /* 32-bit LBA and 16-bit sectors */
+ put_unaligned_be32(lba, &cdb[2]);
+ put_unaligned_be16(sectors, &cdb[7]);
+ break;
+ case 12:
+ /* 32-bit LBA and 32-bit sectors */
+ put_unaligned_be32(lba, &cdb[2]);
+ put_unaligned_be32(sectors, &cdb[6]);
+ break;
+ case 16:
+ /* 64-bit LBA and 32-bit sectors */
+ put_unaligned_be64(lba, &cdb[2]);
+ put_unaligned_be32(sectors, &cdb[10]);
+ break;
+ case 32:
+ /* 64-bit LBA and 32-bit sectors, extended CDB */
+ put_unaligned_be64(lba, &cdb[12]);
+ put_unaligned_be32(sectors, &cdb[28]);
+ break;
+ default:
+ BUG();
+ }
+ }
+}
+EXPORT_SYMBOL(target_get_task_cdb);
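The new target_get_task_cdb() above patches the LBA and transfer-length fields of the initiator's CDB for each task, using put_unaligned_be*() for the 10/12/16/32-byte formats. The following stand-alone sketch expands the 10-byte case into explicit big-endian shifts to make the byte layout visible; it is an illustration, not the exported kernel helper.

/* Illustrative sketch: the byte layout target_get_task_cdb() writes for a
 * 10-byte CDB, with put_unaligned_be32()/be16() expanded into explicit
 * shifts.  Stand-alone example, not kernel code. */
#include <stdint.h>
#include <stdio.h>

static void fill_cdb10(uint8_t *cdb, uint32_t lba, uint16_t sectors)
{
	/* bytes 2..5: 32-bit LBA, big-endian */
	cdb[2] = lba >> 24;
	cdb[3] = lba >> 16;
	cdb[4] = lba >> 8;
	cdb[5] = lba;
	/* bytes 7..8: 16-bit transfer length, big-endian */
	cdb[7] = sectors >> 8;
	cdb[8] = sectors;
}

int main(void)
{
	uint8_t cdb[10] = { 0x28 };	/* READ(10) opcode, for illustration */
	int i;

	fill_cdb10(cdb, 0x12345678, 0x0010);
	for (i = 0; i < 10; i++)
		printf("%02x ", cdb[i]);
	printf("\n");
	return 0;
}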
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index f37e2b9cbbd..e0c1e8a8dd4 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -132,14 +132,6 @@ static struct config_group *target_core_register_fabric(
pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:"
" %s\n", group, name);
/*
- * Ensure that TCM subsystem plugins are loaded at this point for
- * using the RAMDISK_DR virtual LUN 0 and all other struct se_port
- * LUN symlinks.
- */
- if (transport_subsystem_check_init() < 0)
- return ERR_PTR(-EINVAL);
-
- /*
* Below are some hardcoded request_module() calls to automatically
* local fabric modules when the following is called:
*
@@ -724,9 +716,6 @@ SE_DEV_ATTR_RO(hw_queue_depth);
DEF_DEV_ATTRIB(queue_depth);
SE_DEV_ATTR(queue_depth, S_IRUGO | S_IWUSR);
-DEF_DEV_ATTRIB(task_timeout);
-SE_DEV_ATTR(task_timeout, S_IRUGO | S_IWUSR);
-
DEF_DEV_ATTRIB(max_unmap_lba_count);
SE_DEV_ATTR(max_unmap_lba_count, S_IRUGO | S_IWUSR);
@@ -760,7 +749,6 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
&target_core_dev_attrib_optimal_sectors.attr,
&target_core_dev_attrib_hw_queue_depth.attr,
&target_core_dev_attrib_queue_depth.attr,
- &target_core_dev_attrib_task_timeout.attr,
&target_core_dev_attrib_max_unmap_lba_count.attr,
&target_core_dev_attrib_max_unmap_block_desc_count.attr,
&target_core_dev_attrib_unmap_granularity.attr,
@@ -3079,8 +3067,7 @@ static struct config_group *target_core_call_addhbatotarget(
/*
* Load up TCM subsystem plugins if they have not already been loaded.
*/
- if (transport_subsystem_check_init() < 0)
- return ERR_PTR(-EINVAL);
+ transport_subsystem_check_init();
hba = core_alloc_hba(se_plugin_str, plugin_dep_id, 0);
if (IS_ERR(hba))
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index ca6e4a4df13..f870c3bcfd8 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -914,21 +914,6 @@ void se_dev_set_default_attribs(
dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth;
}
-int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout)
-{
- if (task_timeout > DA_TASK_TIMEOUT_MAX) {
- pr_err("dev[%p]: Passed task_timeout: %u larger then"
- " DA_TASK_TIMEOUT_MAX\n", dev, task_timeout);
- return -EINVAL;
- } else {
- dev->se_sub_dev->se_dev_attrib.task_timeout = task_timeout;
- pr_debug("dev[%p]: Set SE Device task_timeout: %u\n",
- dev, task_timeout);
- }
-
- return 0;
-}
-
int se_dev_set_max_unmap_lba_count(
struct se_device *dev,
u32 max_unmap_lba_count)
@@ -972,36 +957,24 @@ int se_dev_set_unmap_granularity_alignment(
int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
- if ((flag != 0) && (flag != 1)) {
+ if (flag != 0 && flag != 1) {
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
- if (dev->transport->dpo_emulated == NULL) {
- pr_err("dev->transport->dpo_emulated is NULL\n");
- return -EINVAL;
- }
- if (dev->transport->dpo_emulated(dev) == 0) {
- pr_err("dev->transport->dpo_emulated not supported\n");
- return -EINVAL;
- }
- dev->se_sub_dev->se_dev_attrib.emulate_dpo = flag;
- pr_debug("dev[%p]: SE Device Page Out (DPO) Emulation"
- " bit: %d\n", dev, dev->se_sub_dev->se_dev_attrib.emulate_dpo);
- return 0;
+
+ pr_err("dpo_emulated not supported\n");
+ return -EINVAL;
}
int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
- if ((flag != 0) && (flag != 1)) {
+ if (flag != 0 && flag != 1) {
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
- if (dev->transport->fua_write_emulated == NULL) {
- pr_err("dev->transport->fua_write_emulated is NULL\n");
- return -EINVAL;
- }
- if (dev->transport->fua_write_emulated(dev) == 0) {
- pr_err("dev->transport->fua_write_emulated not supported\n");
+
+ if (dev->transport->fua_write_emulated == 0) {
+ pr_err("fua_write_emulated not supported\n");
return -EINVAL;
}
dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag;
@@ -1012,36 +985,23 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
- if ((flag != 0) && (flag != 1)) {
+ if (flag != 0 && flag != 1) {
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
- if (dev->transport->fua_read_emulated == NULL) {
- pr_err("dev->transport->fua_read_emulated is NULL\n");
- return -EINVAL;
- }
- if (dev->transport->fua_read_emulated(dev) == 0) {
- pr_err("dev->transport->fua_read_emulated not supported\n");
- return -EINVAL;
- }
- dev->se_sub_dev->se_dev_attrib.emulate_fua_read = flag;
- pr_debug("dev[%p]: SE Device Forced Unit Access READs: %d\n",
- dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_read);
- return 0;
+
+ pr_err("ua read emulated not supported\n");
+ return -EINVAL;
}
int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
- if ((flag != 0) && (flag != 1)) {
+ if (flag != 0 && flag != 1) {
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
- if (dev->transport->write_cache_emulated == NULL) {
- pr_err("dev->transport->write_cache_emulated is NULL\n");
- return -EINVAL;
- }
- if (dev->transport->write_cache_emulated(dev) == 0) {
- pr_err("dev->transport->write_cache_emulated not supported\n");
+ if (dev->transport->write_cache_emulated == 0) {
+ pr_err("write_cache_emulated not supported\n");
return -EINVAL;
}
dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag;
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
index c4ea3a9a555..39f021b855e 100644
--- a/drivers/target/target_core_fabric_lib.c
+++ b/drivers/target/target_core_fabric_lib.c
@@ -63,6 +63,7 @@ u32 sas_get_pr_transport_id(
unsigned char *buf)
{
unsigned char *ptr;
+ int ret;
/*
* Set PROTOCOL IDENTIFIER to 6h for SAS
@@ -74,7 +75,9 @@ u32 sas_get_pr_transport_id(
*/
ptr = &se_nacl->initiatorname[4]; /* Skip over 'naa. prefix */
- hex2bin(&buf[4], ptr, 8);
+ ret = hex2bin(&buf[4], ptr, 8);
+ if (ret < 0)
+ pr_debug("sas transport_id: invalid hex string\n");
/*
* The SAS Transport ID is a hardcoded 24-byte length
@@ -156,8 +159,9 @@ u32 fc_get_pr_transport_id(
unsigned char *buf)
{
unsigned char *ptr;
- int i;
+ int i, ret;
u32 off = 8;
+
/*
* PROTOCOL IDENTIFIER is 0h for FCP-2
*
@@ -174,7 +178,9 @@ u32 fc_get_pr_transport_id(
i++;
continue;
}
- hex2bin(&buf[off++], &ptr[i], 1);
+ ret = hex2bin(&buf[off++], &ptr[i], 1);
+ if (ret < 0)
+ pr_debug("fc transport_id: invalid hex string\n");
i += 2;
}
/*
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 99c9db00339..19a0be9c657 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -272,13 +272,14 @@ fd_alloc_task(unsigned char *cdb)
static int fd_do_readv(struct se_task *task)
{
struct fd_request *req = FILE_REQ(task);
- struct fd_dev *dev = req->fd_task.se_dev->dev_ptr;
+ struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev;
+ struct fd_dev *dev = se_dev->dev_ptr;
struct file *fd = dev->fd_file;
struct scatterlist *sg = task->task_sg;
struct iovec *iov;
mm_segment_t old_fs;
loff_t pos = (task->task_lba *
- task->se_dev->se_sub_dev->se_dev_attrib.block_size);
+ se_dev->se_sub_dev->se_dev_attrib.block_size);
int ret = 0, i;
iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
@@ -324,13 +325,14 @@ static int fd_do_readv(struct se_task *task)
static int fd_do_writev(struct se_task *task)
{
struct fd_request *req = FILE_REQ(task);
- struct fd_dev *dev = req->fd_task.se_dev->dev_ptr;
+ struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev;
+ struct fd_dev *dev = se_dev->dev_ptr;
struct file *fd = dev->fd_file;
struct scatterlist *sg = task->task_sg;
struct iovec *iov;
mm_segment_t old_fs;
loff_t pos = (task->task_lba *
- task->se_dev->se_sub_dev->se_dev_attrib.block_size);
+ se_dev->se_sub_dev->se_dev_attrib.block_size);
int ret, i = 0;
iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
@@ -398,33 +400,6 @@ static void fd_emulate_sync_cache(struct se_task *task)
}
/*
- * Tell TCM Core that we are capable of WriteCache emulation for
- * an underlying struct se_device.
- */
-static int fd_emulated_write_cache(struct se_device *dev)
-{
- return 1;
-}
-
-static int fd_emulated_dpo(struct se_device *dev)
-{
- return 0;
-}
-/*
- * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs
- * for TYPE_DISK.
- */
-static int fd_emulated_fua_write(struct se_device *dev)
-{
- return 1;
-}
-
-static int fd_emulated_fua_read(struct se_device *dev)
-{
- return 0;
-}
-
-/*
* WRITE Force Unit Access (FUA) emulation on a per struct se_task
* LBA range basis..
*/
@@ -607,17 +582,6 @@ static ssize_t fd_show_configfs_dev_params(
return bl;
}
-/* fd_get_cdb(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static unsigned char *fd_get_cdb(struct se_task *task)
-{
- struct fd_request *req = FILE_REQ(task);
-
- return req->fd_scsi_cdb;
-}
-
/* fd_get_device_rev(): (Part of se_subsystem_api_t template)
*
*
@@ -649,15 +613,13 @@ static struct se_subsystem_api fileio_template = {
.name = "fileio",
.owner = THIS_MODULE,
.transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
+ .write_cache_emulated = 1,
+ .fua_write_emulated = 1,
.attach_hba = fd_attach_hba,
.detach_hba = fd_detach_hba,
.allocate_virtdevice = fd_allocate_virtdevice,
.create_virtdevice = fd_create_virtdevice,
.free_device = fd_free_device,
- .dpo_emulated = fd_emulated_dpo,
- .fua_write_emulated = fd_emulated_fua_write,
- .fua_read_emulated = fd_emulated_fua_read,
- .write_cache_emulated = fd_emulated_write_cache,
.alloc_task = fd_alloc_task,
.do_task = fd_do_task,
.do_sync_cache = fd_emulate_sync_cache,
@@ -665,7 +627,6 @@ static struct se_subsystem_api fileio_template = {
.check_configfs_dev_params = fd_check_configfs_dev_params,
.set_configfs_dev_params = fd_set_configfs_dev_params,
.show_configfs_dev_params = fd_show_configfs_dev_params,
- .get_cdb = fd_get_cdb,
.get_device_rev = fd_get_device_rev,
.get_device_type = fd_get_device_type,
.get_blocks = fd_get_blocks,
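For illustration, after this change a backend that always emulates a volatile write cache and FUA WRITEs declares that directly in its struct se_subsystem_api instead of supplying four trivial callbacks. A minimal sketch, assuming a hypothetical "example" backend (only the capability fields shown; the remaining ops stay as before):

static struct se_subsystem_api example_template = {
	.name			= "example",
	.owner			= THIS_MODULE,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
	/* flags replace the old *_emulated callbacks; unset flags default to 0 */
	.write_cache_emulated	= 1,
	.fua_write_emulated	= 1,
	/* attach_hba, do_task, configfs ops, ... unchanged */
};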
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index daebd710b89..59e6e73106c 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -14,9 +14,7 @@
struct fd_request {
struct se_task fd_task;
- /* SCSI CDB from iSCSI Command PDU */
- unsigned char fd_scsi_cdb[TCM_MAX_COMMAND_SIZE];
-} ____cacheline_aligned;
+};
#define FBDF_HAS_PATH 0x01
#define FBDF_HAS_SIZE 0x02
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 7f0cc53b458..41ad02b5fb8 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -313,104 +313,42 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
return blocks_long;
}
+static void iblock_end_io_flush(struct bio *bio, int err)
+{
+ struct se_cmd *cmd = bio->bi_private;
+
+ if (err)
+ pr_err("IBLOCK: cache flush failed: %d\n", err);
+
+ if (cmd)
+ transport_complete_sync_cache(cmd, err == 0);
+ bio_put(bio);
+}
+
/*
- * Emulate SYCHRONIZE_CACHE_*
+ * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
+ * always flush the whole cache.
*/
static void iblock_emulate_sync_cache(struct se_task *task)
{
struct se_cmd *cmd = task->task_se_cmd;
struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
int immed = (cmd->t_task_cdb[1] & 0x2);
- sector_t error_sector;
- int ret;
+ struct bio *bio;
/*
* If the Immediate bit is set, queue up the GOOD response
- * for this SYNCHRONIZE_CACHE op
+ * for this SYNCHRONIZE_CACHE op.
*/
if (immed)
transport_complete_sync_cache(cmd, 1);
- /*
- * blkdev_issue_flush() does not support a specifying a range, so
- * we have to flush the entire cache.
- */
- ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector);
- if (ret != 0) {
- pr_err("IBLOCK: block_issue_flush() failed: %d "
- " error_sector: %llu\n", ret,
- (unsigned long long)error_sector);
- }
-
+ bio = bio_alloc(GFP_KERNEL, 0);
+ bio->bi_end_io = iblock_end_io_flush;
+ bio->bi_bdev = ib_dev->ibd_bd;
if (!immed)
- transport_complete_sync_cache(cmd, ret == 0);
-}
-
-/*
- * Tell TCM Core that we are capable of WriteCache emulation for
- * an underlying struct se_device.
- */
-static int iblock_emulated_write_cache(struct se_device *dev)
-{
- return 1;
-}
-
-static int iblock_emulated_dpo(struct se_device *dev)
-{
- return 0;
-}
-
-/*
- * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs
- * for TYPE_DISK.
- */
-static int iblock_emulated_fua_write(struct se_device *dev)
-{
- return 1;
-}
-
-static int iblock_emulated_fua_read(struct se_device *dev)
-{
- return 0;
-}
-
-static int iblock_do_task(struct se_task *task)
-{
- struct se_device *dev = task->task_se_cmd->se_dev;
- struct iblock_req *req = IBLOCK_REQ(task);
- struct bio *bio = req->ib_bio, *nbio = NULL;
- struct blk_plug plug;
- int rw;
-
- if (task->task_data_direction == DMA_TO_DEVICE) {
- /*
- * Force data to disk if we pretend to not have a volatile
- * write cache, or the initiator set the Force Unit Access bit.
- */
- if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
- (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
- task->task_se_cmd->t_tasks_fua))
- rw = WRITE_FUA;
- else
- rw = WRITE;
- } else {
- rw = READ;
- }
-
- blk_start_plug(&plug);
- while (bio) {
- nbio = bio->bi_next;
- bio->bi_next = NULL;
- pr_debug("Calling submit_bio() task: %p bio: %p"
- " bio->bi_sector: %llu\n", task, bio,
- (unsigned long long)bio->bi_sector);
-
- submit_bio(rw, bio);
- bio = nbio;
- }
- blk_finish_plug(&plug);
-
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ bio->bi_private = cmd;
+ submit_bio(WRITE_FLUSH, bio);
}
static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)
@@ -424,20 +362,7 @@ static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)
static void iblock_free_task(struct se_task *task)
{
- struct iblock_req *req = IBLOCK_REQ(task);
- struct bio *bio, *hbio = req->ib_bio;
- /*
- * We only release the bio(s) here if iblock_bio_done() has not called
- * bio_put() -> iblock_bio_destructor().
- */
- while (hbio != NULL) {
- bio = hbio;
- hbio = hbio->bi_next;
- bio->bi_next = NULL;
- bio_put(bio);
- }
-
- kfree(req);
+ kfree(IBLOCK_REQ(task));
}
enum {
@@ -551,25 +476,21 @@ static ssize_t iblock_show_configfs_dev_params(
static void iblock_bio_destructor(struct bio *bio)
{
struct se_task *task = bio->bi_private;
- struct iblock_dev *ib_dev = task->se_dev->dev_ptr;
+ struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr;
bio_free(bio, ib_dev->ibd_bio_set);
}
-static struct bio *iblock_get_bio(
- struct se_task *task,
- struct iblock_req *ib_req,
- struct iblock_dev *ib_dev,
- int *ret,
- sector_t lba,
- u32 sg_num)
+static struct bio *
+iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num)
{
+ struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr;
+ struct iblock_req *ib_req = IBLOCK_REQ(task);
struct bio *bio;
bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
if (!bio) {
pr_err("Unable to allocate memory for bio\n");
- *ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
return NULL;
}
@@ -590,17 +511,33 @@ static struct bio *iblock_get_bio(
return bio;
}
-static int iblock_map_data_SG(struct se_task *task)
+static int iblock_do_task(struct se_task *task)
{
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
- struct iblock_dev *ib_dev = task->se_dev->dev_ptr;
- struct iblock_req *ib_req = IBLOCK_REQ(task);
- struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+ struct bio *bio;
+ struct bio_list list;
struct scatterlist *sg;
- int ret = 0;
u32 i, sg_num = task->task_sg_nents;
sector_t block_lba;
+ struct blk_plug plug;
+ int rw;
+
+ if (task->task_data_direction == DMA_TO_DEVICE) {
+ /*
+ * Force data to disk if we pretend to not have a volatile
+ * write cache, or the initiator set the Force Unit Access bit.
+ */
+ if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
+ (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
+ task->task_se_cmd->t_tasks_fua))
+ rw = WRITE_FUA;
+ else
+ rw = WRITE;
+ } else {
+ rw = READ;
+ }
+
/*
* Do starting conversion up from non 512-byte blocksize with
* struct se_task SCSI blocksize into Linux/Block 512 units for BIO.
@@ -619,68 +556,43 @@ static int iblock_map_data_SG(struct se_task *task)
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
- bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num);
+ bio = iblock_get_bio(task, block_lba, sg_num);
if (!bio)
- return ret;
+ return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+
+ bio_list_init(&list);
+ bio_list_add(&list, bio);
- ib_req->ib_bio = bio;
- hbio = tbio = bio;
- /*
- * Use fs/bio.c:bio_add_pages() to setup the bio_vec maplist
- * from task->task_sg -> struct scatterlist memory.
- */
for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
- pr_debug("task: %p bio: %p Calling bio_add_page(): page:"
- " %p len: %u offset: %u\n", task, bio, sg_page(sg),
- sg->length, sg->offset);
-again:
- ret = bio_add_page(bio, sg_page(sg), sg->length, sg->offset);
- if (ret != sg->length) {
-
- pr_debug("*** Set bio->bi_sector: %llu\n",
- (unsigned long long)bio->bi_sector);
- pr_debug("** task->task_size: %u\n",
- task->task_size);
- pr_debug("*** bio->bi_max_vecs: %u\n",
- bio->bi_max_vecs);
- pr_debug("*** bio->bi_vcnt: %u\n",
- bio->bi_vcnt);
-
- bio = iblock_get_bio(task, ib_req, ib_dev, &ret,
- block_lba, sg_num);
+ /*
+ * XXX: if the length the device accepts is shorter than the
+ * length of the S/G list entry this will cause an
+ * endless loop. Better hope no driver uses huge pages.
+ */
+ while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
+ != sg->length) {
+ bio = iblock_get_bio(task, block_lba, sg_num);
if (!bio)
goto fail;
-
- tbio = tbio->bi_next = bio;
- pr_debug("-----------------> Added +1 bio: %p to"
- " list, Going to again\n", bio);
- goto again;
+ bio_list_add(&list, bio);
}
+
/* Always in 512 byte units for Linux/Block */
block_lba += sg->length >> IBLOCK_LBA_SHIFT;
sg_num--;
- pr_debug("task: %p bio-add_page() passed!, decremented"
- " sg_num to %u\n", task, sg_num);
- pr_debug("task: %p bio_add_page() passed!, increased lba"
- " to %llu\n", task, (unsigned long long)block_lba);
- pr_debug("task: %p bio_add_page() passed!, bio->bi_vcnt:"
- " %u\n", task, bio->bi_vcnt);
}
- return 0;
+ blk_start_plug(&plug);
+ while ((bio = bio_list_pop(&list)))
+ submit_bio(rw, bio);
+ blk_finish_plug(&plug);
+
+ return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+
fail:
- while (hbio) {
- bio = hbio;
- hbio = hbio->bi_next;
- bio->bi_next = NULL;
+ while ((bio = bio_list_pop(&list)))
bio_put(bio);
- }
- return ret;
-}
-
-static unsigned char *iblock_get_cdb(struct se_task *task)
-{
- return IBLOCK_REQ(task)->ib_scsi_cdb;
+ return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
}
static u32 iblock_get_device_rev(struct se_device *dev)
@@ -706,6 +618,7 @@ static void iblock_bio_done(struct bio *bio, int err)
{
struct se_task *task = bio->bi_private;
struct iblock_req *ibr = IBLOCK_REQ(task);
+
/*
* Set -EIO if !BIO_UPTODATE and the passed is still err=0
*/
@@ -720,50 +633,31 @@ static void iblock_bio_done(struct bio *bio, int err)
*/
atomic_inc(&ibr->ib_bio_err_cnt);
smp_mb__after_atomic_inc();
- bio_put(bio);
- /*
- * Wait to complete the task until the last bio as completed.
- */
- if (!atomic_dec_and_test(&ibr->ib_bio_cnt))
- return;
-
- ibr->ib_bio = NULL;
- transport_complete_task(task, 0);
- return;
}
- pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
- task, bio, task->task_lba, (unsigned long long)bio->bi_sector, err);
- /*
- * bio_put() will call iblock_bio_destructor() to release the bio back
- * to ibr->ib_bio_set.
- */
+
bio_put(bio);
- /*
- * Wait to complete the task until the last bio as completed.
- */
+
if (!atomic_dec_and_test(&ibr->ib_bio_cnt))
return;
- /*
- * Return GOOD status for task if zero ib_bio_err_cnt exists.
- */
- ibr->ib_bio = NULL;
- transport_complete_task(task, (!atomic_read(&ibr->ib_bio_err_cnt)));
+
+ pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
+ task, bio, task->task_lba,
+ (unsigned long long)bio->bi_sector, err);
+
+ transport_complete_task(task, !atomic_read(&ibr->ib_bio_err_cnt));
}
static struct se_subsystem_api iblock_template = {
.name = "iblock",
.owner = THIS_MODULE,
.transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
- .map_data_SG = iblock_map_data_SG,
+ .write_cache_emulated = 1,
+ .fua_write_emulated = 1,
.attach_hba = iblock_attach_hba,
.detach_hba = iblock_detach_hba,
.allocate_virtdevice = iblock_allocate_virtdevice,
.create_virtdevice = iblock_create_virtdevice,
.free_device = iblock_free_device,
- .dpo_emulated = iblock_emulated_dpo,
- .fua_write_emulated = iblock_emulated_fua_write,
- .fua_read_emulated = iblock_emulated_fua_read,
- .write_cache_emulated = iblock_emulated_write_cache,
.alloc_task = iblock_alloc_task,
.do_task = iblock_do_task,
.do_discard = iblock_do_discard,
@@ -772,7 +666,6 @@ static struct se_subsystem_api iblock_template = {
.check_configfs_dev_params = iblock_check_configfs_dev_params,
.set_configfs_dev_params = iblock_set_configfs_dev_params,
.show_configfs_dev_params = iblock_show_configfs_dev_params,
- .get_cdb = iblock_get_cdb,
.get_device_rev = iblock_get_device_rev,
.get_device_type = iblock_get_device_type,
.get_blocks = iblock_get_blocks,
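For illustration, the submission pattern used by the reworked iblock_do_task() above, reduced to a minimal sketch: bios are collected on a struct bio_list while the scatterlist is mapped, then all of them are submitted inside a block plug. The helper name is hypothetical; submit_bio() is the two-argument form of this kernel era.

static void example_submit_bios(struct bio_list *list, int rw)
{
	struct blk_plug plug;
	struct bio *bio;

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(list)))
		submit_bio(rw, bio);
	blk_finish_plug(&plug);
}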
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
index a121cd1b657..5cf1860c10d 100644
--- a/drivers/target/target_core_iblock.h
+++ b/drivers/target/target_core_iblock.h
@@ -8,10 +8,8 @@
struct iblock_req {
struct se_task ib_task;
- unsigned char ib_scsi_cdb[TCM_MAX_COMMAND_SIZE];
atomic_t ib_bio_cnt;
atomic_t ib_bio_err_cnt;
- struct bio *ib_bio;
} ____cacheline_aligned;
#define IBDF_HAS_UDEV_PATH 0x01
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 77d72588641..dad671dee9e 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -566,7 +566,7 @@ static struct se_device *pscsi_create_virtdevice(
if (IS_ERR(sh)) {
pr_err("pSCSI: Unable to locate"
" pdv_host_id: %d\n", pdv->pdv_host_id);
- return (struct se_device *) sh;
+ return ERR_CAST(sh);
}
}
} else {
@@ -676,7 +676,7 @@ static inline struct pscsi_plugin_task *PSCSI_TASK(struct se_task *task)
*/
static int pscsi_transport_complete(struct se_task *task)
{
- struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
+ struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
struct scsi_device *sd = pdv->pdv_sd;
int result;
struct pscsi_plugin_task *pt = PSCSI_TASK(task);
@@ -776,95 +776,6 @@ pscsi_alloc_task(unsigned char *cdb)
return &pt->pscsi_task;
}
-static inline void pscsi_blk_init_request(
- struct se_task *task,
- struct pscsi_plugin_task *pt,
- struct request *req,
- int bidi_read)
-{
- /*
- * Defined as "scsi command" in include/linux/blkdev.h.
- */
- req->cmd_type = REQ_TYPE_BLOCK_PC;
- /*
- * For the extra BIDI-COMMAND READ struct request we do not
- * need to setup the remaining structure members
- */
- if (bidi_read)
- return;
- /*
- * Setup the done function pointer for struct request,
- * also set the end_io_data pointer.to struct se_task.
- */
- req->end_io = pscsi_req_done;
- req->end_io_data = task;
- /*
- * Load the referenced struct se_task's SCSI CDB into
- * include/linux/blkdev.h:struct request->cmd
- */
- req->cmd_len = scsi_command_size(pt->pscsi_cdb);
- req->cmd = &pt->pscsi_cdb[0];
- /*
- * Setup pointer for outgoing sense data.
- */
- req->sense = &pt->pscsi_sense[0];
- req->sense_len = 0;
-}
-
-/*
- * Used for pSCSI data payloads for all *NON* SCF_SCSI_DATA_SG_IO_CDB
-*/
-static int pscsi_blk_get_request(struct se_task *task)
-{
- struct pscsi_plugin_task *pt = PSCSI_TASK(task);
- struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
-
- pt->pscsi_req = blk_get_request(pdv->pdv_sd->request_queue,
- (task->task_data_direction == DMA_TO_DEVICE),
- GFP_KERNEL);
- if (!pt->pscsi_req || IS_ERR(pt->pscsi_req)) {
- pr_err("PSCSI: blk_get_request() failed: %ld\n",
- IS_ERR(pt->pscsi_req));
- return PYX_TRANSPORT_LU_COMM_FAILURE;
- }
- /*
- * Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC,
- * and setup rq callback, CDB and sense.
- */
- pscsi_blk_init_request(task, pt, pt->pscsi_req, 0);
- return 0;
-}
-
-/* pscsi_do_task(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static int pscsi_do_task(struct se_task *task)
-{
- struct pscsi_plugin_task *pt = PSCSI_TASK(task);
- struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
- /*
- * Set the struct request->timeout value based on peripheral
- * device type from SCSI.
- */
- if (pdv->pdv_sd->type == TYPE_DISK)
- pt->pscsi_req->timeout = PS_TIMEOUT_DISK;
- else
- pt->pscsi_req->timeout = PS_TIMEOUT_OTHER;
-
- pt->pscsi_req->retries = PS_RETRY;
- /*
- * Queue the struct request into the struct scsi_device->request_queue.
- * Also check for HEAD_OF_QUEUE SAM TASK attr from received se_cmd
- * descriptor
- */
- blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, pt->pscsi_req,
- (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG),
- pscsi_req_done);
-
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
-}
-
static void pscsi_free_task(struct se_task *task)
{
struct pscsi_plugin_task *pt = PSCSI_TASK(task);
@@ -1048,15 +959,12 @@ static inline struct bio *pscsi_get_bio(int sg_num)
return bio;
}
-static int __pscsi_map_SG(
- struct se_task *task,
- struct scatterlist *task_sg,
- u32 task_sg_num,
- int bidi_read)
+static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
+ struct bio **hbio)
{
- struct pscsi_plugin_task *pt = PSCSI_TASK(task);
- struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
- struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+ struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
+ u32 task_sg_num = task->task_sg_nents;
+ struct bio *bio = NULL, *tbio = NULL;
struct page *page;
struct scatterlist *sg;
u32 data_len = task->task_size, i, len, bytes, off;
@@ -1065,19 +973,8 @@ static int __pscsi_map_SG(
int nr_vecs = 0, rc, ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
int rw = (task->task_data_direction == DMA_TO_DEVICE);
- if (!task->task_size)
- return 0;
- /*
- * For SCF_SCSI_DATA_SG_IO_CDB, Use fs/bio.c:bio_add_page() to setup
- * the bio_vec maplist from task->task_sg ->
- * struct scatterlist memory. The struct se_task->task_sg[] currently needs
- * to be attached to struct bios for submission to Linux/SCSI using
- * struct request to struct scsi_device->request_queue.
- *
- * Note that this will be changing post v2.6.28 as Target_Core_Mod/pSCSI
- * is ported to upstream SCSI passthrough functionality that accepts
- * struct scatterlist->page_link or struct page as a paraemeter.
- */
+ *hbio = NULL;
+
pr_debug("PSCSI: nr_pages: %d\n", nr_pages);
for_each_sg(task_sg, sg, task_sg_num, i) {
@@ -1114,8 +1011,8 @@ static int __pscsi_map_SG(
* bios need to be added to complete a given
* struct se_task
*/
- if (!hbio)
- hbio = tbio = bio;
+ if (!*hbio)
+ *hbio = tbio = bio;
else
tbio = tbio->bi_next = bio;
}
@@ -1151,92 +1048,82 @@ static int __pscsi_map_SG(
off = 0;
}
}
- /*
- * Setup the primary pt->pscsi_req used for non BIDI and BIDI-COMMAND
- * primary SCSI WRITE poayload mapped for struct se_task->task_sg[]
- */
- if (!bidi_read) {
- /*
- * Starting with v2.6.31, call blk_make_request() passing in *hbio to
- * allocate the pSCSI task a struct request.
- */
- pt->pscsi_req = blk_make_request(pdv->pdv_sd->request_queue,
- hbio, GFP_KERNEL);
- if (!pt->pscsi_req) {
- pr_err("pSCSI: blk_make_request() failed\n");
- goto fail;
- }
- /*
- * Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC,
- * and setup rq callback, CDB and sense.
- */
- pscsi_blk_init_request(task, pt, pt->pscsi_req, 0);
-
- return task->task_sg_nents;
- }
- /*
- * Setup the secondary pt->pscsi_req->next_rq used for the extra BIDI-COMMAND
- * SCSI READ paylaod mapped for struct se_task->task_sg_bidi[]
- */
- pt->pscsi_req->next_rq = blk_make_request(pdv->pdv_sd->request_queue,
- hbio, GFP_KERNEL);
- if (!pt->pscsi_req->next_rq) {
- pr_err("pSCSI: blk_make_request() failed for BIDI\n");
- goto fail;
- }
- pscsi_blk_init_request(task, pt, pt->pscsi_req->next_rq, 1);
return task->task_sg_nents;
fail:
- while (hbio) {
- bio = hbio;
- hbio = hbio->bi_next;
+ while (*hbio) {
+ bio = *hbio;
+ *hbio = (*hbio)->bi_next;
bio->bi_next = NULL;
- bio_endio(bio, 0);
+ bio_endio(bio, 0); /* XXX: should be error */
}
return ret;
}
-/*
- * pSCSI maps both ->map_control_SG() and ->map_data_SG() to a single call.
- */
-static int pscsi_map_SG(struct se_task *task)
+static int pscsi_do_task(struct se_task *task)
{
+ struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
+ struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+ struct request *req;
+ struct bio *hbio;
int ret;
- /*
- * Setup the main struct request for the task->task_sg[] payload
- */
+ target_get_task_cdb(task, pt->pscsi_cdb);
+
+ if (task->task_se_cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
+ req = blk_get_request(pdv->pdv_sd->request_queue,
+ (task->task_data_direction == DMA_TO_DEVICE),
+ GFP_KERNEL);
+ if (!req || IS_ERR(req)) {
+ pr_err("PSCSI: blk_get_request() failed: %ld\n",
+ req ? IS_ERR(req) : -ENOMEM);
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+ }
+ } else {
+ BUG_ON(!task->task_size);
- ret = __pscsi_map_SG(task, task->task_sg, task->task_sg_nents, 0);
- if (ret >= 0 && task->task_sg_bidi) {
/*
- * If present, set up the extra BIDI-COMMAND SCSI READ
- * struct request and payload.
+ * Setup the main struct request for the task->task_sg[] payload
*/
- ret = __pscsi_map_SG(task, task->task_sg_bidi,
- task->task_sg_nents, 1);
+ ret = pscsi_map_sg(task, task->task_sg, &hbio);
+ if (ret < 0)
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
+
+ req = blk_make_request(pdv->pdv_sd->request_queue, hbio,
+ GFP_KERNEL);
+ if (!req) {
+ pr_err("pSCSI: blk_make_request() failed\n");
+ goto fail;
+ }
}
- if (ret < 0)
- return PYX_TRANSPORT_LU_COMM_FAILURE;
- return 0;
-}
+ req->cmd_type = REQ_TYPE_BLOCK_PC;
+ req->end_io = pscsi_req_done;
+ req->end_io_data = task;
+ req->cmd_len = scsi_command_size(pt->pscsi_cdb);
+ req->cmd = &pt->pscsi_cdb[0];
+ req->sense = &pt->pscsi_sense[0];
+ req->sense_len = 0;
+ if (pdv->pdv_sd->type == TYPE_DISK)
+ req->timeout = PS_TIMEOUT_DISK;
+ else
+ req->timeout = PS_TIMEOUT_OTHER;
+ req->retries = PS_RETRY;
-static int pscsi_CDB_none(struct se_task *task)
-{
- return pscsi_blk_get_request(task);
-}
+ blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, req,
+ (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG),
+ pscsi_req_done);
-/* pscsi_get_cdb():
- *
- *
- */
-static unsigned char *pscsi_get_cdb(struct se_task *task)
-{
- struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+ return PYX_TRANSPORT_SENT_TO_TRANSPORT;
- return pt->pscsi_cdb;
+fail:
+ while (hbio) {
+ struct bio *bio = hbio;
+ hbio = hbio->bi_next;
+ bio->bi_next = NULL;
+ bio_endio(bio, 0); /* XXX: should be error */
+ }
+ return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
}
/* pscsi_get_sense_buffer():
@@ -1327,23 +1214,13 @@ static void pscsi_req_done(struct request *req, int uptodate)
pt->pscsi_resid = req->resid_len;
pscsi_process_SAM_status(task, pt);
- /*
- * Release BIDI-READ if present
- */
- if (req->next_rq != NULL)
- __blk_put_request(req->q, req->next_rq);
-
__blk_put_request(req->q, req);
- pt->pscsi_req = NULL;
}
static struct se_subsystem_api pscsi_template = {
.name = "pscsi",
.owner = THIS_MODULE,
.transport_type = TRANSPORT_PLUGIN_PHBA_PDEV,
- .cdb_none = pscsi_CDB_none,
- .map_control_SG = pscsi_map_SG,
- .map_data_SG = pscsi_map_SG,
.attach_hba = pscsi_attach_hba,
.detach_hba = pscsi_detach_hba,
.pmode_enable_hba = pscsi_pmode_enable_hba,
@@ -1357,7 +1234,6 @@ static struct se_subsystem_api pscsi_template = {
.check_configfs_dev_params = pscsi_check_configfs_dev_params,
.set_configfs_dev_params = pscsi_set_configfs_dev_params,
.show_configfs_dev_params = pscsi_show_configfs_dev_params,
- .get_cdb = pscsi_get_cdb,
.get_sense_buffer = pscsi_get_sense_buffer,
.get_device_rev = pscsi_get_device_rev,
.get_device_type = pscsi_get_device_type,
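For illustration, the request setup that pscsi_do_task() now performs inline, reduced to a sketch. The helper name and the timeout/retry values are hypothetical; the block-layer calls are the ones used in the hunk above.

static void example_execute_pc_req(struct scsi_device *sd, struct request *req,
				   unsigned char *cdb, void *end_io_data,
				   rq_end_io_fn *done)
{
	req->cmd_type    = REQ_TYPE_BLOCK_PC;	/* SCSI passthrough */
	req->cmd         = cdb;
	req->cmd_len     = scsi_command_size(cdb);
	req->end_io_data = end_io_data;
	req->timeout     = 30 * HZ;		/* illustrative value */
	req->retries     = 1;			/* illustrative value */

	blk_execute_rq_nowait(sd->request_queue, NULL, req, 0, done);
}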
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
index ebf4f1ae2c8..fdc17b6aefb 100644
--- a/drivers/target/target_core_pscsi.h
+++ b/drivers/target/target_core_pscsi.h
@@ -27,7 +27,6 @@ struct pscsi_plugin_task {
int pscsi_direction;
int pscsi_result;
u32 pscsi_resid;
- struct request *pscsi_req;
unsigned char pscsi_cdb[0];
} ____cacheline_aligned;
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 1ab69f32e66..5158d3846f1 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -350,7 +350,7 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
static int rd_MEMCPY_read(struct rd_request *req)
{
struct se_task *task = &req->rd_task;
- struct rd_dev *dev = req->rd_task.se_dev->dev_ptr;
+ struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
struct rd_dev_sg_table *table;
struct scatterlist *sg_d, *sg_s;
void *dst, *src;
@@ -466,7 +466,7 @@ static int rd_MEMCPY_read(struct rd_request *req)
static int rd_MEMCPY_write(struct rd_request *req)
{
struct se_task *task = &req->rd_task;
- struct rd_dev *dev = req->rd_task.se_dev->dev_ptr;
+ struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
struct rd_dev_sg_table *table;
struct scatterlist *sg_d, *sg_s;
void *dst, *src;
@@ -581,7 +581,7 @@ static int rd_MEMCPY_write(struct rd_request *req)
*/
static int rd_MEMCPY_do_task(struct se_task *task)
{
- struct se_device *dev = task->se_dev;
+ struct se_device *dev = task->task_se_cmd->se_dev;
struct rd_request *req = RD_REQ(task);
unsigned long long lba;
int ret;
@@ -691,17 +691,6 @@ static ssize_t rd_show_configfs_dev_params(
return bl;
}
-/* rd_get_cdb(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static unsigned char *rd_get_cdb(struct se_task *task)
-{
- struct rd_request *req = RD_REQ(task);
-
- return req->rd_scsi_cdb;
-}
-
static u32 rd_get_device_rev(struct se_device *dev)
{
return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
@@ -735,7 +724,6 @@ static struct se_subsystem_api rd_mcp_template = {
.check_configfs_dev_params = rd_check_configfs_dev_params,
.set_configfs_dev_params = rd_set_configfs_dev_params,
.show_configfs_dev_params = rd_show_configfs_dev_params,
- .get_cdb = rd_get_cdb,
.get_device_rev = rd_get_device_rev,
.get_device_type = rd_get_device_type,
.get_blocks = rd_get_blocks,
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h
index 0d027732cd0..784e56a0410 100644
--- a/drivers/target/target_core_rd.h
+++ b/drivers/target/target_core_rd.h
@@ -22,8 +22,6 @@ void rd_module_exit(void);
struct rd_request {
struct se_task rd_task;
- /* SCSI CDB from iSCSI Command PDU */
- unsigned char rd_scsi_cdb[TCM_MAX_COMMAND_SIZE];
/* Offset from start of page */
u32 rd_offset;
/* Starting page in Ramdisk for request */
diff --git a/drivers/target/target_core_scdb.c b/drivers/target/target_core_scdb.c
deleted file mode 100644
index 72843441d4f..00000000000
--- a/drivers/target/target_core_scdb.c
+++ /dev/null
@@ -1,105 +0,0 @@
-/*******************************************************************************
- * Filename: target_core_scdb.c
- *
- * This file contains the generic target engine Split CDB related functions.
- *
- * Copyright (c) 2004-2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
- *
- * Nicholas A. Bellinger <nab@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- ******************************************************************************/
-
-#include <linux/net.h>
-#include <linux/string.h>
-#include <scsi/scsi.h>
-#include <asm/unaligned.h>
-
-#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-
-#include "target_core_scdb.h"
-
-/* split_cdb_XX_6():
- *
- * 21-bit LBA w/ 8-bit SECTORS
- */
-void split_cdb_XX_6(
- unsigned long long lba,
- u32 sectors,
- unsigned char *cdb)
-{
- cdb[1] = (lba >> 16) & 0x1f;
- cdb[2] = (lba >> 8) & 0xff;
- cdb[3] = lba & 0xff;
- cdb[4] = sectors & 0xff;
-}
-
-/* split_cdb_XX_10():
- *
- * 32-bit LBA w/ 16-bit SECTORS
- */
-void split_cdb_XX_10(
- unsigned long long lba,
- u32 sectors,
- unsigned char *cdb)
-{
- put_unaligned_be32(lba, &cdb[2]);
- put_unaligned_be16(sectors, &cdb[7]);
-}
-
-/* split_cdb_XX_12():
- *
- * 32-bit LBA w/ 32-bit SECTORS
- */
-void split_cdb_XX_12(
- unsigned long long lba,
- u32 sectors,
- unsigned char *cdb)
-{
- put_unaligned_be32(lba, &cdb[2]);
- put_unaligned_be32(sectors, &cdb[6]);
-}
-
-/* split_cdb_XX_16():
- *
- * 64-bit LBA w/ 32-bit SECTORS
- */
-void split_cdb_XX_16(
- unsigned long long lba,
- u32 sectors,
- unsigned char *cdb)
-{
- put_unaligned_be64(lba, &cdb[2]);
- put_unaligned_be32(sectors, &cdb[10]);
-}
-
-/*
- * split_cdb_XX_32():
- *
- * 64-bit LBA w/ 32-bit SECTORS such as READ_32, WRITE_32 and emulated XDWRITEREAD_32
- */
-void split_cdb_XX_32(
- unsigned long long lba,
- u32 sectors,
- unsigned char *cdb)
-{
- put_unaligned_be64(lba, &cdb[12]);
- put_unaligned_be32(sectors, &cdb[28]);
-}
diff --git a/drivers/target/target_core_scdb.h b/drivers/target/target_core_scdb.h
deleted file mode 100644
index 48e9ccc9585..00000000000
--- a/drivers/target/target_core_scdb.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef TARGET_CORE_SCDB_H
-#define TARGET_CORE_SCDB_H
-
-extern void split_cdb_XX_6(unsigned long long, u32, unsigned char *);
-extern void split_cdb_XX_10(unsigned long long, u32, unsigned char *);
-extern void split_cdb_XX_12(unsigned long long, u32, unsigned char *);
-extern void split_cdb_XX_16(unsigned long long, u32, unsigned char *);
-extern void split_cdb_XX_32(unsigned long long, u32, unsigned char *);
-
-#endif /* TARGET_CORE_SCDB_H */
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 662473c5404..570b144a1ed 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -43,12 +43,12 @@
struct se_tmr_req *core_tmr_alloc_req(
struct se_cmd *se_cmd,
void *fabric_tmr_ptr,
- u8 function)
+ u8 function,
+ gfp_t gfp_flags)
{
struct se_tmr_req *tmr;
- tmr = kmem_cache_zalloc(se_tmr_req_cache, (in_interrupt()) ?
- GFP_ATOMIC : GFP_KERNEL);
+ tmr = kmem_cache_zalloc(se_tmr_req_cache, gfp_flags);
if (!tmr) {
pr_err("Unable to allocate struct se_tmr_req\n");
return ERR_PTR(-ENOMEM);
@@ -66,15 +66,16 @@ void core_tmr_release_req(
struct se_tmr_req *tmr)
{
struct se_device *dev = tmr->tmr_dev;
+ unsigned long flags;
if (!dev) {
kmem_cache_free(se_tmr_req_cache, tmr);
return;
}
- spin_lock_irq(&dev->se_tmr_lock);
+ spin_lock_irqsave(&dev->se_tmr_lock, flags);
list_del(&tmr->tmr_list);
- spin_unlock_irq(&dev->se_tmr_lock);
+ spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
kmem_cache_free(se_tmr_req_cache, tmr);
}
@@ -99,54 +100,20 @@ static void core_tmr_handle_tas_abort(
transport_cmd_finish_abort(cmd, 0);
}
-int core_tmr_lun_reset(
+static void core_tmr_drain_tmr_list(
struct se_device *dev,
struct se_tmr_req *tmr,
- struct list_head *preempt_and_abort_list,
- struct se_cmd *prout_cmd)
+ struct list_head *preempt_and_abort_list)
{
- struct se_cmd *cmd, *tcmd;
- struct se_node_acl *tmr_nacl = NULL;
- struct se_portal_group *tmr_tpg = NULL;
- struct se_queue_obj *qobj = &dev->dev_queue_obj;
+ LIST_HEAD(drain_tmr_list);
struct se_tmr_req *tmr_p, *tmr_pp;
- struct se_task *task, *task_tmp;
+ struct se_cmd *cmd;
unsigned long flags;
- int fe_count, tas;
- /*
- * TASK_ABORTED status bit, this is configurable via ConfigFS
- * struct se_device attributes. spc4r17 section 7.4.6 Control mode page
- *
- * A task aborted status (TAS) bit set to zero specifies that aborted
- * tasks shall be terminated by the device server without any response
- * to the application client. A TAS bit set to one specifies that tasks
- * aborted by the actions of an I_T nexus other than the I_T nexus on
- * which the command was received shall be completed with TASK ABORTED
- * status (see SAM-4).
- */
- tas = dev->se_sub_dev->se_dev_attrib.emulate_tas;
- /*
- * Determine if this se_tmr is coming from a $FABRIC_MOD
- * or struct se_device passthrough..
- */
- if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
- tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
- tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
- if (tmr_nacl && tmr_tpg) {
- pr_debug("LUN_RESET: TMR caller fabric: %s"
- " initiator port %s\n",
- tmr_tpg->se_tpg_tfo->get_fabric_name(),
- tmr_nacl->initiatorname);
- }
- }
- pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
- (preempt_and_abort_list) ? "Preempt" : "TMR",
- dev->transport->name, tas);
/*
* Release all pending and outgoing TMRs aside from the received
* LUN_RESET tmr..
*/
- spin_lock_irq(&dev->se_tmr_lock);
+ spin_lock_irqsave(&dev->se_tmr_lock, flags);
list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
/*
* Allow the received TMR to return with FUNCTION_COMPLETE.
@@ -168,29 +135,48 @@ int core_tmr_lun_reset(
(core_scsi3_check_cdb_abort_and_preempt(
preempt_and_abort_list, cmd) != 0))
continue;
- spin_unlock_irq(&dev->se_tmr_lock);
- spin_lock_irqsave(&cmd->t_state_lock, flags);
+ spin_lock(&cmd->t_state_lock);
if (!atomic_read(&cmd->t_transport_active)) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- spin_lock_irq(&dev->se_tmr_lock);
+ spin_unlock(&cmd->t_state_lock);
continue;
}
if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- spin_lock_irq(&dev->se_tmr_lock);
+ spin_unlock(&cmd->t_state_lock);
continue;
}
+ spin_unlock(&cmd->t_state_lock);
+
+ list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
+ }
+ spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
+
+ while (!list_empty(&drain_tmr_list)) {
+ tmr = list_entry(drain_tmr_list.next, struct se_tmr_req, tmr_list);
+ list_del(&tmr->tmr_list);
+ cmd = tmr->task_cmd;
+
pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
" Response: 0x%02x, t_state: %d\n",
- (preempt_and_abort_list) ? "Preempt" : "", tmr_p,
- tmr_p->function, tmr_p->response, cmd->t_state);
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ (preempt_and_abort_list) ? "Preempt" : "", tmr,
+ tmr->function, tmr->response, cmd->t_state);
- transport_cmd_finish_abort_tmr(cmd);
- spin_lock_irq(&dev->se_tmr_lock);
+ transport_cmd_finish_abort(cmd, 1);
}
- spin_unlock_irq(&dev->se_tmr_lock);
+}
+
+static void core_tmr_drain_task_list(
+ struct se_device *dev,
+ struct se_cmd *prout_cmd,
+ struct se_node_acl *tmr_nacl,
+ int tas,
+ struct list_head *preempt_and_abort_list)
+{
+ LIST_HEAD(drain_task_list);
+ struct se_cmd *cmd;
+ struct se_task *task, *task_tmp;
+ unsigned long flags;
+ int fe_count;
/*
* Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status.
* This is following sam4r17, section 5.6 Aborting commands, Table 38
@@ -235,18 +221,28 @@ int core_tmr_lun_reset(
if (prout_cmd == cmd)
continue;
- list_del(&task->t_state_list);
+ list_move_tail(&task->t_state_list, &drain_task_list);
atomic_set(&task->task_state_active, 0);
- spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+ /*
+ * Remove from task execute list before processing drain_task_list
+ */
+ if (!list_empty(&task->t_execute_list))
+ __transport_remove_task_from_execute_queue(task, dev);
+ }
+ spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+
+ while (!list_empty(&drain_task_list)) {
+ task = list_entry(drain_task_list.next, struct se_task, t_state_list);
+ list_del(&task->t_state_list);
+ cmd = task->task_se_cmd;
- spin_lock_irqsave(&cmd->t_state_lock, flags);
pr_debug("LUN_RESET: %s cmd: %p task: %p"
- " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/"
- "def_t_state: %d/%d cdb: 0x%02x\n",
+ " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state: %d"
+ "cdb: 0x%02x\n",
(preempt_and_abort_list) ? "Preempt" : "", cmd, task,
cmd->se_tfo->get_task_tag(cmd), 0,
cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
- cmd->deferred_t_state, cmd->t_task_cdb[0]);
+ cmd->t_task_cdb[0]);
pr_debug("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
" t_task_cdbs: %d t_task_cdbs_left: %d"
" t_task_cdbs_sent: %d -- t_transport_active: %d"
@@ -259,35 +255,24 @@ int core_tmr_lun_reset(
atomic_read(&cmd->t_transport_stop),
atomic_read(&cmd->t_transport_sent));
- if (atomic_read(&task->task_active)) {
- atomic_set(&task->task_stop, 1);
- spin_unlock_irqrestore(
- &cmd->t_state_lock, flags);
-
- pr_debug("LUN_RESET: Waiting for task: %p to shutdown"
- " for dev: %p\n", task, dev);
- wait_for_completion(&task->task_stop_comp);
- pr_debug("LUN_RESET Completed task: %p shutdown for"
- " dev: %p\n", task, dev);
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- atomic_dec(&cmd->t_task_cdbs_left);
-
- atomic_set(&task->task_active, 0);
- atomic_set(&task->task_stop, 0);
- } else {
- if (atomic_read(&task->task_execute_queue) != 0)
- transport_remove_task_from_execute_queue(task, dev);
- }
- __transport_stop_task_timer(task, &flags);
+ /*
+ * If the command may be queued onto a workqueue cancel it now.
+ *
+ * This is equivalent to removal from the execute queue in the
+ * loop above, but we do it down here given that
+ * cancel_work_sync may block.
+ */
+ if (cmd->t_state == TRANSPORT_COMPLETE)
+ cancel_work_sync(&cmd->work);
+
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ target_stop_task(task, &flags);
if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) {
- spin_unlock_irqrestore(
- &cmd->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
pr_debug("LUN_RESET: Skipping task: %p, dev: %p for"
" t_task_cdbs_ex_left: %d\n", task, dev,
atomic_read(&cmd->t_task_cdbs_ex_left));
-
- spin_lock_irqsave(&dev->execute_task_lock, flags);
continue;
}
fe_count = atomic_read(&cmd->t_fe_count);
@@ -297,22 +282,31 @@ int core_tmr_lun_reset(
" task: %p, t_fe_count: %d dev: %p\n", task,
fe_count, dev);
atomic_set(&cmd->t_transport_aborted, 1);
- spin_unlock_irqrestore(&cmd->t_state_lock,
- flags);
- core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- spin_lock_irqsave(&dev->execute_task_lock, flags);
+ core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
continue;
}
pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
" t_fe_count: %d dev: %p\n", task, fe_count, dev);
atomic_set(&cmd->t_transport_aborted, 1);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
- spin_lock_irqsave(&dev->execute_task_lock, flags);
+ core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
}
- spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+}
+
+static void core_tmr_drain_cmd_list(
+ struct se_device *dev,
+ struct se_cmd *prout_cmd,
+ struct se_node_acl *tmr_nacl,
+ int tas,
+ struct list_head *preempt_and_abort_list)
+{
+ LIST_HEAD(drain_cmd_list);
+ struct se_queue_obj *qobj = &dev->dev_queue_obj;
+ struct se_cmd *cmd, *tcmd;
+ unsigned long flags;
/*
* Release all commands remaining in the struct se_device cmd queue.
*
@@ -336,11 +330,26 @@ int core_tmr_lun_reset(
*/
if (prout_cmd == cmd)
continue;
+ /*
+ * Skip direct processing of TRANSPORT_FREE_CMD_INTR for
+ * HW target mode fabrics.
+ */
+ spin_lock(&cmd->t_state_lock);
+ if (cmd->t_state == TRANSPORT_FREE_CMD_INTR) {
+ spin_unlock(&cmd->t_state_lock);
+ continue;
+ }
+ spin_unlock(&cmd->t_state_lock);
- atomic_dec(&cmd->t_transport_queue_active);
+ atomic_set(&cmd->t_transport_queue_active, 0);
atomic_dec(&qobj->queue_cnt);
- list_del(&cmd->se_queue_node);
- spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+ list_move_tail(&cmd->se_queue_node, &drain_cmd_list);
+ }
+ spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+
+ while (!list_empty(&drain_cmd_list)) {
+ cmd = list_entry(drain_cmd_list.next, struct se_cmd, se_queue_node);
+ list_del_init(&cmd->se_queue_node);
pr_debug("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
" %d t_fe_count: %d\n", (preempt_and_abort_list) ?
@@ -353,9 +362,53 @@ int core_tmr_lun_reset(
core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
atomic_read(&cmd->t_fe_count));
- spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
}
- spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+}
+
+int core_tmr_lun_reset(
+ struct se_device *dev,
+ struct se_tmr_req *tmr,
+ struct list_head *preempt_and_abort_list,
+ struct se_cmd *prout_cmd)
+{
+ struct se_node_acl *tmr_nacl = NULL;
+ struct se_portal_group *tmr_tpg = NULL;
+ int tas;
+ /*
+ * TASK_ABORTED status bit, this is configurable via ConfigFS
+ * struct se_device attributes. spc4r17 section 7.4.6 Control mode page
+ *
+ * A task aborted status (TAS) bit set to zero specifies that aborted
+ * tasks shall be terminated by the device server without any response
+ * to the application client. A TAS bit set to one specifies that tasks
+ * aborted by the actions of an I_T nexus other than the I_T nexus on
+ * which the command was received shall be completed with TASK ABORTED
+ * status (see SAM-4).
+ */
+ tas = dev->se_sub_dev->se_dev_attrib.emulate_tas;
+ /*
+ * Determine if this se_tmr is coming from a $FABRIC_MOD
+ * or struct se_device passthrough..
+ */
+ if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
+ tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
+ tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
+ if (tmr_nacl && tmr_tpg) {
+ pr_debug("LUN_RESET: TMR caller fabric: %s"
+ " initiator port %s\n",
+ tmr_tpg->se_tpg_tfo->get_fabric_name(),
+ tmr_nacl->initiatorname);
+ }
+ }
+ pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
+ (preempt_and_abort_list) ? "Preempt" : "TMR",
+ dev->transport->name, tas);
+
+ core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
+ core_tmr_drain_task_list(dev, prout_cmd, tmr_nacl, tas,
+ preempt_and_abort_list);
+ core_tmr_drain_cmd_list(dev, prout_cmd, tmr_nacl, tas,
+ preempt_and_abort_list);
/*
* Clear any legacy SPC-2 reservation when called during
* LOGICAL UNIT RESET
@@ -378,3 +431,4 @@ int core_tmr_lun_reset(
dev->transport->name);
return 0;
}
+
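The three core_tmr_drain_*() helpers introduced above share one locking pattern: matching entries are moved onto a local list while the lock is held, and the local list is then processed with the lock dropped, so that potentially blocking work (cancel_work_sync(), fabric callbacks) happens outside the spinlock. A generic sketch of that pattern, with hypothetical names:

struct example_entry {
	struct list_head list;
	/* ... payload ... */
};

static void example_drain(spinlock_t *lock, struct list_head *src)
{
	LIST_HEAD(drain_list);
	struct example_entry *e;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_splice_init(src, &drain_list);	/* or selective list_move_tail() */
	spin_unlock_irqrestore(lock, flags);

	while (!list_empty(&drain_list)) {
		e = list_first_entry(&drain_list, struct example_entry, list);
		list_del_init(&e->list);
		/* process e; may sleep, the lock is not held here */
	}
}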
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 0304e765158..d7525580448 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -54,11 +54,11 @@
#include "target_core_alua.h"
#include "target_core_hba.h"
#include "target_core_pr.h"
-#include "target_core_scdb.h"
#include "target_core_ua.h"
static int sub_api_initialized;
+static struct workqueue_struct *target_completion_wq;
static struct kmem_cache *se_cmd_cache;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_tmr_req_cache;
@@ -69,30 +69,19 @@ struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
-/* Used for transport_dev_get_map_*() */
-typedef int (*map_func_t)(struct se_task *, u32);
-
static int transport_generic_write_pending(struct se_cmd *);
static int transport_processing_thread(void *param);
static int __transport_execute_tasks(struct se_device *dev);
static void transport_complete_task_attr(struct se_cmd *cmd);
-static int transport_complete_qf(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd,
- struct se_device *dev, int (*qf_callback)(struct se_cmd *));
-static void transport_direct_request_timeout(struct se_cmd *cmd);
+ struct se_device *dev);
static void transport_free_dev_tasks(struct se_cmd *cmd);
-static u32 transport_allocate_tasks(struct se_cmd *cmd,
- unsigned long long starting_lba,
- enum dma_data_direction data_direction,
- struct scatterlist *sgl, unsigned int nents);
static int transport_generic_get_mem(struct se_cmd *cmd);
-static int transport_generic_remove(struct se_cmd *cmd,
- int session_reinstatement);
-static void transport_release_fe_cmd(struct se_cmd *cmd);
-static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
- struct se_queue_obj *qobj);
+static void transport_put_cmd(struct se_cmd *cmd);
+static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
-static void transport_stop_all_task_timers(struct se_cmd *cmd);
+static void transport_generic_request_failure(struct se_cmd *, int, int);
+static void target_complete_ok_work(struct work_struct *work);
int init_se_kmem_caches(void)
{
@@ -108,7 +97,7 @@ int init_se_kmem_caches(void)
if (!se_tmr_req_cache) {
pr_err("kmem_cache_create() for struct se_tmr_req"
" failed\n");
- goto out;
+ goto out_free_cmd_cache;
}
se_sess_cache = kmem_cache_create("se_sess_cache",
sizeof(struct se_session), __alignof__(struct se_session),
@@ -116,14 +105,14 @@ int init_se_kmem_caches(void)
if (!se_sess_cache) {
pr_err("kmem_cache_create() for struct se_session"
" failed\n");
- goto out;
+ goto out_free_tmr_req_cache;
}
se_ua_cache = kmem_cache_create("se_ua_cache",
sizeof(struct se_ua), __alignof__(struct se_ua),
0, NULL);
if (!se_ua_cache) {
pr_err("kmem_cache_create() for struct se_ua failed\n");
- goto out;
+ goto out_free_sess_cache;
}
t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
sizeof(struct t10_pr_registration),
@@ -131,7 +120,7 @@ int init_se_kmem_caches(void)
if (!t10_pr_reg_cache) {
pr_err("kmem_cache_create() for struct t10_pr_registration"
" failed\n");
- goto out;
+ goto out_free_ua_cache;
}
t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
@@ -139,7 +128,7 @@ int init_se_kmem_caches(void)
if (!t10_alua_lu_gp_cache) {
pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
" failed\n");
- goto out;
+ goto out_free_pr_reg_cache;
}
t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
sizeof(struct t10_alua_lu_gp_member),
@@ -147,7 +136,7 @@ int init_se_kmem_caches(void)
if (!t10_alua_lu_gp_mem_cache) {
pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
"cache failed\n");
- goto out;
+ goto out_free_lu_gp_cache;
}
t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
sizeof(struct t10_alua_tg_pt_gp),
@@ -155,7 +144,7 @@ int init_se_kmem_caches(void)
if (!t10_alua_tg_pt_gp_cache) {
pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
"cache failed\n");
- goto out;
+ goto out_free_lu_gp_mem_cache;
}
t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
"t10_alua_tg_pt_gp_mem_cache",
@@ -165,34 +154,41 @@ int init_se_kmem_caches(void)
if (!t10_alua_tg_pt_gp_mem_cache) {
pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
"mem_t failed\n");
- goto out;
+ goto out_free_tg_pt_gp_cache;
}
+ target_completion_wq = alloc_workqueue("target_completion",
+ WQ_MEM_RECLAIM, 0);
+ if (!target_completion_wq)
+ goto out_free_tg_pt_gp_mem_cache;
+
return 0;
+
+out_free_tg_pt_gp_mem_cache:
+ kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
+out_free_tg_pt_gp_cache:
+ kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
+out_free_lu_gp_mem_cache:
+ kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
+out_free_lu_gp_cache:
+ kmem_cache_destroy(t10_alua_lu_gp_cache);
+out_free_pr_reg_cache:
+ kmem_cache_destroy(t10_pr_reg_cache);
+out_free_ua_cache:
+ kmem_cache_destroy(se_ua_cache);
+out_free_sess_cache:
+ kmem_cache_destroy(se_sess_cache);
+out_free_tmr_req_cache:
+ kmem_cache_destroy(se_tmr_req_cache);
+out_free_cmd_cache:
+ kmem_cache_destroy(se_cmd_cache);
out:
- if (se_cmd_cache)
- kmem_cache_destroy(se_cmd_cache);
- if (se_tmr_req_cache)
- kmem_cache_destroy(se_tmr_req_cache);
- if (se_sess_cache)
- kmem_cache_destroy(se_sess_cache);
- if (se_ua_cache)
- kmem_cache_destroy(se_ua_cache);
- if (t10_pr_reg_cache)
- kmem_cache_destroy(t10_pr_reg_cache);
- if (t10_alua_lu_gp_cache)
- kmem_cache_destroy(t10_alua_lu_gp_cache);
- if (t10_alua_lu_gp_mem_cache)
- kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
- if (t10_alua_tg_pt_gp_cache)
- kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
- if (t10_alua_tg_pt_gp_mem_cache)
- kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
return -ENOMEM;
}
void release_se_kmem_caches(void)
{
+ destroy_workqueue(target_completion_wq);
kmem_cache_destroy(se_cmd_cache);
kmem_cache_destroy(se_tmr_req_cache);
kmem_cache_destroy(se_sess_cache);
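The reworked init_se_kmem_caches() above replaces the single catch-all error path with the usual kernel unwind idiom: one label per allocation, each label undoing the allocations made before it, in reverse order. A compact sketch of the idiom with hypothetical caches:

static struct kmem_cache *cache_a, *cache_b;	/* hypothetical */
static struct workqueue_struct *example_wq;	/* hypothetical */

static int example_init(void)
{
	cache_a = kmem_cache_create("example_a", 64, 0, 0, NULL);
	if (!cache_a)
		goto out;
	cache_b = kmem_cache_create("example_b", 128, 0, 0, NULL);
	if (!cache_b)
		goto out_free_a;
	example_wq = alloc_workqueue("example", WQ_MEM_RECLAIM, 0);
	if (!example_wq)
		goto out_free_b;
	return 0;

out_free_b:
	kmem_cache_destroy(cache_b);
out_free_a:
	kmem_cache_destroy(cache_a);
out:
	return -ENOMEM;
}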
@@ -233,10 +229,13 @@ void transport_init_queue_obj(struct se_queue_obj *qobj)
}
EXPORT_SYMBOL(transport_init_queue_obj);
-static int transport_subsystem_reqmods(void)
+void transport_subsystem_check_init(void)
{
int ret;
+ if (sub_api_initialized)
+ return;
+
ret = request_module("target_core_iblock");
if (ret != 0)
pr_err("Unable to load target_core_iblock\n");
@@ -253,24 +252,8 @@ static int transport_subsystem_reqmods(void)
if (ret != 0)
pr_err("Unable to load target_core_stgt\n");
- return 0;
-}
-
-int transport_subsystem_check_init(void)
-{
- int ret;
-
- if (sub_api_initialized)
- return 0;
- /*
- * Request the loading of known TCM subsystem plugins..
- */
- ret = transport_subsystem_reqmods();
- if (ret < 0)
- return ret;
-
sub_api_initialized = 1;
- return 0;
+ return;
}
struct se_session *transport_init_session(void)
@@ -437,16 +420,15 @@ EXPORT_SYMBOL(transport_deregister_session);
*/
static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
{
- struct se_device *dev;
+ struct se_device *dev = cmd->se_dev;
struct se_task *task;
unsigned long flags;
- list_for_each_entry(task, &cmd->t_task_list, t_list) {
- dev = task->se_dev;
- if (!dev)
- continue;
+ if (!dev)
+ return;
- if (atomic_read(&task->task_active))
+ list_for_each_entry(task, &cmd->t_task_list, t_list) {
+ if (task->task_flags & TF_ACTIVE)
continue;
if (!atomic_read(&task->task_state_active))
@@ -488,8 +470,6 @@ static int transport_cmd_check_stop(
" == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
cmd->se_tfo->get_task_tag(cmd));
- cmd->deferred_t_state = cmd->t_state;
- cmd->t_state = TRANSPORT_DEFERRED_CMD;
atomic_set(&cmd->t_transport_active, 0);
if (transport_off == 2)
transport_all_task_dev_remove_state(cmd);
@@ -507,8 +487,6 @@ static int transport_cmd_check_stop(
" TRUE for ITT: 0x%08x\n", __func__, __LINE__,
cmd->se_tfo->get_task_tag(cmd));
- cmd->deferred_t_state = cmd->t_state;
- cmd->t_state = TRANSPORT_DEFERRED_CMD;
if (transport_off == 2)
transport_all_task_dev_remove_state(cmd);
@@ -593,35 +571,24 @@ check_lun:
void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
- transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);
- transport_lun_remove_cmd(cmd);
+ if (!cmd->se_tmr_req)
+ transport_lun_remove_cmd(cmd);
if (transport_cmd_check_stop_to_fabric(cmd))
return;
- if (remove)
- transport_generic_remove(cmd, 0);
-}
-
-void transport_cmd_finish_abort_tmr(struct se_cmd *cmd)
-{
- transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);
-
- if (transport_cmd_check_stop_to_fabric(cmd))
- return;
-
- transport_generic_remove(cmd, 0);
+ if (remove) {
+ transport_remove_cmd_from_queue(cmd);
+ transport_put_cmd(cmd);
+ }
}
-static void transport_add_cmd_to_queue(
- struct se_cmd *cmd,
- int t_state)
+static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state,
+ bool at_head)
{
struct se_device *dev = cmd->se_dev;
struct se_queue_obj *qobj = &dev->dev_queue_obj;
unsigned long flags;
- INIT_LIST_HEAD(&cmd->se_queue_node);
-
if (t_state) {
spin_lock_irqsave(&cmd->t_state_lock, flags);
cmd->t_state = t_state;
@@ -630,15 +597,20 @@ static void transport_add_cmd_to_queue(
}
spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
- if (cmd->se_cmd_flags & SCF_EMULATE_QUEUE_FULL) {
- cmd->se_cmd_flags &= ~SCF_EMULATE_QUEUE_FULL;
+
+ /* If the cmd is already on the list, remove it before we add it */
+ if (!list_empty(&cmd->se_queue_node))
+ list_del(&cmd->se_queue_node);
+ else
+ atomic_inc(&qobj->queue_cnt);
+
+ if (at_head)
list_add(&cmd->se_queue_node, &qobj->qobj_list);
- } else
+ else
list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
- atomic_inc(&cmd->t_transport_queue_active);
+ atomic_set(&cmd->t_transport_queue_active, 1);
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
- atomic_inc(&qobj->queue_cnt);
wake_up_interruptible(&qobj->thread_wq);
}
@@ -655,19 +627,18 @@ transport_get_cmd_from_queue(struct se_queue_obj *qobj)
}
cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);
- atomic_dec(&cmd->t_transport_queue_active);
+ atomic_set(&cmd->t_transport_queue_active, 0);
- list_del(&cmd->se_queue_node);
+ list_del_init(&cmd->se_queue_node);
atomic_dec(&qobj->queue_cnt);
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
return cmd;
}
-static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
- struct se_queue_obj *qobj)
+static void transport_remove_cmd_from_queue(struct se_cmd *cmd)
{
- struct se_cmd *t;
+ struct se_queue_obj *qobj = &cmd->se_dev->dev_queue_obj;
unsigned long flags;
spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
@@ -675,14 +646,9 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
return;
}
-
- list_for_each_entry(t, &qobj->qobj_list, se_queue_node)
- if (t == cmd) {
- atomic_dec(&cmd->t_transport_queue_active);
- atomic_dec(&qobj->queue_cnt);
- list_del(&cmd->se_queue_node);
- break;
- }
+ atomic_set(&cmd->t_transport_queue_active, 0);
+ atomic_dec(&qobj->queue_cnt);
+ list_del_init(&cmd->se_queue_node);
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
if (atomic_read(&cmd->t_transport_queue_active)) {
@@ -715,6 +681,13 @@ void transport_complete_sync_cache(struct se_cmd *cmd, int good)
}
EXPORT_SYMBOL(transport_complete_sync_cache);
+static void target_complete_failure_work(struct work_struct *work)
+{
+ struct se_cmd *cmd = container_of(work, struct se_cmd, work);
+
+ transport_generic_request_failure(cmd, 1, 1);
+}
+
/* transport_complete_task():
*
* Called from interrupt and non interrupt context depending
@@ -723,8 +696,7 @@ EXPORT_SYMBOL(transport_complete_sync_cache);
void transport_complete_task(struct se_task *task, int success)
{
struct se_cmd *cmd = task->task_se_cmd;
- struct se_device *dev = task->se_dev;
- int t_state;
+ struct se_device *dev = cmd->se_dev;
unsigned long flags;
#if 0
pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task,
@@ -734,7 +706,7 @@ void transport_complete_task(struct se_task *task, int success)
atomic_inc(&dev->depth_left);
spin_lock_irqsave(&cmd->t_state_lock, flags);
- atomic_set(&task->task_active, 0);
+ task->task_flags &= ~TF_ACTIVE;
/*
* See if any sense data exists, if so set the TASK_SENSE flag.
@@ -753,68 +725,39 @@ void transport_complete_task(struct se_task *task, int success)
* See if we are waiting for outstanding struct se_task
* to complete for an exception condition
*/
- if (atomic_read(&task->task_stop)) {
- /*
- * Decrement cmd->t_se_count if this task had
- * previously thrown its timeout exception handler.
- */
- if (atomic_read(&task->task_timeout)) {
- atomic_dec(&cmd->t_se_count);
- atomic_set(&task->task_timeout, 0);
- }
+ if (task->task_flags & TF_REQUEST_STOP) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
complete(&task->task_stop_comp);
return;
}
/*
- * If the task's timeout handler has fired, use the t_task_cdbs_timeout
- * left counter to determine when the struct se_cmd is ready to be queued to
- * the processing thread.
- */
- if (atomic_read(&task->task_timeout)) {
- if (!atomic_dec_and_test(
- &cmd->t_task_cdbs_timeout_left)) {
- spin_unlock_irqrestore(&cmd->t_state_lock,
- flags);
- return;
- }
- t_state = TRANSPORT_COMPLETE_TIMEOUT;
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- transport_add_cmd_to_queue(cmd, t_state);
- return;
- }
- atomic_dec(&cmd->t_task_cdbs_timeout_left);
-
- /*
* Decrement the outstanding t_task_cdbs_left count. The last
* struct se_task from struct se_cmd will complete itself into the
* device queue depending upon int success.
*/
if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
- if (!success)
- cmd->t_tasks_failed = 1;
-
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return;
}
if (!success || cmd->t_tasks_failed) {
- t_state = TRANSPORT_COMPLETE_FAILURE;
if (!task->task_error_status) {
task->task_error_status =
PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
cmd->transport_error_status =
PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
+ INIT_WORK(&cmd->work, target_complete_failure_work);
} else {
atomic_set(&cmd->t_transport_complete, 1);
- t_state = TRANSPORT_COMPLETE_OK;
+ INIT_WORK(&cmd->work, target_complete_ok_work);
}
+
+ cmd->t_state = TRANSPORT_COMPLETE;
+ atomic_set(&cmd->t_transport_active, 1);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- transport_add_cmd_to_queue(cmd, t_state);
+ queue_work(target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(transport_complete_task);
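transport_complete_task() above no longer bounces the command through the device queue thread; it selects a handler with INIT_WORK() and queues the command on the new target_completion_wq. The underlying pattern, as a minimal sketch with hypothetical names:

struct example_cmd {
	struct work_struct work;
	/* ... */
};

static void example_complete_work(struct work_struct *work)
{
	struct example_cmd *cmd = container_of(work, struct example_cmd, work);

	/* completion processing runs in process context and may sleep */
}

/* callable from interrupt context */
static void example_complete(struct workqueue_struct *wq, struct example_cmd *cmd)
{
	INIT_WORK(&cmd->work, example_complete_work);
	queue_work(wq, &cmd->work);
}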
@@ -901,14 +844,12 @@ static void __transport_add_task_to_execute_queue(
static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
{
- struct se_device *dev;
+ struct se_device *dev = cmd->se_dev;
struct se_task *task;
unsigned long flags;
spin_lock_irqsave(&cmd->t_state_lock, flags);
list_for_each_entry(task, &cmd->t_task_list, t_list) {
- dev = task->se_dev;
-
if (atomic_read(&task->task_state_active))
continue;
@@ -933,38 +874,36 @@ static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
spin_lock_irqsave(&dev->execute_task_lock, flags);
list_for_each_entry(task, &cmd->t_task_list, t_list) {
- if (atomic_read(&task->task_execute_queue))
+ if (!list_empty(&task->t_execute_list))
continue;
/*
* __transport_add_task_to_execute_queue() handles the
* SAM Task Attribute emulation if enabled
*/
__transport_add_task_to_execute_queue(task, task_prev, dev);
- atomic_set(&task->task_execute_queue, 1);
task_prev = task;
}
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}
-/* transport_remove_task_from_execute_queue():
- *
- *
- */
+void __transport_remove_task_from_execute_queue(struct se_task *task,
+ struct se_device *dev)
+{
+ list_del_init(&task->t_execute_list);
+ atomic_dec(&dev->execute_tasks);
+}
+
void transport_remove_task_from_execute_queue(
struct se_task *task,
struct se_device *dev)
{
unsigned long flags;
- if (atomic_read(&task->task_execute_queue) == 0) {
- dump_stack();
+ if (WARN_ON(list_empty(&task->t_execute_list)))
return;
- }
spin_lock_irqsave(&dev->execute_task_lock, flags);
- list_del(&task->t_execute_list);
- atomic_set(&task->task_execute_queue, 0);
- atomic_dec(&dev->execute_tasks);
+ __transport_remove_task_from_execute_queue(task, dev);
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}
@@ -976,30 +915,26 @@ static void target_qf_do_work(struct work_struct *work)
{
struct se_device *dev = container_of(work, struct se_device,
qf_work_queue);
+ LIST_HEAD(qf_cmd_list);
struct se_cmd *cmd, *cmd_tmp;
spin_lock_irq(&dev->qf_cmd_lock);
- list_for_each_entry_safe(cmd, cmd_tmp, &dev->qf_cmd_list, se_qf_node) {
+ list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
+ spin_unlock_irq(&dev->qf_cmd_lock);
+ list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
list_del(&cmd->se_qf_node);
atomic_dec(&dev->dev_qf_count);
smp_mb__after_atomic_dec();
- spin_unlock_irq(&dev->qf_cmd_lock);
pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
- (cmd->t_state == TRANSPORT_COMPLETE_OK) ? "COMPLETE_OK" :
+ (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
: "UNKNOWN");
- /*
- * The SCF_EMULATE_QUEUE_FULL flag will be cleared once se_cmd
- * has been added to head of queue
- */
- transport_add_cmd_to_queue(cmd, cmd->t_state);
- spin_lock_irq(&dev->qf_cmd_lock);
+ transport_add_cmd_to_queue(cmd, cmd->t_state, true);
}
- spin_unlock_irq(&dev->qf_cmd_lock);
}
unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
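
The target_qf_do_work() rework above replaces the per-entry unlock/relock dance with the usual "splice under the lock, process off the lock" idiom: the whole pending list is moved onto a stack-local list head in one step and then walked without the spinlock held. A self-contained sketch of that idiom, with illustrative names:

#include <linux/list.h>
#include <linux/spinlock.h>

struct qf_item {
	struct list_head node;
};

static LIST_HEAD(qf_pending);			/* shared, protected by qf_lock */
static DEFINE_SPINLOCK(qf_lock);

static void qf_process_one(struct qf_item *item)
{
	/* May sleep: no lock is held while we are called. */
}

static void qf_drain(void)
{
	LIST_HEAD(local);			/* private list on the stack */
	struct qf_item *item, *tmp;

	/* Take everything in one step; the shared list ends up empty. */
	spin_lock_irq(&qf_lock);
	list_splice_init(&qf_pending, &local);
	spin_unlock_irq(&qf_lock);

	/* Walk the private copy without the lock. */
	list_for_each_entry_safe(item, tmp, &local, node) {
		list_del(&item->node);
		qf_process_one(item);
	}
}
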
@@ -1053,41 +988,6 @@ void transport_dump_dev_state(
*bl += sprintf(b + *bl, " ");
}
-/* transport_release_all_cmds():
- *
- *
- */
-static void transport_release_all_cmds(struct se_device *dev)
-{
- struct se_cmd *cmd, *tcmd;
- int bug_out = 0, t_state;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags);
- list_for_each_entry_safe(cmd, tcmd, &dev->dev_queue_obj.qobj_list,
- se_queue_node) {
- t_state = cmd->t_state;
- list_del(&cmd->se_queue_node);
- spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock,
- flags);
-
- pr_err("Releasing ITT: 0x%08x, i_state: %u,"
- " t_state: %u directly\n",
- cmd->se_tfo->get_task_tag(cmd),
- cmd->se_tfo->get_cmd_state(cmd), t_state);
-
- transport_release_fe_cmd(cmd);
- bug_out = 1;
-
- spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags);
- }
- spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, flags);
-#if 0
- if (bug_out)
- BUG();
-#endif
-}
-
void transport_dump_vpd_proto_id(
struct t10_vpd *vpd,
unsigned char *p_buf,
@@ -1573,7 +1473,6 @@ transport_generic_get_task(struct se_cmd *cmd,
INIT_LIST_HEAD(&task->t_state_list);
init_completion(&task->task_stop_comp);
task->task_se_cmd = cmd;
- task->se_dev = dev;
task->task_data_direction = data_direction;
return task;
@@ -1598,6 +1497,7 @@ void transport_init_se_cmd(
INIT_LIST_HEAD(&cmd->se_delayed_node);
INIT_LIST_HEAD(&cmd->se_ordered_node);
INIT_LIST_HEAD(&cmd->se_qf_node);
+ INIT_LIST_HEAD(&cmd->se_queue_node);
INIT_LIST_HEAD(&cmd->t_task_list);
init_completion(&cmd->transport_lun_fe_stop_comp);
@@ -1641,21 +1541,6 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
return 0;
}
-void transport_free_se_cmd(
- struct se_cmd *se_cmd)
-{
- if (se_cmd->se_tmr_req)
- core_tmr_release_req(se_cmd->se_tmr_req);
- /*
- * Check and free any extended CDB buffer that was allocated
- */
- if (se_cmd->t_task_cdb != se_cmd->__t_task_cdb)
- kfree(se_cmd->t_task_cdb);
-}
-EXPORT_SYMBOL(transport_free_se_cmd);
-
-static void transport_generic_wait_for_tasks(struct se_cmd *, int, int);
-
/* transport_generic_allocate_tasks():
*
* Called from fabric RX Thread.
@@ -1667,12 +1552,6 @@ int transport_generic_allocate_tasks(
int ret;
transport_generic_prepare_cdb(cdb);
-
- /*
- * This is needed for early exceptions.
- */
- cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;
-
/*
* Ensure that the received CDB is less than the max (252 + 8) bytes
* for VARIABLE_LENGTH_CMD
@@ -1730,26 +1609,6 @@ int transport_generic_allocate_tasks(
EXPORT_SYMBOL(transport_generic_allocate_tasks);
/*
- * Used by fabric module frontends not defining a TFO->new_cmd_map()
- * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD statis
- */
-int transport_generic_handle_cdb(
- struct se_cmd *cmd)
-{
- if (!cmd->se_lun) {
- dump_stack();
- pr_err("cmd->se_lun is NULL\n");
- return -EINVAL;
- }
-
- transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD);
- return 0;
-}
-EXPORT_SYMBOL(transport_generic_handle_cdb);
-
-static void transport_generic_request_failure(struct se_cmd *,
- struct se_device *, int, int);
-/*
* Used by fabric module frontends to queue tasks directly.
 * May only be used from process context
*/
@@ -1773,7 +1632,7 @@ int transport_handle_cdb_direct(
* Set TRANSPORT_NEW_CMD state and cmd->t_transport_active=1 following
* transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()
* in existing usage to ensure that outstanding descriptors are handled
- * correctly during shutdown via transport_generic_wait_for_tasks()
+ * correctly during shutdown via transport_wait_for_tasks()
*
* Also, we don't take cmd->t_state_lock here as we only expect
* this to be called for initial descriptor submission.
@@ -1790,7 +1649,7 @@ int transport_handle_cdb_direct(
return 0;
else if (ret < 0) {
cmd->transport_error_status = ret;
- transport_generic_request_failure(cmd, NULL, 0,
+ transport_generic_request_failure(cmd, 0,
(cmd->data_direction != DMA_TO_DEVICE));
}
return 0;
@@ -1811,7 +1670,7 @@ int transport_generic_handle_cdb_map(
return -EINVAL;
}
- transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP);
+ transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP, false);
return 0;
}
EXPORT_SYMBOL(transport_generic_handle_cdb_map);
@@ -1841,7 +1700,7 @@ int transport_generic_handle_data(
if (transport_check_aborted_status(cmd, 1) != 0)
return 0;
- transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE);
+ transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE, false);
return 0;
}
EXPORT_SYMBOL(transport_generic_handle_data);
@@ -1853,12 +1712,7 @@ EXPORT_SYMBOL(transport_generic_handle_data);
int transport_generic_handle_tmr(
struct se_cmd *cmd)
{
- /*
- * This is needed for early exceptions.
- */
- cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;
-
- transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR);
+ transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR, false);
return 0;
}
EXPORT_SYMBOL(transport_generic_handle_tmr);
@@ -1866,10 +1720,36 @@ EXPORT_SYMBOL(transport_generic_handle_tmr);
void transport_generic_free_cmd_intr(
struct se_cmd *cmd)
{
- transport_add_cmd_to_queue(cmd, TRANSPORT_FREE_CMD_INTR);
+ transport_add_cmd_to_queue(cmd, TRANSPORT_FREE_CMD_INTR, false);
}
EXPORT_SYMBOL(transport_generic_free_cmd_intr);
+/*
+ * If the task is active, request it to be stopped and sleep until it
+ * has completed.
+ */
+bool target_stop_task(struct se_task *task, unsigned long *flags)
+{
+ struct se_cmd *cmd = task->task_se_cmd;
+ bool was_active = false;
+
+ if (task->task_flags & TF_ACTIVE) {
+ task->task_flags |= TF_REQUEST_STOP;
+ spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
+
+ pr_debug("Task %p waiting to complete\n", task);
+ wait_for_completion(&task->task_stop_comp);
+ pr_debug("Task %p stopped successfully\n", task);
+
+ spin_lock_irqsave(&cmd->t_state_lock, *flags);
+ atomic_dec(&cmd->t_task_cdbs_left);
+ task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP);
+ was_active = true;
+ }
+
+ return was_active;
+}
+
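
target_stop_task() packages the stop handshake that transport_stop_tasks_for_cmd() used to open-code, and it pairs with the TF_REQUEST_STOP check added to transport_complete_task() at the top of this patch: the stopper raises a flag under the state lock and sleeps on a completion, and the completion path fires that completion instead of doing its normal work. A generic sketch of the handshake with illustrative flag and structure names; init_completion() on stop_done is assumed to have run at setup time:

#include <linux/completion.h>
#include <linux/spinlock.h>

#define MY_TF_ACTIVE		(1 << 0)
#define MY_TF_REQUEST_STOP	(1 << 1)

struct my_task {
	unsigned int flags;		/* protected by the caller's lock */
	struct completion stop_done;	/* init_completion() at setup time */
};

/* Completion side, e.g. the backend's "I/O done" callback. */
static void my_task_done(struct my_task *t, spinlock_t *lock)
{
	unsigned long fl;

	spin_lock_irqsave(lock, fl);
	if (t->flags & MY_TF_REQUEST_STOP) {
		spin_unlock_irqrestore(lock, fl);
		complete(&t->stop_done);	/* wake the stopper, skip normal work */
		return;
	}
	t->flags &= ~MY_TF_ACTIVE;
	spin_unlock_irqrestore(lock, fl);
	/* ... normal completion processing ... */
}

/* Stopper side; returns true if it had to wait, mirroring target_stop_task(). */
static bool my_stop_task(struct my_task *t, spinlock_t *lock, unsigned long *flags)
{
	if (!(t->flags & MY_TF_ACTIVE))
		return false;

	t->flags |= MY_TF_REQUEST_STOP;
	spin_unlock_irqrestore(lock, *flags);

	wait_for_completion(&t->stop_done);

	spin_lock_irqsave(lock, *flags);
	t->flags &= ~(MY_TF_ACTIVE | MY_TF_REQUEST_STOP);
	return true;
}
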
static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
{
struct se_task *task, *task_tmp;
@@ -1885,51 +1765,26 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
spin_lock_irqsave(&cmd->t_state_lock, flags);
list_for_each_entry_safe(task, task_tmp,
&cmd->t_task_list, t_list) {
- pr_debug("task_no[%d] - Processing task %p\n",
- task->task_no, task);
+ pr_debug("Processing task %p\n", task);
/*
* If the struct se_task has not been sent and is not active,
* remove the struct se_task from the execution queue.
*/
- if (!atomic_read(&task->task_sent) &&
- !atomic_read(&task->task_active)) {
+ if (!(task->task_flags & (TF_ACTIVE | TF_SENT))) {
spin_unlock_irqrestore(&cmd->t_state_lock,
flags);
transport_remove_task_from_execute_queue(task,
- task->se_dev);
+ cmd->se_dev);
- pr_debug("task_no[%d] - Removed from execute queue\n",
- task->task_no);
+ pr_debug("Task %p removed from execute queue\n", task);
spin_lock_irqsave(&cmd->t_state_lock, flags);
continue;
}
- /*
- * If the struct se_task is active, sleep until it is returned
- * from the plugin.
- */
- if (atomic_read(&task->task_active)) {
- atomic_set(&task->task_stop, 1);
- spin_unlock_irqrestore(&cmd->t_state_lock,
- flags);
-
- pr_debug("task_no[%d] - Waiting to complete\n",
- task->task_no);
- wait_for_completion(&task->task_stop_comp);
- pr_debug("task_no[%d] - Stopped successfully\n",
- task->task_no);
-
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- atomic_dec(&cmd->t_task_cdbs_left);
-
- atomic_set(&task->task_active, 0);
- atomic_set(&task->task_stop, 0);
- } else {
- pr_debug("task_no[%d] - Did nothing\n", task->task_no);
+ if (!target_stop_task(task, &flags)) {
+ pr_debug("Task %p - did nothing\n", task);
ret++;
}
-
- __transport_stop_task_timer(task, &flags);
}
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
@@ -1941,7 +1796,6 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
*/
static void transport_generic_request_failure(
struct se_cmd *cmd,
- struct se_device *dev,
int complete,
int sc)
{
@@ -1950,10 +1804,9 @@ static void transport_generic_request_failure(
pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
cmd->t_task_cdb[0]);
- pr_debug("-----[ i_state: %d t_state/def_t_state:"
- " %d/%d transport_error_status: %d\n",
+ pr_debug("-----[ i_state: %d t_state: %d transport_error_status: %d\n",
cmd->se_tfo->get_cmd_state(cmd),
- cmd->t_state, cmd->deferred_t_state,
+ cmd->t_state,
cmd->transport_error_status);
pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
" t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
@@ -1966,10 +1819,6 @@ static void transport_generic_request_failure(
atomic_read(&cmd->t_transport_stop),
atomic_read(&cmd->t_transport_sent));
- transport_stop_all_task_timers(cmd);
-
- if (dev)
- atomic_inc(&dev->depth_left);
/*
* For SAM Task Attribute emulation for failed struct se_cmd
*/
@@ -1977,7 +1826,6 @@ static void transport_generic_request_failure(
transport_complete_task_attr(cmd);
if (complete) {
- transport_direct_request_timeout(cmd);
cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
}
@@ -2076,46 +1924,8 @@ check_stop:
return;
queue_full:
- cmd->t_state = TRANSPORT_COMPLETE_OK;
- transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf);
-}
-
-static void transport_direct_request_timeout(struct se_cmd *cmd)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (!atomic_read(&cmd->t_transport_timeout)) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return;
- }
- if (atomic_read(&cmd->t_task_cdbs_timeout_left)) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return;
- }
-
- atomic_sub(atomic_read(&cmd->t_transport_timeout),
- &cmd->t_se_count);
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-}
-
-static void transport_generic_request_timeout(struct se_cmd *cmd)
-{
- unsigned long flags;
-
- /*
- * Reset cmd->t_se_count to allow transport_generic_remove()
- * to allow last call to free memory resources.
- */
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (atomic_read(&cmd->t_transport_timeout) > 1) {
- int tmp = (atomic_read(&cmd->t_transport_timeout) - 1);
-
- atomic_sub(tmp, &cmd->t_se_count);
- }
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- transport_generic_remove(cmd, 0);
+ cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
+ transport_handle_queue_full(cmd, cmd->se_dev);
}
static inline u32 transport_lba_21(unsigned char *cdb)
@@ -2160,127 +1970,6 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
}
-/*
- * Called from interrupt context.
- */
-static void transport_task_timeout_handler(unsigned long data)
-{
- struct se_task *task = (struct se_task *)data;
- struct se_cmd *cmd = task->task_se_cmd;
- unsigned long flags;
-
- pr_debug("transport task timeout fired! task: %p cmd: %p\n", task, cmd);
-
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (task->task_flags & TF_STOP) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return;
- }
- task->task_flags &= ~TF_RUNNING;
-
- /*
- * Determine if transport_complete_task() has already been called.
- */
- if (!atomic_read(&task->task_active)) {
- pr_debug("transport task: %p cmd: %p timeout task_active"
- " == 0\n", task, cmd);
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return;
- }
-
- atomic_inc(&cmd->t_se_count);
- atomic_inc(&cmd->t_transport_timeout);
- cmd->t_tasks_failed = 1;
-
- atomic_set(&task->task_timeout, 1);
- task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT;
- task->task_scsi_status = 1;
-
- if (atomic_read(&task->task_stop)) {
- pr_debug("transport task: %p cmd: %p timeout task_stop"
- " == 1\n", task, cmd);
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- complete(&task->task_stop_comp);
- return;
- }
-
- if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
- pr_debug("transport task: %p cmd: %p timeout non zero"
- " t_task_cdbs_left\n", task, cmd);
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return;
- }
- pr_debug("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n",
- task, cmd);
-
- cmd->t_state = TRANSPORT_COMPLETE_FAILURE;
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE);
-}
-
-/*
- * Called with cmd->t_state_lock held.
- */
-static void transport_start_task_timer(struct se_task *task)
-{
- struct se_device *dev = task->se_dev;
- int timeout;
-
- if (task->task_flags & TF_RUNNING)
- return;
- /*
- * If the task_timeout is disabled, exit now.
- */
- timeout = dev->se_sub_dev->se_dev_attrib.task_timeout;
- if (!timeout)
- return;
-
- init_timer(&task->task_timer);
- task->task_timer.expires = (get_jiffies_64() + timeout * HZ);
- task->task_timer.data = (unsigned long) task;
- task->task_timer.function = transport_task_timeout_handler;
-
- task->task_flags |= TF_RUNNING;
- add_timer(&task->task_timer);
-#if 0
- pr_debug("Starting task timer for cmd: %p task: %p seconds:"
- " %d\n", task->task_se_cmd, task, timeout);
-#endif
-}
-
-/*
- * Called with spin_lock_irq(&cmd->t_state_lock) held.
- */
-void __transport_stop_task_timer(struct se_task *task, unsigned long *flags)
-{
- struct se_cmd *cmd = task->task_se_cmd;
-
- if (!task->task_flags & TF_RUNNING)
- return;
-
- task->task_flags |= TF_STOP;
- spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
-
- del_timer_sync(&task->task_timer);
-
- spin_lock_irqsave(&cmd->t_state_lock, *flags);
- task->task_flags &= ~TF_RUNNING;
- task->task_flags &= ~TF_STOP;
-}
-
-static void transport_stop_all_task_timers(struct se_cmd *cmd)
-{
- struct se_task *task = NULL, *task_tmp;
- unsigned long flags;
-
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- list_for_each_entry_safe(task, task_tmp,
- &cmd->t_task_list, t_list)
- __transport_stop_task_timer(task, &flags);
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-}
-
static inline int transport_tcq_window_closed(struct se_device *dev)
{
if (dev->dev_tcq_window_closed++ <
@@ -2385,7 +2074,7 @@ static int transport_execute_tasks(struct se_cmd *cmd)
if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {
cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
- transport_generic_request_failure(cmd, NULL, 0, 1);
+ transport_generic_request_failure(cmd, 0, 1);
return 0;
}
@@ -2448,9 +2137,7 @@ check_depth:
}
task = list_first_entry(&dev->execute_task_list,
struct se_task, t_execute_list);
- list_del(&task->t_execute_list);
- atomic_set(&task->task_execute_queue, 0);
- atomic_dec(&dev->execute_tasks);
+ __transport_remove_task_from_execute_queue(task, dev);
spin_unlock_irq(&dev->execute_task_lock);
atomic_dec(&dev->depth_left);
@@ -2458,15 +2145,13 @@ check_depth:
cmd = task->task_se_cmd;
spin_lock_irqsave(&cmd->t_state_lock, flags);
- atomic_set(&task->task_active, 1);
- atomic_set(&task->task_sent, 1);
+ task->task_flags |= (TF_ACTIVE | TF_SENT);
atomic_inc(&cmd->t_task_cdbs_sent);
if (atomic_read(&cmd->t_task_cdbs_sent) ==
cmd->t_task_list_num)
- atomic_set(&cmd->transport_sent, 1);
+ atomic_set(&cmd->t_transport_sent, 1);
- transport_start_task_timer(task);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
/*
* The struct se_cmd->transport_emulate_cdb() function pointer is used
@@ -2477,10 +2162,13 @@ check_depth:
error = cmd->transport_emulate_cdb(cmd);
if (error != 0) {
cmd->transport_error_status = error;
- atomic_set(&task->task_active, 0);
- atomic_set(&cmd->transport_sent, 0);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ task->task_flags &= ~TF_ACTIVE;
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ atomic_set(&cmd->t_transport_sent, 0);
transport_stop_tasks_for_cmd(cmd);
- transport_generic_request_failure(cmd, dev, 0, 1);
+ atomic_inc(&dev->depth_left);
+ transport_generic_request_failure(cmd, 0, 1);
goto check_depth;
}
/*
@@ -2513,10 +2201,13 @@ check_depth:
if (error != 0) {
cmd->transport_error_status = error;
- atomic_set(&task->task_active, 0);
- atomic_set(&cmd->transport_sent, 0);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ task->task_flags &= ~TF_ACTIVE;
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ atomic_set(&cmd->t_transport_sent, 0);
transport_stop_tasks_for_cmd(cmd);
- transport_generic_request_failure(cmd, dev, 0, 1);
+ atomic_inc(&dev->depth_left);
+ transport_generic_request_failure(cmd, 0, 1);
}
}
@@ -2538,8 +2229,6 @@ void transport_new_cmd_failure(struct se_cmd *se_cmd)
spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
}
-static void transport_nop_wait_for_tasks(struct se_cmd *, int, int);
-
static inline u32 transport_get_sectors_6(
unsigned char *cdb,
struct se_cmd *cmd,
@@ -2752,13 +2441,16 @@ out:
static int transport_get_sense_data(struct se_cmd *cmd)
{
unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
- struct se_device *dev;
+ struct se_device *dev = cmd->se_dev;
struct se_task *task = NULL, *task_tmp;
unsigned long flags;
u32 offset = 0;
WARN_ON(!cmd->se_lun);
+ if (!dev)
+ return 0;
+
spin_lock_irqsave(&cmd->t_state_lock, flags);
if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
@@ -2767,14 +2459,9 @@ static int transport_get_sense_data(struct se_cmd *cmd)
list_for_each_entry_safe(task, task_tmp,
&cmd->t_task_list, t_list) {
-
if (!task->task_sense)
continue;
- dev = task->se_dev;
- if (!dev)
- continue;
-
if (!dev->transport->get_sense_buffer) {
pr_err("dev->transport->get_sense_buffer"
" is NULL\n");
@@ -2783,9 +2470,9 @@ static int transport_get_sense_data(struct se_cmd *cmd)
sense_buffer = dev->transport->get_sense_buffer(task);
if (!sense_buffer) {
- pr_err("ITT[0x%08x]_TASK[%d]: Unable to locate"
+ pr_err("ITT[0x%08x]_TASK[%p]: Unable to locate"
" sense buffer for task with sense\n",
- cmd->se_tfo->get_task_tag(cmd), task->task_no);
+ cmd->se_tfo->get_task_tag(cmd), task);
continue;
}
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
@@ -2814,7 +2501,6 @@ static int transport_get_sense_data(struct se_cmd *cmd)
static int
transport_handle_reservation_conflict(struct se_cmd *cmd)
{
- cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
@@ -2915,8 +2601,6 @@ static int transport_generic_cmd_sequencer(
* Check for an existing UNIT ATTENTION condition
*/
if (core_scsi3_ua_check(cmd, cdb) < 0) {
- cmd->transport_wait_for_tasks =
- &transport_nop_wait_for_tasks;
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
return -EINVAL;
@@ -2926,7 +2610,6 @@ static int transport_generic_cmd_sequencer(
*/
ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
if (ret != 0) {
- cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
/*
* Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
* The ALUA additional sense code qualifier (ASCQ) is determined
@@ -2965,7 +2648,6 @@ static int transport_generic_cmd_sequencer(
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
- cmd->transport_split_cdb = &split_cdb_XX_6;
cmd->t_task_lba = transport_lba_21(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
@@ -2974,7 +2656,6 @@ static int transport_generic_cmd_sequencer(
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
- cmd->transport_split_cdb = &split_cdb_XX_10;
cmd->t_task_lba = transport_lba_32(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
@@ -2983,7 +2664,6 @@ static int transport_generic_cmd_sequencer(
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
- cmd->transport_split_cdb = &split_cdb_XX_12;
cmd->t_task_lba = transport_lba_32(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
@@ -2992,7 +2672,6 @@ static int transport_generic_cmd_sequencer(
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
- cmd->transport_split_cdb = &split_cdb_XX_16;
cmd->t_task_lba = transport_lba_64(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
@@ -3001,7 +2680,6 @@ static int transport_generic_cmd_sequencer(
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
- cmd->transport_split_cdb = &split_cdb_XX_6;
cmd->t_task_lba = transport_lba_21(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
@@ -3010,7 +2688,6 @@ static int transport_generic_cmd_sequencer(
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
- cmd->transport_split_cdb = &split_cdb_XX_10;
cmd->t_task_lba = transport_lba_32(cdb);
cmd->t_tasks_fua = (cdb[1] & 0x8);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
@@ -3020,7 +2697,6 @@ static int transport_generic_cmd_sequencer(
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
- cmd->transport_split_cdb = &split_cdb_XX_12;
cmd->t_task_lba = transport_lba_32(cdb);
cmd->t_tasks_fua = (cdb[1] & 0x8);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
@@ -3030,7 +2706,6 @@ static int transport_generic_cmd_sequencer(
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
- cmd->transport_split_cdb = &split_cdb_XX_16;
cmd->t_task_lba = transport_lba_64(cdb);
cmd->t_tasks_fua = (cdb[1] & 0x8);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
@@ -3043,18 +2718,14 @@ static int transport_generic_cmd_sequencer(
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
- cmd->transport_split_cdb = &split_cdb_XX_10;
cmd->t_task_lba = transport_lba_32(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
- passthrough = (dev->transport->transport_type ==
- TRANSPORT_PLUGIN_PHBA_PDEV);
- /*
- * Skip the remaining assignments for TCM/PSCSI passthrough
- */
- if (passthrough)
- break;
+
+ if (dev->transport->transport_type ==
+ TRANSPORT_PLUGIN_PHBA_PDEV)
+ goto out_unsupported_cdb;
/*
- * Setup BIDI XOR callback to be run during transport_generic_complete_ok()
+ * Setup BIDI XOR callback to be run after I/O completion.
*/
cmd->transport_complete_callback = &transport_xor_callback;
cmd->t_tasks_fua = (cdb[1] & 0x8);
@@ -3078,19 +2749,14 @@ static int transport_generic_cmd_sequencer(
* Use WRITE_32 and READ_32 opcodes for the emulated
* XDWRITE_READ_32 logic.
*/
- cmd->transport_split_cdb = &split_cdb_XX_32;
cmd->t_task_lba = transport_lba_64_ext(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
- /*
- * Skip the remaining assignments for TCM/PSCSI passthrough
- */
if (passthrough)
- break;
-
+ goto out_unsupported_cdb;
/*
- * Setup BIDI XOR callback to be run during
- * transport_generic_complete_ok()
+	 * Setup BIDI XOR callback to be run after I/O
+	 * completion.
*/
cmd->transport_complete_callback = &transport_xor_callback;
cmd->t_tasks_fua = (cdb[10] & 0x8);
@@ -3430,7 +3096,6 @@ static int transport_generic_cmd_sequencer(
pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
" 0x%02x, sending CHECK_CONDITION.\n",
cmd->se_tfo->get_fabric_name(), cdb[0]);
- cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
goto out_unsupported_cdb;
}
@@ -3488,8 +3153,7 @@ out_invalid_cdb_field:
}
/*
- * Called from transport_generic_complete_ok() and
- * transport_generic_request_failure() to determine which dormant/delayed
+ * Called from I/O completion to determine which dormant/delayed
* and ordered cmds need to have their tasks added to the execution queue.
*/
static void transport_complete_task_attr(struct se_cmd *cmd)
@@ -3557,12 +3221,18 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
}
-static int transport_complete_qf(struct se_cmd *cmd)
+static void transport_complete_qf(struct se_cmd *cmd)
{
int ret = 0;
- if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
- return cmd->se_tfo->queue_status(cmd);
+ if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+ transport_complete_task_attr(cmd);
+
+ if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
+ ret = cmd->se_tfo->queue_status(cmd);
+ if (ret)
+ goto out;
+ }
switch (cmd->data_direction) {
case DMA_FROM_DEVICE:
@@ -3572,7 +3242,7 @@ static int transport_complete_qf(struct se_cmd *cmd)
if (cmd->t_bidi_data_sg) {
ret = cmd->se_tfo->queue_data_in(cmd);
if (ret < 0)
- return ret;
+ break;
}
/* Fall through for DMA_TO_DEVICE */
case DMA_NONE:
@@ -3582,17 +3252,20 @@ static int transport_complete_qf(struct se_cmd *cmd)
break;
}
- return ret;
+out:
+ if (ret < 0) {
+ transport_handle_queue_full(cmd, cmd->se_dev);
+ return;
+ }
+ transport_lun_remove_cmd(cmd);
+ transport_cmd_check_stop_to_fabric(cmd);
}
static void transport_handle_queue_full(
struct se_cmd *cmd,
- struct se_device *dev,
- int (*qf_callback)(struct se_cmd *))
+ struct se_device *dev)
{
spin_lock_irq(&dev->qf_cmd_lock);
- cmd->se_cmd_flags |= SCF_EMULATE_QUEUE_FULL;
- cmd->transport_qf_callback = qf_callback;
list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
atomic_inc(&dev->dev_qf_count);
smp_mb__after_atomic_inc();
@@ -3601,9 +3274,11 @@ static void transport_handle_queue_full(
schedule_work(&cmd->se_dev->qf_work_queue);
}
-static void transport_generic_complete_ok(struct se_cmd *cmd)
+static void target_complete_ok_work(struct work_struct *work)
{
+ struct se_cmd *cmd = container_of(work, struct se_cmd, work);
int reason = 0, ret;
+
/*
* Check if we need to move delayed/dormant tasks from cmds on the
* delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
@@ -3618,14 +3293,6 @@ static void transport_generic_complete_ok(struct se_cmd *cmd)
if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
schedule_work(&cmd->se_dev->qf_work_queue);
- if (cmd->transport_qf_callback) {
- ret = cmd->transport_qf_callback(cmd);
- if (ret < 0)
- goto queue_full;
-
- cmd->transport_qf_callback = NULL;
- goto done;
- }
/*
* Check if we need to retrieve a sense buffer from
* the struct se_cmd in question.
@@ -3701,7 +3368,6 @@ static void transport_generic_complete_ok(struct se_cmd *cmd)
break;
}
-done:
transport_lun_remove_cmd(cmd);
transport_cmd_check_stop_to_fabric(cmd);
return;
@@ -3709,34 +3375,35 @@ done:
queue_full:
pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
" data_direction: %d\n", cmd, cmd->data_direction);
- transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf);
+ cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
+ transport_handle_queue_full(cmd, cmd->se_dev);
}
static void transport_free_dev_tasks(struct se_cmd *cmd)
{
struct se_task *task, *task_tmp;
unsigned long flags;
+ LIST_HEAD(dispose_list);
spin_lock_irqsave(&cmd->t_state_lock, flags);
list_for_each_entry_safe(task, task_tmp,
&cmd->t_task_list, t_list) {
- if (atomic_read(&task->task_active))
- continue;
+ if (!(task->task_flags & TF_ACTIVE))
+ list_move_tail(&task->t_list, &dispose_list);
+ }
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+ while (!list_empty(&dispose_list)) {
+ task = list_first_entry(&dispose_list, struct se_task, t_list);
- kfree(task->task_sg_bidi);
- kfree(task->task_sg);
+ if (task->task_sg != cmd->t_data_sg &&
+ task->task_sg != cmd->t_bidi_data_sg)
+ kfree(task->task_sg);
list_del(&task->t_list);
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- if (task->se_dev)
- task->se_dev->transport->free_task(task);
- else
- pr_err("task[%u] - task->se_dev is NULL\n",
- task->task_no);
- spin_lock_irqsave(&cmd->t_state_lock, flags);
+ cmd->se_dev->transport->free_task(task);
}
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
@@ -3764,89 +3431,43 @@ static inline void transport_free_pages(struct se_cmd *cmd)
cmd->t_bidi_data_nents = 0;
}
-static inline void transport_release_tasks(struct se_cmd *cmd)
-{
- transport_free_dev_tasks(cmd);
-}
-
-static inline int transport_dec_and_check(struct se_cmd *cmd)
+/**
+ * transport_put_cmd - release a reference to a command
+ * @cmd: command to release
+ *
+ * This routine releases our reference to the command and frees it if possible.
+ */
+static void transport_put_cmd(struct se_cmd *cmd)
{
unsigned long flags;
+ int free_tasks = 0;
spin_lock_irqsave(&cmd->t_state_lock, flags);
if (atomic_read(&cmd->t_fe_count)) {
- if (!atomic_dec_and_test(&cmd->t_fe_count)) {
- spin_unlock_irqrestore(&cmd->t_state_lock,
- flags);
- return 1;
- }
+ if (!atomic_dec_and_test(&cmd->t_fe_count))
+ goto out_busy;
}
if (atomic_read(&cmd->t_se_count)) {
- if (!atomic_dec_and_test(&cmd->t_se_count)) {
- spin_unlock_irqrestore(&cmd->t_state_lock,
- flags);
- return 1;
- }
+ if (!atomic_dec_and_test(&cmd->t_se_count))
+ goto out_busy;
}
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return 0;
-}
-
-static void transport_release_fe_cmd(struct se_cmd *cmd)
-{
- unsigned long flags;
-
- if (transport_dec_and_check(cmd))
- return;
-
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (!atomic_read(&cmd->transport_dev_active)) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- goto free_pages;
- }
- atomic_set(&cmd->transport_dev_active, 0);
- transport_all_task_dev_remove_state(cmd);
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- transport_release_tasks(cmd);
-free_pages:
- transport_free_pages(cmd);
- transport_free_se_cmd(cmd);
- cmd->se_tfo->release_cmd(cmd);
-}
-
-static int
-transport_generic_remove(struct se_cmd *cmd, int session_reinstatement)
-{
- unsigned long flags;
-
- if (transport_dec_and_check(cmd)) {
- if (session_reinstatement) {
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- transport_all_task_dev_remove_state(cmd);
- spin_unlock_irqrestore(&cmd->t_state_lock,
- flags);
- }
- return 1;
- }
-
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (!atomic_read(&cmd->transport_dev_active)) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- goto free_pages;
+ if (atomic_read(&cmd->transport_dev_active)) {
+ atomic_set(&cmd->transport_dev_active, 0);
+ transport_all_task_dev_remove_state(cmd);
+ free_tasks = 1;
}
- atomic_set(&cmd->transport_dev_active, 0);
- transport_all_task_dev_remove_state(cmd);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- transport_release_tasks(cmd);
+ if (free_tasks != 0)
+ transport_free_dev_tasks(cmd);
-free_pages:
transport_free_pages(cmd);
transport_release_cmd(cmd);
- return 0;
+ return;
+out_busy:
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
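
transport_put_cmd() folds the old transport_dec_and_check()/transport_release_fe_cmd()/transport_generic_remove() trio into one release path: each of the two reference counts is decremented only if it is in use at all, and teardown proceeds only once neither count holds the command busy. A condensed sketch of that shape with a hypothetical object (new code would normally use a single kref instead of two ad-hoc counters):

#include <linux/atomic.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct ref_obj {
	spinlock_t lock;
	atomic_t frontend_refs;		/* only meaningful when non-zero */
	atomic_t engine_refs;		/* only meaningful when non-zero */
};

static void obj_teardown(struct ref_obj *obj)
{
	/* free per-object resources once nothing references it */
	kfree(obj);
}

static void obj_put(struct ref_obj *obj)
{
	unsigned long flags;

	spin_lock_irqsave(&obj->lock, flags);
	if (atomic_read(&obj->frontend_refs) &&
	    !atomic_dec_and_test(&obj->frontend_refs))
		goto out_busy;
	if (atomic_read(&obj->engine_refs) &&
	    !atomic_dec_and_test(&obj->engine_refs))
		goto out_busy;
	spin_unlock_irqrestore(&obj->lock, flags);

	obj_teardown(obj);
	return;

out_busy:
	spin_unlock_irqrestore(&obj->lock, flags);
}
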
/*
@@ -3888,62 +3509,6 @@ int transport_generic_map_mem_to_cmd(
}
EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
-static int transport_new_cmd_obj(struct se_cmd *cmd)
-{
- struct se_device *dev = cmd->se_dev;
- int set_counts = 1, rc, task_cdbs;
-
- /*
- * Setup any BIDI READ tasks and memory from
- * cmd->t_mem_bidi_list so the READ struct se_tasks
- * are queued first for the non pSCSI passthrough case.
- */
- if (cmd->t_bidi_data_sg &&
- (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) {
- rc = transport_allocate_tasks(cmd,
- cmd->t_task_lba,
- DMA_FROM_DEVICE,
- cmd->t_bidi_data_sg,
- cmd->t_bidi_data_nents);
- if (rc <= 0) {
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason =
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
- }
- atomic_inc(&cmd->t_fe_count);
- atomic_inc(&cmd->t_se_count);
- set_counts = 0;
- }
- /*
- * Setup the tasks and memory from cmd->t_mem_list
- * Note for BIDI transfers this will contain the WRITE payload
- */
- task_cdbs = transport_allocate_tasks(cmd,
- cmd->t_task_lba,
- cmd->data_direction,
- cmd->t_data_sg,
- cmd->t_data_nents);
- if (task_cdbs <= 0) {
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason =
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
- }
-
- if (set_counts) {
- atomic_inc(&cmd->t_fe_count);
- atomic_inc(&cmd->t_se_count);
- }
-
- cmd->t_task_list_num = task_cdbs;
-
- atomic_set(&cmd->t_task_cdbs_left, task_cdbs);
- atomic_set(&cmd->t_task_cdbs_ex_left, task_cdbs);
- atomic_set(&cmd->t_task_cdbs_timeout_left, task_cdbs);
- return 0;
-}
-
void *transport_kmap_first_data_page(struct se_cmd *cmd)
{
struct scatterlist *sg = cmd->t_data_sg;
@@ -4054,15 +3619,13 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
/*
* For the padded tasks, use the extra SGL vector allocated
* in transport_allocate_data_tasks() for the sg_prev_nents
- * offset into sg_chain() above.. The last task of a
- * multi-task list, or a single task will not have
- * task->task_sg_padded set..
+ * offset into sg_chain() above.
+ *
+ * We do not need the padding for the last task (or a single
+ * task), but in that case we will never use the sg_prev_nents
+	 * value below, so it does not matter that it would be incorrect.
*/
- if (task->task_padded_sg)
- sg_prev_nents = (task->task_sg_nents + 1);
- else
- sg_prev_nents = task->task_sg_nents;
-
+ sg_prev_nents = (task->task_sg_nents + 1);
sg_prev = task->task_sg;
}
/*
@@ -4092,30 +3655,60 @@ EXPORT_SYMBOL(transport_do_task_sg_chain);
/*
* Break up cmd into chunks transport can handle
*/
-static int transport_allocate_data_tasks(
- struct se_cmd *cmd,
- unsigned long long lba,
+static int
+transport_allocate_data_tasks(struct se_cmd *cmd,
enum dma_data_direction data_direction,
- struct scatterlist *sgl,
- unsigned int sgl_nents)
+ struct scatterlist *cmd_sg, unsigned int sgl_nents)
{
- unsigned char *cdb = NULL;
- struct se_task *task;
struct se_device *dev = cmd->se_dev;
- unsigned long flags;
- int task_count, i, ret;
- sector_t sectors, dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
- u32 sector_size = dev->se_sub_dev->se_dev_attrib.block_size;
- struct scatterlist *sg;
- struct scatterlist *cmd_sg;
+ int task_count, i;
+ unsigned long long lba;
+ sector_t sectors, dev_max_sectors;
+ u32 sector_size;
+
+ if (transport_cmd_get_valid_sectors(cmd) < 0)
+ return -EINVAL;
+
+ dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
+ sector_size = dev->se_sub_dev->se_dev_attrib.block_size;
WARN_ON(cmd->data_length % sector_size);
+
+ lba = cmd->t_task_lba;
sectors = DIV_ROUND_UP(cmd->data_length, sector_size);
task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors);
-
- cmd_sg = sgl;
+
+ /*
+	 * If we need just a single task, reuse the SG list in the command
+ * and avoid a lot of work.
+ */
+ if (task_count == 1) {
+ struct se_task *task;
+ unsigned long flags;
+
+ task = transport_generic_get_task(cmd, data_direction);
+ if (!task)
+ return -ENOMEM;
+
+ task->task_sg = cmd_sg;
+ task->task_sg_nents = sgl_nents;
+
+ task->task_lba = lba;
+ task->task_sectors = sectors;
+ task->task_size = task->task_sectors * sector_size;
+
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ list_add_tail(&task->t_list, &cmd->t_task_list);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+ return task_count;
+ }
+
for (i = 0; i < task_count; i++) {
+ struct se_task *task;
unsigned int task_size, task_sg_nents_padded;
+ struct scatterlist *sg;
+ unsigned long flags;
int count;
task = transport_generic_get_task(cmd, data_direction);
@@ -4126,14 +3719,6 @@ static int transport_allocate_data_tasks(
task->task_sectors = min(sectors, dev_max_sectors);
task->task_size = task->task_sectors * sector_size;
- cdb = dev->transport->get_cdb(task);
- BUG_ON(!cdb);
-
- memcpy(cdb, cmd->t_task_cdb,
- scsi_command_size(cmd->t_task_cdb));
-
- /* Update new cdb with updated lba/sectors */
- cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb);
/*
* This now assumes that passed sg_ents are in PAGE_SIZE chunks
* in order to calculate the number per task SGL entries
@@ -4149,7 +3734,6 @@ static int transport_allocate_data_tasks(
*/
if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) {
task_sg_nents_padded = (task->task_sg_nents + 1);
- task->task_padded_sg = 1;
} else
task_sg_nents_padded = task->task_sg_nents;
@@ -4181,20 +3765,6 @@ static int transport_allocate_data_tasks(
list_add_tail(&task->t_list, &cmd->t_task_list);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
- /*
- * Now perform the memory map of task->task_sg[] into backend
- * subsystem memory..
- */
- list_for_each_entry(task, &cmd->t_task_list, t_list) {
- if (atomic_read(&task->task_sent))
- continue;
- if (!dev->transport->map_data_SG)
- continue;
-
- ret = dev->transport->map_data_SG(task);
- if (ret < 0)
- return 0;
- }
return task_count;
}
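
With the separate LBA argument dropped, transport_allocate_data_tasks() derives everything from the command itself: the byte count is rounded up to whole sectors, the sector count is rounded up to max_sectors-sized chunks, and a result of one task takes the fast path that reuses cmd->t_data_sg directly. The arithmetic in isolation, with illustrative values in the comment:

#include <linux/kernel.h>	/* DIV_ROUND_UP(), DIV_ROUND_UP_SECTOR_T() */
#include <linux/types.h>

/*
 * Number of se_tasks a data command needs: e.g. a 1 MiB transfer on a
 * 512-byte-block device limited to 1024 sectors per task is 2048 sectors
 * and therefore two tasks; a 4 KiB transfer is 8 sectors and a single
 * task, which skips the scatterlist splitting entirely.
 */
static unsigned int tasks_needed(u32 data_length, u32 block_size,
				 sector_t max_sectors)
{
	sector_t sectors = DIV_ROUND_UP(data_length, block_size);

	return DIV_ROUND_UP_SECTOR_T(sectors, max_sectors);
}
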
@@ -4202,30 +3772,14 @@ static int transport_allocate_data_tasks(
static int
transport_allocate_control_task(struct se_cmd *cmd)
{
- struct se_device *dev = cmd->se_dev;
- unsigned char *cdb;
struct se_task *task;
unsigned long flags;
- int ret = 0;
task = transport_generic_get_task(cmd, cmd->data_direction);
if (!task)
return -ENOMEM;
- cdb = dev->transport->get_cdb(task);
- BUG_ON(!cdb);
- memcpy(cdb, cmd->t_task_cdb,
- scsi_command_size(cmd->t_task_cdb));
-
- task->task_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
- GFP_KERNEL);
- if (!task->task_sg) {
- cmd->se_dev->transport->free_task(task);
- return -ENOMEM;
- }
-
- memcpy(task->task_sg, cmd->t_data_sg,
- sizeof(struct scatterlist) * cmd->t_data_nents);
+ task->task_sg = cmd->t_data_sg;
task->task_size = cmd->data_length;
task->task_sg_nents = cmd->t_data_nents;
@@ -4233,53 +3787,20 @@ transport_allocate_control_task(struct se_cmd *cmd)
list_add_tail(&task->t_list, &cmd->t_task_list);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) {
- if (dev->transport->map_control_SG)
- ret = dev->transport->map_control_SG(task);
- } else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
- if (dev->transport->cdb_none)
- ret = dev->transport->cdb_none(task);
- } else {
- pr_err("target: Unknown control cmd type!\n");
- BUG();
- }
-
/* Success! Return number of tasks allocated */
- if (ret == 0)
- return 1;
- return ret;
-}
-
-static u32 transport_allocate_tasks(
- struct se_cmd *cmd,
- unsigned long long lba,
- enum dma_data_direction data_direction,
- struct scatterlist *sgl,
- unsigned int sgl_nents)
-{
- if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
- if (transport_cmd_get_valid_sectors(cmd) < 0)
- return -EINVAL;
-
- return transport_allocate_data_tasks(cmd, lba, data_direction,
- sgl, sgl_nents);
- } else
- return transport_allocate_control_task(cmd);
-
+ return 1;
}
-
-/* transport_generic_new_cmd(): Called from transport_processing_thread()
- *
- * Allocate storage transport resources from a set of values predefined
- * by transport_generic_cmd_sequencer() from the iSCSI Target RX process.
- * Any non zero return here is treated as an "out of resource' op here.
+/*
+ * Allocate any required resources to execute the command, and place it on
+ * the execution queue if possible. For writes we might not have the payload
+ * yet, in which case we notify the fabric via a call to ->write_pending instead.
*/
- /*
- * Generate struct se_task(s) and/or their payloads for this CDB.
- */
int transport_generic_new_cmd(struct se_cmd *cmd)
{
+ struct se_device *dev = cmd->se_dev;
+ int task_cdbs, task_cdbs_bidi = 0;
+ int set_counts = 1;
int ret = 0;
/*
@@ -4293,16 +3814,45 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
if (ret < 0)
return ret;
}
+
/*
- * Call transport_new_cmd_obj() to invoke transport_allocate_tasks() for
- * control or data CDB types, and perform the map to backend subsystem
- * code from SGL memory allocated here by transport_generic_get_mem(), or
- * via pre-existing SGL memory setup explictly by fabric module code with
- * transport_generic_map_mem_to_cmd().
+ * For BIDI command set up the read tasks first.
*/
- ret = transport_new_cmd_obj(cmd);
- if (ret < 0)
- return ret;
+ if (cmd->t_bidi_data_sg &&
+ dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
+ BUG_ON(!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB));
+
+ task_cdbs_bidi = transport_allocate_data_tasks(cmd,
+ DMA_FROM_DEVICE, cmd->t_bidi_data_sg,
+ cmd->t_bidi_data_nents);
+ if (task_cdbs_bidi <= 0)
+ goto out_fail;
+
+ atomic_inc(&cmd->t_fe_count);
+ atomic_inc(&cmd->t_se_count);
+ set_counts = 0;
+ }
+
+ if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
+ task_cdbs = transport_allocate_data_tasks(cmd,
+ cmd->data_direction, cmd->t_data_sg,
+ cmd->t_data_nents);
+ } else {
+ task_cdbs = transport_allocate_control_task(cmd);
+ }
+
+ if (task_cdbs <= 0)
+ goto out_fail;
+
+ if (set_counts) {
+ atomic_inc(&cmd->t_fe_count);
+ atomic_inc(&cmd->t_se_count);
+ }
+
+ cmd->t_task_list_num = (task_cdbs + task_cdbs_bidi);
+ atomic_set(&cmd->t_task_cdbs_left, cmd->t_task_list_num);
+ atomic_set(&cmd->t_task_cdbs_ex_left, cmd->t_task_list_num);
+
/*
* For WRITEs, let the fabric know its buffer is ready..
* This WRITE struct se_cmd (and all of its associated struct se_task's)
@@ -4320,6 +3870,11 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
*/
transport_execute_tasks(cmd);
return 0;
+
+out_fail:
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
EXPORT_SYMBOL(transport_generic_new_cmd);
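
The counters initialised near the end of transport_generic_new_cmd() (t_task_cdbs_left and t_task_cdbs_ex_left, both set to t_task_list_num) act as countdown latches that the reworked transport_complete_task() at the top of this patch decrements: the task whose atomic_dec_and_test() hits zero is the one that queues the command's completion work. A tiny stand-alone sketch of that latch pattern, with illustrative names:

#include <linux/atomic.h>

struct latch_cmd {
	atomic_t tasks_left;
};

/* Called once, after all tasks for the command have been allocated. */
static void latch_cmd_arm(struct latch_cmd *cmd, int ntasks)
{
	atomic_set(&cmd->tasks_left, ntasks);
}

/* Called once per finished task; returns true only for the last one. */
static bool latch_cmd_task_done(struct latch_cmd *cmd)
{
	return atomic_dec_and_test(&cmd->tasks_left);
}
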
@@ -4333,15 +3888,15 @@ void transport_generic_process_write(struct se_cmd *cmd)
}
EXPORT_SYMBOL(transport_generic_process_write);
-static int transport_write_pending_qf(struct se_cmd *cmd)
+static void transport_write_pending_qf(struct se_cmd *cmd)
{
- return cmd->se_tfo->write_pending(cmd);
+ if (cmd->se_tfo->write_pending(cmd) == -EAGAIN) {
+ pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n",
+ cmd);
+ transport_handle_queue_full(cmd, cmd->se_dev);
+ }
}
-/* transport_generic_write_pending():
- *
- *
- */
static int transport_generic_write_pending(struct se_cmd *cmd)
{
unsigned long flags;
@@ -4351,17 +3906,6 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
cmd->t_state = TRANSPORT_WRITE_PENDING;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- if (cmd->transport_qf_callback) {
- ret = cmd->transport_qf_callback(cmd);
- if (ret == -EAGAIN)
- goto queue_full;
- else if (ret < 0)
- return ret;
-
- cmd->transport_qf_callback = NULL;
- return 0;
- }
-
/*
* Clear the se_cmd for WRITE_PENDING status in order to set
* cmd->t_transport_active=0 so that transport_generic_handle_data
@@ -4386,61 +3930,52 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
queue_full:
pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
- transport_handle_queue_full(cmd, cmd->se_dev,
- transport_write_pending_qf);
+ transport_handle_queue_full(cmd, cmd->se_dev);
return ret;
}
+/**
+ * transport_release_cmd - free a command
+ * @cmd: command to free
+ *
+ * This routine unconditionally frees a command, and reference counting
+ * or list removal must be done in the caller.
+ */
void transport_release_cmd(struct se_cmd *cmd)
{
BUG_ON(!cmd->se_tfo);
- transport_free_se_cmd(cmd);
+ if (cmd->se_tmr_req)
+ core_tmr_release_req(cmd->se_tmr_req);
+ if (cmd->t_task_cdb != cmd->__t_task_cdb)
+ kfree(cmd->t_task_cdb);
cmd->se_tfo->release_cmd(cmd);
}
EXPORT_SYMBOL(transport_release_cmd);
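
transport_release_cmd() now frees the oversized CDB buffer itself; the kfree() is conditional because short CDBs live in the fixed __t_task_cdb array embedded in the se_cmd and only larger (e.g. VARIABLE_LENGTH) CDBs get a kmalloc'ed buffer. A small sketch of that embedded-buffer-with-overflow idiom, using a hypothetical structure:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

#define INLINE_CDB_LEN 32

struct cdb_buf {
	unsigned char *cdb;			/* points at inline_cdb or a kmalloc'ed buffer */
	unsigned char inline_cdb[INLINE_CDB_LEN];
};

static int cdb_buf_set(struct cdb_buf *c, const unsigned char *cdb, size_t len)
{
	if (len <= INLINE_CDB_LEN) {
		c->cdb = c->inline_cdb;		/* common case: no allocation */
	} else {
		c->cdb = kmalloc(len, GFP_KERNEL);
		if (!c->cdb)
			return -ENOMEM;
	}
	memcpy(c->cdb, cdb, len);
	return 0;
}

static void cdb_buf_release(struct cdb_buf *c)
{
	/* Mirrors the t_task_cdb != __t_task_cdb test: only free what was allocated. */
	if (c->cdb != c->inline_cdb)
		kfree(c->cdb);
	c->cdb = NULL;
}
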
-/* transport_generic_free_cmd():
- *
- * Called from processing frontend to release storage engine resources
- */
-void transport_generic_free_cmd(
- struct se_cmd *cmd,
- int wait_for_tasks,
- int session_reinstatement)
+void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
- if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD))
+ if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
+ if (wait_for_tasks && cmd->se_tmr_req)
+ transport_wait_for_tasks(cmd);
+
transport_release_cmd(cmd);
- else {
+ } else {
+ if (wait_for_tasks)
+ transport_wait_for_tasks(cmd);
+
core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
- if (cmd->se_lun) {
-#if 0
- pr_debug("cmd: %p ITT: 0x%08x contains"
- " cmd->se_lun\n", cmd,
- cmd->se_tfo->get_task_tag(cmd));
-#endif
+ if (cmd->se_lun)
transport_lun_remove_cmd(cmd);
- }
-
- if (wait_for_tasks && cmd->transport_wait_for_tasks)
- cmd->transport_wait_for_tasks(cmd, 0, 0);
transport_free_dev_tasks(cmd);
- transport_generic_remove(cmd, session_reinstatement);
+ transport_put_cmd(cmd);
}
}
EXPORT_SYMBOL(transport_generic_free_cmd);
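
transport_generic_free_cmd() loses the session_reinstatement argument and handles the optional quiesce internally via the now-exported transport_wait_for_tasks(). A hedged sketch of how a fabric module calls it after this change; the function below is illustrative, the real in-tree caller updated by this patch is ft_check_stop_free() further down.

/* Header names as of this series; adjust to the tree's actual layout. */
#include <target/target_core_base.h>
#include <target/target_core_transport.h>

static void my_fabric_release_cmd(struct se_cmd *se_cmd, bool quiesce)
{
	/*
	 * Second argument selects whether the core should call
	 * transport_wait_for_tasks() before tearing the command down:
	 * 0 on the normal completion path, 1 on shutdown/abort paths.
	 */
	transport_generic_free_cmd(se_cmd, quiesce ? 1 : 0);
}
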
-static void transport_nop_wait_for_tasks(
- struct se_cmd *cmd,
- int remove_cmd,
- int session_reinstatement)
-{
- return;
-}
-
/* transport_lun_wait_for_tasks():
*
* Called from ConfigFS context to stop the passed struct se_cmd to allow
@@ -4479,7 +4014,7 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
cmd->se_tfo->get_task_tag(cmd));
}
- transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);
+ transport_remove_cmd_from_queue(cmd);
return 0;
}
@@ -4610,22 +4145,30 @@ int transport_clear_lun_from_sessions(struct se_lun *lun)
return 0;
}
-/* transport_generic_wait_for_tasks():
+/**
+ * transport_wait_for_tasks - wait for completion to occur
+ * @cmd: command to wait on
*
- * Called from frontend or passthrough context to wait for storage engine
- * to pause and/or release frontend generated struct se_cmd.
+ * Called from frontend fabric context to wait for storage engine
+ * to pause and/or release frontend generated struct se_cmd.
*/
-static void transport_generic_wait_for_tasks(
- struct se_cmd *cmd,
- int remove_cmd,
- int session_reinstatement)
+void transport_wait_for_tasks(struct se_cmd *cmd)
{
unsigned long flags;
- if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req))
- return;
-
spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ return;
+ }
+ /*
+ * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE
+ * has been set in transport_set_supported_SAM_opcode().
+ */
+ if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && !cmd->se_tmr_req) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ return;
+ }
/*
* If we are already stopped due to an external event (ie: LUN shutdown)
* sleep until the connection can have the passed struct se_cmd back.
@@ -4665,16 +4208,17 @@ static void transport_generic_wait_for_tasks(
atomic_set(&cmd->transport_lun_stop, 0);
}
if (!atomic_read(&cmd->t_transport_active) ||
- atomic_read(&cmd->t_transport_aborted))
- goto remove;
+ atomic_read(&cmd->t_transport_aborted)) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ return;
+ }
atomic_set(&cmd->t_transport_stop, 1);
pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
- " i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop"
- " = TRUE\n", cmd, cmd->se_tfo->get_task_tag(cmd),
- cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
- cmd->deferred_t_state);
+ " i_state: %d, t_state: %d, t_transport_stop = TRUE\n",
+ cmd, cmd->se_tfo->get_task_tag(cmd),
+ cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
@@ -4689,13 +4233,10 @@ static void transport_generic_wait_for_tasks(
pr_debug("wait_for_tasks: Stopped wait_for_compltion("
"&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
cmd->se_tfo->get_task_tag(cmd));
-remove:
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- if (!remove_cmd)
- return;
- transport_generic_free_cmd(cmd, 0, session_reinstatement);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
+EXPORT_SYMBOL(transport_wait_for_tasks);
static int transport_get_sense_codes(
struct se_cmd *cmd,
@@ -4920,6 +4461,15 @@ EXPORT_SYMBOL(transport_check_aborted_status);
void transport_send_task_abort(struct se_cmd *cmd)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
/*
* If there are still expected incoming fabric WRITEs, we wait
 * until they have completed before sending a TASK_ABORTED
@@ -4984,184 +4534,10 @@ int transport_generic_do_tmr(struct se_cmd *cmd)
cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
cmd->se_tfo->queue_tm_rsp(cmd);
- transport_cmd_check_stop(cmd, 2, 0);
+ transport_cmd_check_stop_to_fabric(cmd);
return 0;
}
-/*
- * Called with spin_lock_irq(&dev->execute_task_lock); held
- *
- */
-static struct se_task *
-transport_get_task_from_state_list(struct se_device *dev)
-{
- struct se_task *task;
-
- if (list_empty(&dev->state_task_list))
- return NULL;
-
- list_for_each_entry(task, &dev->state_task_list, t_state_list)
- break;
-
- list_del(&task->t_state_list);
- atomic_set(&task->task_state_active, 0);
-
- return task;
-}
-
-static void transport_processing_shutdown(struct se_device *dev)
-{
- struct se_cmd *cmd;
- struct se_task *task;
- unsigned long flags;
- /*
- * Empty the struct se_device's struct se_task state list.
- */
- spin_lock_irqsave(&dev->execute_task_lock, flags);
- while ((task = transport_get_task_from_state_list(dev))) {
- if (!task->task_se_cmd) {
- pr_err("task->task_se_cmd is NULL!\n");
- continue;
- }
- cmd = task->task_se_cmd;
-
- spin_unlock_irqrestore(&dev->execute_task_lock, flags);
-
- spin_lock_irqsave(&cmd->t_state_lock, flags);
-
- pr_debug("PT: cmd: %p task: %p ITT: 0x%08x,"
- " i_state: %d, t_state/def_t_state:"
- " %d/%d cdb: 0x%02x\n", cmd, task,
- cmd->se_tfo->get_task_tag(cmd),
- cmd->se_tfo->get_cmd_state(cmd),
- cmd->t_state, cmd->deferred_t_state,
- cmd->t_task_cdb[0]);
- pr_debug("PT: ITT[0x%08x] - t_tasks: %d t_task_cdbs_left:"
- " %d t_task_cdbs_sent: %d -- t_transport_active: %d"
- " t_transport_stop: %d t_transport_sent: %d\n",
- cmd->se_tfo->get_task_tag(cmd),
- cmd->t_task_list_num,
- atomic_read(&cmd->t_task_cdbs_left),
- atomic_read(&cmd->t_task_cdbs_sent),
- atomic_read(&cmd->t_transport_active),
- atomic_read(&cmd->t_transport_stop),
- atomic_read(&cmd->t_transport_sent));
-
- if (atomic_read(&task->task_active)) {
- atomic_set(&task->task_stop, 1);
- spin_unlock_irqrestore(
- &cmd->t_state_lock, flags);
-
- pr_debug("Waiting for task: %p to shutdown for dev:"
- " %p\n", task, dev);
- wait_for_completion(&task->task_stop_comp);
- pr_debug("Completed task: %p shutdown for dev: %p\n",
- task, dev);
-
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- atomic_dec(&cmd->t_task_cdbs_left);
-
- atomic_set(&task->task_active, 0);
- atomic_set(&task->task_stop, 0);
- } else {
- if (atomic_read(&task->task_execute_queue) != 0)
- transport_remove_task_from_execute_queue(task, dev);
- }
- __transport_stop_task_timer(task, &flags);
-
- if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) {
- spin_unlock_irqrestore(
- &cmd->t_state_lock, flags);
-
- pr_debug("Skipping task: %p, dev: %p for"
- " t_task_cdbs_ex_left: %d\n", task, dev,
- atomic_read(&cmd->t_task_cdbs_ex_left));
-
- spin_lock_irqsave(&dev->execute_task_lock, flags);
- continue;
- }
-
- if (atomic_read(&cmd->t_transport_active)) {
- pr_debug("got t_transport_active = 1 for task: %p, dev:"
- " %p\n", task, dev);
-
- if (atomic_read(&cmd->t_fe_count)) {
- spin_unlock_irqrestore(
- &cmd->t_state_lock, flags);
- transport_send_check_condition_and_sense(
- cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE,
- 0);
- transport_remove_cmd_from_queue(cmd,
- &cmd->se_dev->dev_queue_obj);
-
- transport_lun_remove_cmd(cmd);
- transport_cmd_check_stop(cmd, 1, 0);
- } else {
- spin_unlock_irqrestore(
- &cmd->t_state_lock, flags);
-
- transport_remove_cmd_from_queue(cmd,
- &cmd->se_dev->dev_queue_obj);
-
- transport_lun_remove_cmd(cmd);
-
- if (transport_cmd_check_stop(cmd, 1, 0))
- transport_generic_remove(cmd, 0);
- }
-
- spin_lock_irqsave(&dev->execute_task_lock, flags);
- continue;
- }
- pr_debug("Got t_transport_active = 0 for task: %p, dev: %p\n",
- task, dev);
-
- if (atomic_read(&cmd->t_fe_count)) {
- spin_unlock_irqrestore(
- &cmd->t_state_lock, flags);
- transport_send_check_condition_and_sense(cmd,
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
- transport_remove_cmd_from_queue(cmd,
- &cmd->se_dev->dev_queue_obj);
-
- transport_lun_remove_cmd(cmd);
- transport_cmd_check_stop(cmd, 1, 0);
- } else {
- spin_unlock_irqrestore(
- &cmd->t_state_lock, flags);
-
- transport_remove_cmd_from_queue(cmd,
- &cmd->se_dev->dev_queue_obj);
- transport_lun_remove_cmd(cmd);
-
- if (transport_cmd_check_stop(cmd, 1, 0))
- transport_generic_remove(cmd, 0);
- }
-
- spin_lock_irqsave(&dev->execute_task_lock, flags);
- }
- spin_unlock_irqrestore(&dev->execute_task_lock, flags);
- /*
- * Empty the struct se_device's struct se_cmd list.
- */
- while ((cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj))) {
-
- pr_debug("From Device Queue: cmd: %p t_state: %d\n",
- cmd, cmd->t_state);
-
- if (atomic_read(&cmd->t_fe_count)) {
- transport_send_check_condition_and_sense(cmd,
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
-
- transport_lun_remove_cmd(cmd);
- transport_cmd_check_stop(cmd, 1, 0);
- } else {
- transport_lun_remove_cmd(cmd);
- if (transport_cmd_check_stop(cmd, 1, 0))
- transport_generic_remove(cmd, 0);
- }
- }
-}
-
/* transport_processing_thread():
*
*
@@ -5181,14 +4557,6 @@ static int transport_processing_thread(void *param)
if (ret < 0)
goto out;
- spin_lock_irq(&dev->dev_status_lock);
- if (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) {
- spin_unlock_irq(&dev->dev_status_lock);
- transport_processing_shutdown(dev);
- continue;
- }
- spin_unlock_irq(&dev->dev_status_lock);
-
get_cmd:
__transport_execute_tasks(dev);
@@ -5197,6 +4565,9 @@ get_cmd:
continue;
switch (cmd->t_state) {
+ case TRANSPORT_NEW_CMD:
+ BUG();
+ break;
case TRANSPORT_NEW_CMD_MAP:
if (!cmd->se_tfo->new_cmd_map) {
pr_err("cmd->se_tfo->new_cmd_map is"
@@ -5206,19 +4577,17 @@ get_cmd:
ret = cmd->se_tfo->new_cmd_map(cmd);
if (ret < 0) {
cmd->transport_error_status = ret;
- transport_generic_request_failure(cmd, NULL,
+ transport_generic_request_failure(cmd,
0, (cmd->data_direction !=
DMA_TO_DEVICE));
break;
}
- /* Fall through */
- case TRANSPORT_NEW_CMD:
ret = transport_generic_new_cmd(cmd);
if (ret == -EAGAIN)
break;
else if (ret < 0) {
cmd->transport_error_status = ret;
- transport_generic_request_failure(cmd, NULL,
+ transport_generic_request_failure(cmd,
0, (cmd->data_direction !=
DMA_TO_DEVICE));
}
@@ -5226,33 +4595,22 @@ get_cmd:
case TRANSPORT_PROCESS_WRITE:
transport_generic_process_write(cmd);
break;
- case TRANSPORT_COMPLETE_OK:
- transport_stop_all_task_timers(cmd);
- transport_generic_complete_ok(cmd);
- break;
- case TRANSPORT_REMOVE:
- transport_generic_remove(cmd, 0);
- break;
case TRANSPORT_FREE_CMD_INTR:
- transport_generic_free_cmd(cmd, 0, 0);
+ transport_generic_free_cmd(cmd, 0);
break;
case TRANSPORT_PROCESS_TMR:
transport_generic_do_tmr(cmd);
break;
- case TRANSPORT_COMPLETE_FAILURE:
- transport_generic_request_failure(cmd, NULL, 1, 1);
- break;
- case TRANSPORT_COMPLETE_TIMEOUT:
- transport_stop_all_task_timers(cmd);
- transport_generic_request_timeout(cmd);
- break;
case TRANSPORT_COMPLETE_QF_WP:
- transport_generic_write_pending(cmd);
+ transport_write_pending_qf(cmd);
+ break;
+ case TRANSPORT_COMPLETE_QF_OK:
+ transport_complete_qf(cmd);
break;
default:
- pr_err("Unknown t_state: %d deferred_t_state:"
- " %d for ITT: 0x%08x i_state: %d on SE LUN:"
- " %u\n", cmd->t_state, cmd->deferred_t_state,
+ pr_err("Unknown t_state: %d for ITT: 0x%08x "
+ "i_state: %d on SE LUN: %u\n",
+ cmd->t_state,
cmd->se_tfo->get_task_tag(cmd),
cmd->se_tfo->get_cmd_state(cmd),
cmd->se_lun->unpacked_lun);
@@ -5263,7 +4621,8 @@ get_cmd:
}
out:
- transport_release_all_cmds(dev);
+ WARN_ON(!list_empty(&dev->state_task_list));
+ WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list));
dev->process_thread = NULL;
return 0;
}
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index bd4fe21a23b..3749d8b4b42 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -98,8 +98,7 @@ struct ft_tpg {
struct list_head list; /* linkage in ft_lport_acl tpg_list */
struct list_head lun_list; /* head of LUNs */
struct se_portal_group se_tpg;
- struct task_struct *thread; /* processing thread */
- struct se_queue_obj qobj; /* queue for processing thread */
+ struct workqueue_struct *workqueue;
};
struct ft_lport_acl {
@@ -110,16 +109,10 @@ struct ft_lport_acl {
struct se_wwn fc_lport_wwn;
};
-enum ft_cmd_state {
- FC_CMD_ST_NEW = 0,
- FC_CMD_ST_REJ
-};
-
/*
* Commands
*/
struct ft_cmd {
- enum ft_cmd_state state;
u32 lun; /* LUN from request */
struct ft_sess *sess; /* session held for cmd */
struct fc_seq *seq; /* sequence in exchange mgr */
@@ -127,7 +120,7 @@ struct ft_cmd {
struct fc_frame *req_frame;
unsigned char *cdb; /* pointer to CDB inside frame */
u32 write_data_len; /* data received on writes */
- struct se_queue_req se_req;
+ struct work_struct work;
/* Local sense buffer */
unsigned char ft_sense_buffer[TRANSPORT_SENSE_BUFFER];
u32 was_ddp_setup:1; /* Set only if ddp is setup */
@@ -177,7 +170,6 @@ int ft_is_state_remove(struct se_cmd *);
/*
* other internal functions.
*/
-int ft_thread(void *);
void ft_recv_req(struct ft_sess *, struct fc_frame *);
struct ft_tpg *ft_lport_find_tpg(struct fc_lport *);
struct ft_node_acl *ft_acl_get(struct ft_tpg *, struct fc_rport_priv *);
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 3633f6968f8..6195026cc7b 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -61,8 +61,8 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
int count;
se_cmd = &cmd->se_cmd;
- pr_debug("%s: cmd %p state %d sess %p seq %p se_cmd %p\n",
- caller, cmd, cmd->state, cmd->sess, cmd->seq, se_cmd);
+ pr_debug("%s: cmd %p sess %p seq %p se_cmd %p\n",
+ caller, cmd, cmd->sess, cmd->seq, se_cmd);
pr_debug("%s: cmd %p cdb %p\n",
caller, cmd, cmd->cdb);
pr_debug("%s: cmd %p lun %d\n", caller, cmd, cmd->lun);
@@ -89,38 +89,6 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0);
}
-static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd)
-{
- struct ft_tpg *tpg = sess->tport->tpg;
- struct se_queue_obj *qobj = &tpg->qobj;
- unsigned long flags;
-
- qobj = &sess->tport->tpg->qobj;
- spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
- list_add_tail(&cmd->se_req.qr_list, &qobj->qobj_list);
- atomic_inc(&qobj->queue_cnt);
- spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
-
- wake_up_process(tpg->thread);
-}
-
-static struct ft_cmd *ft_dequeue_cmd(struct se_queue_obj *qobj)
-{
- unsigned long flags;
- struct se_queue_req *qr;
-
- spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
- if (list_empty(&qobj->qobj_list)) {
- spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
- return NULL;
- }
- qr = list_first_entry(&qobj->qobj_list, struct se_queue_req, qr_list);
- list_del(&qr->qr_list);
- atomic_dec(&qobj->queue_cnt);
- spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
- return container_of(qr, struct ft_cmd, se_req);
-}
-
static void ft_free_cmd(struct ft_cmd *cmd)
{
struct fc_frame *fp;
@@ -146,7 +114,7 @@ void ft_release_cmd(struct se_cmd *se_cmd)
void ft_check_stop_free(struct se_cmd *se_cmd)
{
- transport_generic_free_cmd(se_cmd, 0, 0);
+ transport_generic_free_cmd(se_cmd, 0);
}
/*
@@ -281,9 +249,7 @@ u32 ft_get_task_tag(struct se_cmd *se_cmd)
int ft_get_cmd_state(struct se_cmd *se_cmd)
{
- struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
-
- return cmd->state;
+ return 0;
}
int ft_is_state_remove(struct se_cmd *se_cmd)
@@ -301,9 +267,8 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
if (IS_ERR(fp)) {
/* XXX need to find cmd if queued */
- cmd->se_cmd.t_state = TRANSPORT_REMOVE;
cmd->seq = NULL;
- transport_generic_free_cmd(&cmd->se_cmd, 0, 0);
+ transport_generic_free_cmd(&cmd->se_cmd, 0);
return;
}
@@ -321,7 +286,7 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
__func__, fh->fh_r_ctl);
ft_invl_hw_context(cmd);
fc_frame_free(fp);
- transport_generic_free_cmd(&cmd->se_cmd, 0, 0);
+ transport_generic_free_cmd(&cmd->se_cmd, 0);
break;
}
}
@@ -430,7 +395,7 @@ static void ft_send_tm(struct ft_cmd *cmd)
}
pr_debug("alloc tm cmd fn %d\n", tm_func);
- tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func);
+ tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func, GFP_KERNEL);
if (!tmr) {
pr_debug("alloc failed\n");
ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED);
@@ -454,7 +419,7 @@ static void ft_send_tm(struct ft_cmd *cmd)
sess = cmd->sess;
transport_send_check_condition_and_sense(&cmd->se_cmd,
cmd->se_cmd.scsi_sense_reason, 0);
- transport_generic_free_cmd(&cmd->se_cmd, 0, 0);
+ transport_generic_free_cmd(&cmd->se_cmd, 0);
ft_sess_put(sess);
return;
}
@@ -504,6 +469,8 @@ int ft_queue_tm_resp(struct se_cmd *se_cmd)
return 0;
}
+static void ft_send_work(struct work_struct *work);
+
/*
* Handle incoming FCP command.
*/
@@ -522,7 +489,9 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
goto busy;
}
cmd->req_frame = fp; /* hold frame during cmd */
- ft_queue_cmd(sess, cmd);
+
+ INIT_WORK(&cmd->work, ft_send_work);
+ queue_work(sess->tport->tpg->workqueue, &cmd->work);
return;
busy:
@@ -562,12 +531,13 @@ void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp)
/*
* Send new command to target.
*/
-static void ft_send_cmd(struct ft_cmd *cmd)
+static void ft_send_work(struct work_struct *work)
{
+ struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);
struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);
struct se_cmd *se_cmd;
struct fcp_cmnd *fcp;
- int data_dir;
+ int data_dir = 0;
u32 data_len;
int task_attr;
int ret;
@@ -656,7 +626,7 @@ static void ft_send_cmd(struct ft_cmd *cmd)
if (ret == -ENOMEM) {
transport_send_check_condition_and_sense(se_cmd,
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
- transport_generic_free_cmd(se_cmd, 0, 0);
+ transport_generic_free_cmd(se_cmd, 0);
return;
}
if (ret == -EINVAL) {
@@ -665,51 +635,12 @@ static void ft_send_cmd(struct ft_cmd *cmd)
else
transport_send_check_condition_and_sense(se_cmd,
se_cmd->scsi_sense_reason, 0);
- transport_generic_free_cmd(se_cmd, 0, 0);
+ transport_generic_free_cmd(se_cmd, 0);
return;
}
- transport_generic_handle_cdb(se_cmd);
+ transport_handle_cdb_direct(se_cmd);
return;
err:
ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
}
-
-/*
- * Handle request in the command thread.
- */
-static void ft_exec_req(struct ft_cmd *cmd)
-{
- pr_debug("cmd state %x\n", cmd->state);
- switch (cmd->state) {
- case FC_CMD_ST_NEW:
- ft_send_cmd(cmd);
- break;
- default:
- break;
- }
-}
-
-/*
- * Processing thread.
- * Currently one thread per tpg.
- */
-int ft_thread(void *arg)
-{
- struct ft_tpg *tpg = arg;
- struct se_queue_obj *qobj = &tpg->qobj;
- struct ft_cmd *cmd;
-
- while (!kthread_should_stop()) {
- schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
- if (kthread_should_stop())
- goto out;
-
- cmd = ft_dequeue_cmd(qobj);
- if (cmd)
- ft_exec_req(cmd);
- }
-
-out:
- return 0;
-}
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index ea30e3fe402..5f770412ca4 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -31,6 +31,7 @@
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
+#include <linux/kernel.h>
#include <linux/ctype.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
@@ -70,10 +71,10 @@ static ssize_t ft_parse_wwn(const char *name, u64 *wwn, int strict)
{
const char *cp;
char c;
- u32 nibble;
u32 byte = 0;
u32 pos = 0;
u32 err;
+ int val;
*wwn = 0;
for (cp = name; cp < &name[FT_NAMELEN - 1]; cp++) {
@@ -94,13 +95,10 @@ static ssize_t ft_parse_wwn(const char *name, u64 *wwn, int strict)
return cp - name;
}
err = 3;
- if (isdigit(c))
- nibble = c - '0';
- else if (isxdigit(c) && (islower(c) || !strict))
- nibble = tolower(c) - 'a' + 10;
- else
+ val = hex_to_bin(c);
+ if (val < 0 || (strict && isupper(c)))
goto fail;
- *wwn = (*wwn << 4) | nibble;
+ *wwn = (*wwn << 4) | val;
}
err = 4;
fail:
@@ -326,7 +324,6 @@ static struct se_portal_group *ft_add_tpg(
tpg->index = index;
tpg->lport_acl = lacl;
INIT_LIST_HEAD(&tpg->lun_list);
- transport_init_queue_obj(&tpg->qobj);
ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg,
tpg, TRANSPORT_TPG_TYPE_NORMAL);
@@ -335,8 +332,8 @@ static struct se_portal_group *ft_add_tpg(
return NULL;
}
- tpg->thread = kthread_run(ft_thread, tpg, "ft_tpg%lu", index);
- if (IS_ERR(tpg->thread)) {
+ tpg->workqueue = alloc_workqueue("tcm_fc", 0, 1);
+ if (!tpg->workqueue) {
kfree(tpg);
return NULL;
}
@@ -355,7 +352,7 @@ static void ft_del_tpg(struct se_portal_group *se_tpg)
pr_debug("del tpg %s\n",
config_item_name(&tpg->se_tpg.tpg_group.cg_item));
- kthread_stop(tpg->thread);
+ destroy_workqueue(tpg->workqueue);
/* Wait for sessions to be freed thru RCU, for BUG_ON below */
synchronize_rcu();
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index ea0e7afe749..1369b1cb103 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -218,43 +218,41 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
if (cmd->was_ddp_setup) {
BUG_ON(!ep);
BUG_ON(!lport);
- }
-
- /*
- * Doesn't expect payload if DDP is setup. Payload
- * is expected to be copied directly to user buffers
- * due to DDP (Large Rx offload),
- */
- buf = fc_frame_payload_get(fp, 1);
- if (buf)
- pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, "
+ /*
+		 * Since DDP (Large Rx offload) was set up for this request,
+		 * the payload is expected to be copied directly to user buffers.
+ */
+ buf = fc_frame_payload_get(fp, 1);
+ if (buf)
+ pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, "
"cmd->sg_cnt 0x%x. DDP was setup"
" hence not expected to receive frame with "
- "payload, Frame will be dropped if "
- "'Sequence Initiative' bit in f_ctl is "
+				"payload, Frame will be dropped if "
+				"'Sequence Initiative' bit in f_ctl is "
"not set\n", __func__, ep->xid, f_ctl,
cmd->sg, cmd->sg_cnt);
- /*
- * Invalidate HW DDP context if it was setup for respective
- * command. Invalidation of HW DDP context is requited in both
- * situation (success and error).
- */
- ft_invl_hw_context(cmd);
+ /*
+		 * Invalidate the HW DDP context if it was set up for this
+		 * command. Invalidation of the HW DDP context is required in
+		 * both situations (success and error).
+ */
+ ft_invl_hw_context(cmd);
- /*
- * If "Sequence Initiative (TSI)" bit set in f_ctl, means last
- * write data frame is received successfully where payload is
- * posted directly to user buffer and only the last frame's
- * header is posted in receive queue.
- *
- * If "Sequence Initiative (TSI)" bit is not set, means error
- * condition w.r.t. DDP, hence drop the packet and let explict
- * ABORTS from other end of exchange timer trigger the recovery.
- */
- if (f_ctl & FC_FC_SEQ_INIT)
- goto last_frame;
- else
- goto drop;
+ /*
+		 * If the "Sequence Initiative (TSI)" bit is set in f_ctl, the
+		 * last write data frame was received successfully: the payload
+		 * was posted directly to the user buffer and only the last
+		 * frame's header is posted in the receive queue.
+		 *
+		 * If the "Sequence Initiative (TSI)" bit is not set, it means an
+		 * error condition w.r.t. DDP, so drop the packet and let explicit
+		 * ABORTS from the other end of the exchange timer trigger recovery.
+ */
+ if (f_ctl & FC_FC_SEQ_INIT)
+ goto last_frame;
+ else
+ goto drop;
+ }
rel_off = ntohl(fh->fh_parm_offset);
frame_len = fr_len(fp);
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
index 58cf279ed87..bc95f52cad8 100644
--- a/drivers/tty/serial/lantiq.c
+++ b/drivers/tty/serial/lantiq.c
@@ -478,8 +478,10 @@ lqasc_set_termios(struct uart_port *port,
spin_unlock_irqrestore(&ltq_asc_lock, flags);
/* Don't rewrite B0 */
- if (tty_termios_baud_rate(new))
+ if (tty_termios_baud_rate(new))
tty_termios_encode_baud_rate(new, baud, baud);
+
+ uart_update_timeout(port, cflag, baud);
}
static const char*
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 1e96d1f1fe6..723f8231193 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -761,7 +761,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
memset(buf, 0, retval);
status = 0;
- mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC;
+ mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC;
spin_lock_irqsave(&xhci->lock, flags);
/* For each port, did anything change? If so, set that bit in buf. */
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 54139a2f06c..952e2ded61a 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1934,8 +1934,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
int status = -EINPROGRESS;
struct urb_priv *urb_priv;
struct xhci_ep_ctx *ep_ctx;
+ struct list_head *tmp;
u32 trb_comp_code;
int ret = 0;
+ int td_num = 0;
slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
xdev = xhci->devs[slot_id];
@@ -1957,6 +1959,12 @@ static int handle_tx_event(struct xhci_hcd *xhci,
return -ENODEV;
}
+	/* Count the number of TDs on the ring if ep->skip is set */
+ if (ep->skip) {
+ list_for_each(tmp, &ep_ring->td_list)
+ td_num++;
+ }
+
event_dma = le64_to_cpu(event->buffer);
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
/* Look for common error cases */
@@ -2068,7 +2076,18 @@ static int handle_tx_event(struct xhci_hcd *xhci,
goto cleanup;
}
+	/* We've skipped all the TDs on the ep ring when ep->skip is set */
+ if (ep->skip && td_num == 0) {
+ ep->skip = false;
+ xhci_dbg(xhci, "All tds on the ep_ring skipped. "
+ "Clear skip flag.\n");
+ ret = 0;
+ goto cleanup;
+ }
+
td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
+ if (ep->skip)
+ td_num--;
/* Is this a TRB in the currently executing TD? */
event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 410fba45378..809cbda03d7 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -494,15 +494,16 @@ static int hpwdt_pretimeout(struct notifier_block *nb, unsigned long ulReason,
asminline_call(&cmn_regs, cru_rom_addr);
die_nmi_called = 1;
spin_unlock_irqrestore(&rom_lock, rom_pl);
+
+ if (allow_kdump)
+ hpwdt_stop();
+
if (!is_icru) {
if (cmn_regs.u1.ral == 0) {
- printk(KERN_WARNING "hpwdt: An NMI occurred, "
+ panic("An NMI occurred, "
"but unable to determine source.\n");
}
}
-
- if (allow_kdump)
- hpwdt_stop();
panic("An NMI occurred, please see the Integrated "
"Management Log for details.\n");
diff --git a/drivers/watchdog/lantiq_wdt.c b/drivers/watchdog/lantiq_wdt.c
index 7d82adac1cb..102aed0efbf 100644
--- a/drivers/watchdog/lantiq_wdt.c
+++ b/drivers/watchdog/lantiq_wdt.c
@@ -51,16 +51,16 @@ static int ltq_wdt_ok_to_close;
static void
ltq_wdt_enable(void)
{
- ltq_wdt_timeout = ltq_wdt_timeout *
+	unsigned long timeout = ltq_wdt_timeout *
(ltq_io_region_clk_rate / LTQ_WDT_DIVIDER) + 0x1000;
- if (ltq_wdt_timeout > LTQ_MAX_TIMEOUT)
- ltq_wdt_timeout = LTQ_MAX_TIMEOUT;
+ if (timeout > LTQ_MAX_TIMEOUT)
+ timeout = LTQ_MAX_TIMEOUT;
/* write the first password magic */
ltq_w32(LTQ_WDT_PW1, ltq_wdt_membase + LTQ_WDT_CR);
/* write the second magic plus the configuration and new timeout */
ltq_w32(LTQ_WDT_SR_EN | LTQ_WDT_SR_PWD | LTQ_WDT_SR_CLKDIV |
- LTQ_WDT_PW2 | ltq_wdt_timeout, ltq_wdt_membase + LTQ_WDT_CR);
+ LTQ_WDT_PW2 | timeout, ltq_wdt_membase + LTQ_WDT_CR);
}
static void
diff --git a/drivers/watchdog/sbc_epx_c3.c b/drivers/watchdog/sbc_epx_c3.c
index 3066a5127ca..eaca366b723 100644
--- a/drivers/watchdog/sbc_epx_c3.c
+++ b/drivers/watchdog/sbc_epx_c3.c
@@ -173,7 +173,7 @@ static struct notifier_block epx_c3_notifier = {
.notifier_call = epx_c3_notify_sys,
};
-static const char banner[] __initdata = KERN_INFO PFX
+static const char banner[] __initconst = KERN_INFO PFX
"Hardware Watchdog Timer for Winsystems EPX-C3 SBC: 0.1\n";
static int __init watchdog_init(void)
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index d33520d0b4c..1199da0f98c 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -59,7 +59,7 @@ static struct watchdog_device *wdd;
static int watchdog_ping(struct watchdog_device *wddev)
{
- if (test_bit(WDOG_ACTIVE, &wdd->status)) {
+ if (test_bit(WDOG_ACTIVE, &wddev->status)) {
if (wddev->ops->ping)
return wddev->ops->ping(wddev); /* ping the watchdog */
else
@@ -81,12 +81,12 @@ static int watchdog_start(struct watchdog_device *wddev)
{
int err;
- if (!test_bit(WDOG_ACTIVE, &wdd->status)) {
+ if (!test_bit(WDOG_ACTIVE, &wddev->status)) {
err = wddev->ops->start(wddev);
if (err < 0)
return err;
- set_bit(WDOG_ACTIVE, &wdd->status);
+ set_bit(WDOG_ACTIVE, &wddev->status);
}
return 0;
}
@@ -105,18 +105,18 @@ static int watchdog_stop(struct watchdog_device *wddev)
{
int err = -EBUSY;
- if (test_bit(WDOG_NO_WAY_OUT, &wdd->status)) {
+ if (test_bit(WDOG_NO_WAY_OUT, &wddev->status)) {
pr_info("%s: nowayout prevents watchdog to be stopped!\n",
- wdd->info->identity);
+ wddev->info->identity);
return err;
}
- if (test_bit(WDOG_ACTIVE, &wdd->status)) {
+ if (test_bit(WDOG_ACTIVE, &wddev->status)) {
err = wddev->ops->stop(wddev);
if (err < 0)
return err;
- clear_bit(WDOG_ACTIVE, &wdd->status);
+ clear_bit(WDOG_ACTIVE, &wddev->status);
}
return 0;
}
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 5f7ff8e2fc1..8795480c235 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -137,16 +137,6 @@ config XEN_GRANT_DEV_ALLOC
to other domains. This can be used to implement frontend drivers
or as part of an inter-domain shared memory channel.
-config XEN_PLATFORM_PCI
- tristate "xen platform pci device driver"
- depends on XEN_PVHVM && PCI
- default m
- help
- Driver for the Xen PCI Platform device: it is responsible for
- initializing xenbus and grant_table when running in a Xen HVM
- domain. As a consequence this driver is required to run any Xen PV
- frontend on Xen HVM.
-
config SWIOTLB_XEN
def_bool y
depends on PCI
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 72bbb27d7a6..974fffdf22b 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -14,7 +14,7 @@ obj-$(CONFIG_XEN_GNTDEV) += xen-gntdev.o
obj-$(CONFIG_XEN_GRANT_DEV_ALLOC) += xen-gntalloc.o
obj-$(CONFIG_XENFS) += xenfs/
obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o
-obj-$(CONFIG_XEN_PLATFORM_PCI) += xen-platform-pci.o
+obj-$(CONFIG_XEN_PVHVM) += platform-pci.o
obj-$(CONFIG_XEN_TMEM) += tmem.o
obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o
obj-$(CONFIG_XEN_DOM0) += pci.o
@@ -23,5 +23,3 @@ obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/
xen-evtchn-y := evtchn.o
xen-gntdev-y := gntdev.o
xen-gntalloc-y := gntalloc.o
-
-xen-platform-pci-y := platform-pci.o
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 5dfd8f8ff07..5876e1ae6c2 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -501,20 +501,24 @@ EXPORT_SYMBOL_GPL(balloon_set_new_target);
* alloc_xenballooned_pages - get pages that have been ballooned out
* @nr_pages: Number of pages to get
* @pages: pages returned
+ * @highmem: highmem or lowmem pages
* @return 0 on success, error otherwise
*/
-int alloc_xenballooned_pages(int nr_pages, struct page** pages)
+int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem)
{
int pgno = 0;
struct page* page;
mutex_lock(&balloon_mutex);
while (pgno < nr_pages) {
- page = balloon_retrieve(true);
- if (page) {
+ page = balloon_retrieve(highmem);
+ if (page && PageHighMem(page) == highmem) {
pages[pgno++] = page;
} else {
enum bp_state st;
- st = decrease_reservation(nr_pages - pgno, GFP_HIGHUSER);
+ if (page)
+ balloon_append(page);
+ st = decrease_reservation(nr_pages - pgno,
+ highmem ? GFP_HIGHUSER : GFP_USER);
if (st != BP_DONE)
goto out_undo;
}
@@ -555,17 +559,40 @@ void free_xenballooned_pages(int nr_pages, struct page** pages)
}
EXPORT_SYMBOL(free_xenballooned_pages);
-static int __init balloon_init(void)
+static void __init balloon_add_region(unsigned long start_pfn,
+ unsigned long pages)
{
unsigned long pfn, extra_pfn_end;
struct page *page;
+ /*
+ * If the amount of usable memory has been limited (e.g., with
+ * the 'mem' command line parameter), don't add pages beyond
+ * this limit.
+ */
+ extra_pfn_end = min(max_pfn, start_pfn + pages);
+
+ for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) {
+ page = pfn_to_page(pfn);
+		/* totalram_pages and totalhigh_pages do not
+		   include the boot-time balloon extension, so
+		   don't subtract from them. */
+ __balloon_append(page);
+ }
+}
+
+static int __init balloon_init(void)
+{
+ int i;
+
if (!xen_domain())
return -ENODEV;
pr_info("xen/balloon: Initialising balloon driver.\n");
- balloon_stats.current_pages = xen_pv_domain() ? min(xen_start_info->nr_pages, max_pfn) : max_pfn;
+ balloon_stats.current_pages = xen_pv_domain()
+ ? min(xen_start_info->nr_pages - xen_released_pages, max_pfn)
+ : max_pfn;
balloon_stats.target_pages = balloon_stats.current_pages;
balloon_stats.balloon_low = 0;
balloon_stats.balloon_high = 0;
@@ -584,24 +611,13 @@ static int __init balloon_init(void)
#endif
/*
- * Initialise the balloon with excess memory space. We need
- * to make sure we don't add memory which doesn't exist or
- * logically exist. The E820 map can be trimmed to be smaller
- * than the amount of physical memory due to the mem= command
- * line parameter. And if this is a 32-bit non-HIGHMEM kernel
- * on a system with memory which requires highmem to access,
- * don't try to use it.
+ * Initialize the balloon with pages from the extra memory
+ * regions (see arch/x86/xen/setup.c).
*/
- extra_pfn_end = min(min(max_pfn, e820_end_of_ram_pfn()),
- (unsigned long)PFN_DOWN(xen_extra_mem_start + xen_extra_mem_size));
- for (pfn = PFN_UP(xen_extra_mem_start);
- pfn < extra_pfn_end;
- pfn++) {
- page = pfn_to_page(pfn);
- /* totalram_pages and totalhigh_pages do not include the boot-time
- balloon extension, so don't subtract from it. */
- __balloon_append(page);
- }
+ for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++)
+ if (xen_extra_mem[i].size)
+ balloon_add_region(PFN_UP(xen_extra_mem[i].start),
+ PFN_DOWN(xen_extra_mem[i].size));
return 0;
}
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index da70f5c32eb..7a55b292bf3 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -54,7 +54,7 @@
* This lock protects updates to the following mapping and reference-count
* arrays. The lock does not need to be acquired to read the mapping tables.
*/
-static DEFINE_SPINLOCK(irq_mapping_update_lock);
+static DEFINE_MUTEX(irq_mapping_update_lock);
static LIST_HEAD(xen_irq_list_head);
@@ -432,7 +432,8 @@ static int __must_check xen_allocate_irq_dynamic(void)
irq = irq_alloc_desc_from(first, -1);
- xen_irq_init(irq);
+ if (irq >= 0)
+ xen_irq_init(irq);
return irq;
}
@@ -631,7 +632,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
int irq = -1;
struct physdev_irq irq_op;
- spin_lock(&irq_mapping_update_lock);
+ mutex_lock(&irq_mapping_update_lock);
irq = find_irq_by_gsi(gsi);
if (irq != -1) {
@@ -684,7 +685,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
handle_edge_irq, name);
out:
- spin_unlock(&irq_mapping_update_lock);
+ mutex_unlock(&irq_mapping_update_lock);
return irq;
}
@@ -710,10 +711,10 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
{
int irq, ret;
- spin_lock(&irq_mapping_update_lock);
+ mutex_lock(&irq_mapping_update_lock);
irq = xen_allocate_irq_dynamic();
- if (irq == -1)
+ if (irq < 0)
goto out;
irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq,
@@ -724,12 +725,12 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
if (ret < 0)
goto error_irq;
out:
- spin_unlock(&irq_mapping_update_lock);
+ mutex_unlock(&irq_mapping_update_lock);
return irq;
error_irq:
- spin_unlock(&irq_mapping_update_lock);
+ mutex_unlock(&irq_mapping_update_lock);
xen_free_irq(irq);
- return -1;
+ return ret;
}
#endif
@@ -740,7 +741,7 @@ int xen_destroy_irq(int irq)
struct irq_info *info = info_for_irq(irq);
int rc = -ENOENT;
- spin_lock(&irq_mapping_update_lock);
+ mutex_lock(&irq_mapping_update_lock);
desc = irq_to_desc(irq);
if (!desc)
@@ -766,7 +767,7 @@ int xen_destroy_irq(int irq)
xen_free_irq(irq);
out:
- spin_unlock(&irq_mapping_update_lock);
+ mutex_unlock(&irq_mapping_update_lock);
return rc;
}
@@ -776,10 +777,10 @@ int xen_irq_from_pirq(unsigned pirq)
struct irq_info *info;
- spin_lock(&irq_mapping_update_lock);
+ mutex_lock(&irq_mapping_update_lock);
list_for_each_entry(info, &xen_irq_list_head, list) {
- if (info == NULL || info->type != IRQT_PIRQ)
+ if (info->type != IRQT_PIRQ)
continue;
irq = info->irq;
if (info->u.pirq.pirq == pirq)
@@ -787,7 +788,7 @@ int xen_irq_from_pirq(unsigned pirq)
}
irq = -1;
out:
- spin_unlock(&irq_mapping_update_lock);
+ mutex_unlock(&irq_mapping_update_lock);
return irq;
}
@@ -802,7 +803,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
{
int irq;
- spin_lock(&irq_mapping_update_lock);
+ mutex_lock(&irq_mapping_update_lock);
irq = evtchn_to_irq[evtchn];
@@ -818,7 +819,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
}
out:
- spin_unlock(&irq_mapping_update_lock);
+ mutex_unlock(&irq_mapping_update_lock);
return irq;
}
@@ -829,7 +830,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
struct evtchn_bind_ipi bind_ipi;
int evtchn, irq;
- spin_lock(&irq_mapping_update_lock);
+ mutex_lock(&irq_mapping_update_lock);
irq = per_cpu(ipi_to_irq, cpu)[ipi];
@@ -853,7 +854,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
}
out:
- spin_unlock(&irq_mapping_update_lock);
+ mutex_unlock(&irq_mapping_update_lock);
return irq;
}
@@ -872,13 +873,34 @@ static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
return err ? : bind_evtchn_to_irq(bind_interdomain.local_port);
}
+static int find_virq(unsigned int virq, unsigned int cpu)
+{
+ struct evtchn_status status;
+ int port, rc = -ENOENT;
+
+ memset(&status, 0, sizeof(status));
+ for (port = 0; port <= NR_EVENT_CHANNELS; port++) {
+ status.dom = DOMID_SELF;
+ status.port = port;
+ rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
+ if (rc < 0)
+ continue;
+ if (status.status != EVTCHNSTAT_virq)
+ continue;
+ if (status.u.virq == virq && status.vcpu == cpu) {
+ rc = port;
+ break;
+ }
+ }
+ return rc;
+}
int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
struct evtchn_bind_virq bind_virq;
- int evtchn, irq;
+ int evtchn, irq, ret;
- spin_lock(&irq_mapping_update_lock);
+ mutex_lock(&irq_mapping_update_lock);
irq = per_cpu(virq_to_irq, cpu)[virq];
@@ -892,10 +914,16 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
bind_virq.virq = virq;
bind_virq.vcpu = cpu;
- if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
- &bind_virq) != 0)
- BUG();
- evtchn = bind_virq.port;
+ ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
+ &bind_virq);
+ if (ret == 0)
+ evtchn = bind_virq.port;
+ else {
+ if (ret == -EEXIST)
+ ret = find_virq(virq, cpu);
+ BUG_ON(ret < 0);
+ evtchn = ret;
+ }
xen_irq_info_virq_init(cpu, irq, evtchn, virq);
@@ -903,7 +931,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
}
out:
- spin_unlock(&irq_mapping_update_lock);
+ mutex_unlock(&irq_mapping_update_lock);
return irq;
}
@@ -913,7 +941,7 @@ static void unbind_from_irq(unsigned int irq)
struct evtchn_close close;
int evtchn = evtchn_from_irq(irq);
- spin_lock(&irq_mapping_update_lock);
+ mutex_lock(&irq_mapping_update_lock);
if (VALID_EVTCHN(evtchn)) {
close.port = evtchn;
@@ -943,7 +971,7 @@ static void unbind_from_irq(unsigned int irq)
xen_free_irq(irq);
- spin_unlock(&irq_mapping_update_lock);
+ mutex_unlock(&irq_mapping_update_lock);
}
int bind_evtchn_to_irqhandler(unsigned int evtchn,
@@ -1279,7 +1307,7 @@ void rebind_evtchn_irq(int evtchn, int irq)
will also be masked. */
disable_irq(irq);
- spin_lock(&irq_mapping_update_lock);
+ mutex_lock(&irq_mapping_update_lock);
/* After resume the irq<->evtchn mappings are all cleared out */
BUG_ON(evtchn_to_irq[evtchn] != -1);
@@ -1289,7 +1317,7 @@ void rebind_evtchn_irq(int evtchn, int irq)
xen_irq_info_evtchn_init(irq, evtchn);
- spin_unlock(&irq_mapping_update_lock);
+ mutex_unlock(&irq_mapping_update_lock);
/* new event channels are always bound to cpu 0 */
irq_set_affinity(irq, cpumask_of(0));
@@ -1670,6 +1698,7 @@ void __init xen_init_IRQ(void)
evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
GFP_KERNEL);
+ BUG_ON(!evtchn_to_irq);
for (i = 0; i < NR_EVENT_CHANNELS; i++)
evtchn_to_irq[i] = -1;
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index f914b26cf0c..880798aae2f 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -83,6 +83,7 @@ struct grant_map {
struct ioctl_gntdev_grant_ref *grants;
struct gnttab_map_grant_ref *map_ops;
struct gnttab_unmap_grant_ref *unmap_ops;
+ struct gnttab_map_grant_ref *kmap_ops;
struct page **pages;
};
@@ -116,19 +117,22 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
add->grants = kzalloc(sizeof(add->grants[0]) * count, GFP_KERNEL);
add->map_ops = kzalloc(sizeof(add->map_ops[0]) * count, GFP_KERNEL);
add->unmap_ops = kzalloc(sizeof(add->unmap_ops[0]) * count, GFP_KERNEL);
+ add->kmap_ops = kzalloc(sizeof(add->kmap_ops[0]) * count, GFP_KERNEL);
add->pages = kzalloc(sizeof(add->pages[0]) * count, GFP_KERNEL);
if (NULL == add->grants ||
NULL == add->map_ops ||
NULL == add->unmap_ops ||
+ NULL == add->kmap_ops ||
NULL == add->pages)
goto err;
- if (alloc_xenballooned_pages(count, add->pages))
+ if (alloc_xenballooned_pages(count, add->pages, false /* lowmem */))
goto err;
for (i = 0; i < count; i++) {
add->map_ops[i].handle = -1;
add->unmap_ops[i].handle = -1;
+ add->kmap_ops[i].handle = -1;
}
add->index = 0;
@@ -142,6 +146,7 @@ err:
kfree(add->grants);
kfree(add->map_ops);
kfree(add->unmap_ops);
+ kfree(add->kmap_ops);
kfree(add);
return NULL;
}
@@ -243,10 +248,35 @@ static int map_grant_pages(struct grant_map *map)
gnttab_set_unmap_op(&map->unmap_ops[i], addr,
map->flags, -1 /* handle */);
}
+ } else {
+ /*
+		 * Set up the map_ops corresponding to the pte entries pointing
+		 * to the kernel linear addresses of the struct pages.
+		 * These ptes are completely different from the user ptes dealt
+		 * with by find_grant_ptes.
+ */
+ for (i = 0; i < map->count; i++) {
+ unsigned level;
+ unsigned long address = (unsigned long)
+ pfn_to_kaddr(page_to_pfn(map->pages[i]));
+ pte_t *ptep;
+ u64 pte_maddr = 0;
+ BUG_ON(PageHighMem(map->pages[i]));
+
+ ptep = lookup_address(address, &level);
+ pte_maddr = arbitrary_virt_to_machine(ptep).maddr;
+ gnttab_set_map_op(&map->kmap_ops[i], pte_maddr,
+ map->flags |
+ GNTMAP_host_map |
+ GNTMAP_contains_pte,
+ map->grants[i].ref,
+ map->grants[i].domid);
+ }
}
pr_debug("map %d+%d\n", map->index, map->count);
- err = gnttab_map_refs(map->map_ops, map->pages, map->count);
+ err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
+ map->pages, map->count);
if (err)
return err;
@@ -462,13 +492,11 @@ static int gntdev_release(struct inode *inode, struct file *flip)
pr_debug("priv %p\n", priv);
- spin_lock(&priv->lock);
while (!list_empty(&priv->maps)) {
map = list_entry(priv->maps.next, struct grant_map, next);
list_del(&map->next);
gntdev_put_map(map);
}
- spin_unlock(&priv->lock);
if (use_ptemod)
mmu_notifier_unregister(&priv->mn, priv->mm);
@@ -532,10 +560,11 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
if (map) {
list_del(&map->next);
- gntdev_put_map(map);
err = 0;
}
spin_unlock(&priv->lock);
+ if (map)
+ gntdev_put_map(map);
return err;
}
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 4f44b347b24..8c71ab80175 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -448,7 +448,8 @@ unsigned int gnttab_max_grant_frames(void)
EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
- struct page **pages, unsigned int count)
+ struct gnttab_map_grant_ref *kmap_ops,
+ struct page **pages, unsigned int count)
{
int i, ret;
pte_t *pte;
@@ -488,8 +489,7 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
*/
return -EOPNOTSUPP;
}
- ret = m2p_add_override(mfn, pages[i],
- map_ops[i].flags & GNTMAP_contains_pte);
+ ret = m2p_add_override(mfn, pages[i], &kmap_ops[i]);
if (ret)
return ret;
}
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
index cef4bafc07d..66057075d6e 100644
--- a/drivers/xen/pci.c
+++ b/drivers/xen/pci.c
@@ -18,6 +18,7 @@
*/
#include <linux/pci.h>
+#include <linux/acpi.h>
#include <xen/xen.h>
#include <xen/interface/physdev.h>
#include <xen/interface/xen.h>
@@ -26,26 +27,85 @@
#include <asm/xen/hypercall.h>
#include "../pci/pci.h"
+static bool __read_mostly pci_seg_supported = true;
+
static int xen_add_device(struct device *dev)
{
int r;
struct pci_dev *pci_dev = to_pci_dev(dev);
+#ifdef CONFIG_PCI_IOV
+ struct pci_dev *physfn = pci_dev->physfn;
+#endif
+
+ if (pci_seg_supported) {
+ struct physdev_pci_device_add add = {
+ .seg = pci_domain_nr(pci_dev->bus),
+ .bus = pci_dev->bus->number,
+ .devfn = pci_dev->devfn
+ };
+#ifdef CONFIG_ACPI
+ acpi_handle handle;
+#endif
+
+#ifdef CONFIG_PCI_IOV
+ if (pci_dev->is_virtfn) {
+ add.flags = XEN_PCI_DEV_VIRTFN;
+ add.physfn.bus = physfn->bus->number;
+ add.physfn.devfn = physfn->devfn;
+ } else
+#endif
+ if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn))
+ add.flags = XEN_PCI_DEV_EXTFN;
+
+#ifdef CONFIG_ACPI
+ handle = DEVICE_ACPI_HANDLE(&pci_dev->dev);
+ if (!handle)
+ handle = DEVICE_ACPI_HANDLE(pci_dev->bus->bridge);
+#ifdef CONFIG_PCI_IOV
+ if (!handle && pci_dev->is_virtfn)
+ handle = DEVICE_ACPI_HANDLE(physfn->bus->bridge);
+#endif
+ if (handle) {
+ acpi_status status;
+
+ do {
+ unsigned long long pxm;
+
+ status = acpi_evaluate_integer(handle, "_PXM",
+ NULL, &pxm);
+ if (ACPI_SUCCESS(status)) {
+ add.optarr[0] = pxm;
+ add.flags |= XEN_PCI_DEV_PXM;
+ break;
+ }
+ status = acpi_get_parent(handle, &handle);
+ } while (ACPI_SUCCESS(status));
+ }
+#endif /* CONFIG_ACPI */
+
+ r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_add, &add);
+ if (r != -ENOSYS)
+ return r;
+ pci_seg_supported = false;
+ }
+ if (pci_domain_nr(pci_dev->bus))
+ r = -ENOSYS;
#ifdef CONFIG_PCI_IOV
- if (pci_dev->is_virtfn) {
+ else if (pci_dev->is_virtfn) {
struct physdev_manage_pci_ext manage_pci_ext = {
.bus = pci_dev->bus->number,
.devfn = pci_dev->devfn,
.is_virtfn = 1,
- .physfn.bus = pci_dev->physfn->bus->number,
- .physfn.devfn = pci_dev->physfn->devfn,
+ .physfn.bus = physfn->bus->number,
+ .physfn.devfn = physfn->devfn,
};
r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext,
&manage_pci_ext);
- } else
+ }
#endif
- if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn)) {
+ else if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn)) {
struct physdev_manage_pci_ext manage_pci_ext = {
.bus = pci_dev->bus->number,
.devfn = pci_dev->devfn,
@@ -71,13 +131,27 @@ static int xen_remove_device(struct device *dev)
{
int r;
struct pci_dev *pci_dev = to_pci_dev(dev);
- struct physdev_manage_pci manage_pci;
- manage_pci.bus = pci_dev->bus->number;
- manage_pci.devfn = pci_dev->devfn;
+ if (pci_seg_supported) {
+ struct physdev_pci_device device = {
+ .seg = pci_domain_nr(pci_dev->bus),
+ .bus = pci_dev->bus->number,
+ .devfn = pci_dev->devfn
+ };
- r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_remove,
- &manage_pci);
+ r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_remove,
+ &device);
+ } else if (pci_domain_nr(pci_dev->bus))
+ r = -ENOSYS;
+ else {
+ struct physdev_manage_pci manage_pci = {
+ .bus = pci_dev->bus->number,
+ .devfn = pci_dev->devfn
+ };
+
+ r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_remove,
+ &manage_pci);
+ }
return r;
}
@@ -96,13 +170,16 @@ static int xen_pci_notifier(struct notifier_block *nb,
r = xen_remove_device(dev);
break;
default:
- break;
+ return NOTIFY_DONE;
}
-
- return r;
+ if (r)
+ dev_err(dev, "Failed to %s - passthrough or MSI/MSI-X might fail!\n",
+ action == BUS_NOTIFY_ADD_DEVICE ? "add" :
+ (action == BUS_NOTIFY_DEL_DEVICE ? "delete" : "?"));
+ return NOTIFY_OK;
}
-struct notifier_block device_nb = {
+static struct notifier_block device_nb = {
.notifier_call = xen_pci_notifier,
};
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 6e8c15a2320..c984768d98c 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -38,6 +38,7 @@
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
+#include <xen/hvc-console.h>
/*
* Used to do a quick range check in swiotlb_tbl_unmap_single and
* swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
@@ -146,8 +147,10 @@ xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
void __init xen_swiotlb_init(int verbose)
{
unsigned long bytes;
- int rc;
+ int rc = -ENOMEM;
unsigned long nr_tbl;
+ char *m = NULL;
+ unsigned int repeat = 3;
nr_tbl = swioltb_nr_tbl();
if (nr_tbl)
@@ -156,16 +159,17 @@ void __init xen_swiotlb_init(int verbose)
xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
}
-
+retry:
bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
/*
* Get IO TLB memory from any location.
*/
xen_io_tlb_start = alloc_bootmem(bytes);
- if (!xen_io_tlb_start)
- panic("Cannot allocate SWIOTLB buffer");
-
+ if (!xen_io_tlb_start) {
+ m = "Cannot allocate Xen-SWIOTLB buffer!\n";
+ goto error;
+ }
xen_io_tlb_end = xen_io_tlb_start + bytes;
/*
* And replace that memory with pages under 4GB.
@@ -173,17 +177,28 @@ void __init xen_swiotlb_init(int verbose)
rc = xen_swiotlb_fixup(xen_io_tlb_start,
bytes,
xen_io_tlb_nslabs);
- if (rc)
+ if (rc) {
+ free_bootmem(__pa(xen_io_tlb_start), bytes);
+ m = "Failed to get contiguous memory for DMA from Xen!\n"\
+		    "You either don't have the permissions, do not have"\
+		    " enough free memory under 4GB, or the hypervisor memory"\
+		    " is too fragmented!";
goto error;
-
+ }
start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs, verbose);
return;
error:
- panic("DMA(%d): Failed to exchange pages allocated for DMA with Xen! "\
- "We either don't have the permission or you do not have enough"\
- "free memory under 4GB!\n", rc);
+ if (repeat--) {
+ xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
+ (xen_io_tlb_nslabs >> 1));
+ printk(KERN_INFO "Xen-SWIOTLB: Lowering to %luMB\n",
+ (xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
+ goto retry;
+ }
+ xen_raw_printk("%s (rc:%d)", m, rc);
+ panic("%s (rc:%d)", m, rc);
}
void *
@@ -194,6 +209,8 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
int order = get_order(size);
u64 dma_mask = DMA_BIT_MASK(32);
unsigned long vstart;
+ phys_addr_t phys;
+ dma_addr_t dev_addr;
/*
* Ignore region specifiers - the kernel's ideas of
@@ -209,18 +226,26 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
vstart = __get_free_pages(flags, order);
ret = (void *)vstart;
+ if (!ret)
+ return ret;
+
if (hwdev && hwdev->coherent_dma_mask)
- dma_mask = dma_alloc_coherent_mask(hwdev, flags);
+ dma_mask = hwdev->coherent_dma_mask;
- if (ret) {
+ phys = virt_to_phys(ret);
+ dev_addr = xen_phys_to_bus(phys);
+ if (((dev_addr + size - 1 <= dma_mask)) &&
+ !range_straddles_page_boundary(phys, size))
+ *dma_handle = dev_addr;
+ else {
if (xen_create_contiguous_region(vstart, order,
fls64(dma_mask)) != 0) {
free_pages(vstart, order);
return NULL;
}
- memset(ret, 0, size);
*dma_handle = virt_to_machine(ret).maddr;
}
+ memset(ret, 0, size);
return ret;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent);
@@ -230,11 +255,21 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
dma_addr_t dev_addr)
{
int order = get_order(size);
+ phys_addr_t phys;
+ u64 dma_mask = DMA_BIT_MASK(32);
if (dma_release_from_coherent(hwdev, order, vaddr))
return;
- xen_destroy_contiguous_region((unsigned long)vaddr, order);
+ if (hwdev && hwdev->coherent_dma_mask)
+ dma_mask = hwdev->coherent_dma_mask;
+
+ phys = virt_to_phys(vaddr);
+
+ if (((dev_addr + size - 1 > dma_mask)) ||
+ range_straddles_page_boundary(phys, size))
+ xen_destroy_contiguous_region((unsigned long)vaddr, order);
+
free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);
@@ -278,9 +313,10 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
/*
* Ensure that the address returned is DMA'ble
*/
- if (!dma_capable(dev, dev_addr, size))
- panic("map_single: bounce buffer is not DMA'ble");
-
+ if (!dma_capable(dev, dev_addr, size)) {
+ swiotlb_tbl_unmap_single(dev, map, size, dir);
+ dev_addr = 0;
+ }
return dev_addr;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
index a8031445d94..444345afbd5 100644
--- a/drivers/xen/xen-pciback/conf_space.c
+++ b/drivers/xen/xen-pciback/conf_space.c
@@ -15,7 +15,6 @@
#include "conf_space.h"
#include "conf_space_quirks.h"
-#define DRV_NAME "xen-pciback"
static int permissive;
module_param(permissive, bool, 0644);
diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
index da3cbdfcb5d..3daf862d739 100644
--- a/drivers/xen/xen-pciback/conf_space_header.c
+++ b/drivers/xen/xen-pciback/conf_space_header.c
@@ -15,7 +15,6 @@ struct pci_bar_info {
int which;
};
-#define DRV_NAME "xen-pciback"
#define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO))
#define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER)
@@ -25,7 +24,7 @@ static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data)
int ret;
ret = xen_pcibk_read_config_word(dev, offset, value, data);
- if (!atomic_read(&dev->enable_cnt))
+ if (!pci_is_enabled(dev))
return ret;
for (i = 0; i < PCI_ROM_RESOURCE; i++) {
@@ -187,7 +186,7 @@ static inline void read_dev_bar(struct pci_dev *dev,
bar_info->val = res[pos].start |
(res[pos].flags & PCI_REGION_FLAG_MASK);
- bar_info->len_val = res[pos].end - res[pos].start + 1;
+ bar_info->len_val = resource_size(&res[pos]);
}
static void *bar_init(struct pci_dev *dev, int offset)
diff --git a/drivers/xen/xen-pciback/conf_space_quirks.c b/drivers/xen/xen-pciback/conf_space_quirks.c
index 921a889e65e..7476791cab4 100644
--- a/drivers/xen/xen-pciback/conf_space_quirks.c
+++ b/drivers/xen/xen-pciback/conf_space_quirks.c
@@ -12,7 +12,6 @@
#include "conf_space_quirks.h"
LIST_HEAD(xen_pcibk_quirks);
-#define DRV_NAME "xen-pciback"
static inline const struct pci_device_id *
match_one_device(const struct pci_device_id *id, const struct pci_dev *dev)
{
@@ -36,7 +35,7 @@ static struct xen_pcibk_config_quirk *xen_pcibk_find_quirk(struct pci_dev *dev)
goto out;
tmp_quirk = NULL;
printk(KERN_DEBUG DRV_NAME
- ":quirk didn't match any device xen_pciback knows about\n");
+ ": quirk didn't match any device known\n");
out:
return tmp_quirk;
}
diff --git a/drivers/xen/xen-pciback/passthrough.c b/drivers/xen/xen-pciback/passthrough.c
index 1d32a9a42c0..828dddc360d 100644
--- a/drivers/xen/xen-pciback/passthrough.c
+++ b/drivers/xen/xen-pciback/passthrough.c
@@ -7,13 +7,13 @@
#include <linux/list.h>
#include <linux/pci.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
#include "pciback.h"
struct passthrough_dev_data {
/* Access to dev_list must be protected by lock */
struct list_head dev_list;
- spinlock_t lock;
+ struct mutex lock;
};
static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
@@ -24,9 +24,8 @@ static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
struct pci_dev_entry *dev_entry;
struct pci_dev *dev = NULL;
- unsigned long flags;
- spin_lock_irqsave(&dev_data->lock, flags);
+ mutex_lock(&dev_data->lock);
list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
if (domain == (unsigned int)pci_domain_nr(dev_entry->dev->bus)
@@ -37,7 +36,7 @@ static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
}
}
- spin_unlock_irqrestore(&dev_data->lock, flags);
+ mutex_unlock(&dev_data->lock);
return dev;
}
@@ -48,7 +47,6 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
{
struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
struct pci_dev_entry *dev_entry;
- unsigned long flags;
unsigned int domain, bus, devfn;
int err;
@@ -57,9 +55,9 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
return -ENOMEM;
dev_entry->dev = dev;
- spin_lock_irqsave(&dev_data->lock, flags);
+ mutex_lock(&dev_data->lock);
list_add_tail(&dev_entry->list, &dev_data->dev_list);
- spin_unlock_irqrestore(&dev_data->lock, flags);
+ mutex_unlock(&dev_data->lock);
/* Publish this device. */
domain = (unsigned int)pci_domain_nr(dev->bus);
@@ -76,9 +74,8 @@ static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
struct pci_dev_entry *dev_entry, *t;
struct pci_dev *found_dev = NULL;
- unsigned long flags;
- spin_lock_irqsave(&dev_data->lock, flags);
+ mutex_lock(&dev_data->lock);
list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
if (dev_entry->dev == dev) {
@@ -88,7 +85,7 @@ static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
}
}
- spin_unlock_irqrestore(&dev_data->lock, flags);
+ mutex_unlock(&dev_data->lock);
if (found_dev)
pcistub_put_pci_dev(found_dev);
@@ -102,7 +99,7 @@ static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
if (!dev_data)
return -ENOMEM;
- spin_lock_init(&dev_data->lock);
+ mutex_init(&dev_data->lock);
INIT_LIST_HEAD(&dev_data->dev_list);
@@ -116,14 +113,14 @@ static int __xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
{
int err = 0;
struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
- struct pci_dev_entry *dev_entry, *e, *tmp;
+ struct pci_dev_entry *dev_entry, *e;
struct pci_dev *dev;
int found;
unsigned int domain, bus;
- spin_lock(&dev_data->lock);
+ mutex_lock(&dev_data->lock);
- list_for_each_entry_safe(dev_entry, tmp, &dev_data->dev_list, list) {
+ list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
/* Only publish this device as a root if none of its
* parent bridges are exported
*/
@@ -142,16 +139,13 @@ static int __xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
bus = (unsigned int)dev_entry->dev->bus->number;
if (!found) {
- spin_unlock(&dev_data->lock);
err = publish_root_cb(pdev, domain, bus);
if (err)
break;
- spin_lock(&dev_data->lock);
}
}
- if (!err)
- spin_unlock(&dev_data->lock);
+ mutex_unlock(&dev_data->lock);
return err;
}
@@ -182,7 +176,7 @@ static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
return 1;
}
-struct xen_pcibk_backend xen_pcibk_passthrough_backend = {
+const struct xen_pcibk_backend xen_pcibk_passthrough_backend = {
.name = "passthrough",
.init = __xen_pcibk_init_devices,
.free = __xen_pcibk_release_devices,
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index aec214ac0a1..8f06e1ed028 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -21,8 +21,6 @@
#include "conf_space.h"
#include "conf_space_quirks.h"
-#define DRV_NAME "xen-pciback"
-
static char *pci_devs_to_hide;
wait_queue_head_t xen_pcibk_aer_wait_queue;
/*Add sem for sync AER handling and xen_pcibk remove/reconfigue ops,
@@ -222,6 +220,8 @@ void pcistub_put_pci_dev(struct pci_dev *dev)
}
spin_unlock_irqrestore(&pcistub_devices_lock, flags);
+ if (WARN_ON(!found_psdev))
+ return;
/*hold this lock for avoiding breaking link between
* pcistub and xen_pcibk when AER is in processing
@@ -514,12 +514,9 @@ static void kill_domain_by_device(struct pcistub_device *psdev)
int err;
char nodename[PCI_NODENAME_MAX];
- if (!psdev)
- dev_err(&psdev->dev->dev,
- "device is NULL when do AER recovery/kill_domain\n");
+ BUG_ON(!psdev);
snprintf(nodename, PCI_NODENAME_MAX, "/local/domain/0/backend/pci/%d/0",
psdev->pdev->xdev->otherend_id);
- nodename[strlen(nodename)] = '\0';
again:
err = xenbus_transaction_start(&xbt);
@@ -605,7 +602,7 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev,
if (test_bit(_XEN_PCIF_active,
(unsigned long *)&psdev->pdev->sh_info->flags)) {
dev_dbg(&psdev->dev->dev,
- "schedule pci_conf service in xen_pcibk\n");
+ "schedule pci_conf service in " DRV_NAME "\n");
xen_pcibk_test_and_schedule_op(psdev->pdev);
}
@@ -995,8 +992,7 @@ out:
err = count;
return err;
}
-
-DRIVER_ATTR(new_slot, S_IWUSR, NULL, pcistub_slot_add);
+static DRIVER_ATTR(new_slot, S_IWUSR, NULL, pcistub_slot_add);
static ssize_t pcistub_slot_remove(struct device_driver *drv, const char *buf,
size_t count)
@@ -1015,8 +1011,7 @@ out:
err = count;
return err;
}
-
-DRIVER_ATTR(remove_slot, S_IWUSR, NULL, pcistub_slot_remove);
+static DRIVER_ATTR(remove_slot, S_IWUSR, NULL, pcistub_slot_remove);
static ssize_t pcistub_slot_show(struct device_driver *drv, char *buf)
{
@@ -1039,8 +1034,7 @@ static ssize_t pcistub_slot_show(struct device_driver *drv, char *buf)
return count;
}
-
-DRIVER_ATTR(slots, S_IRUSR, pcistub_slot_show, NULL);
+static DRIVER_ATTR(slots, S_IRUSR, pcistub_slot_show, NULL);
static ssize_t pcistub_irq_handler_show(struct device_driver *drv, char *buf)
{
@@ -1069,8 +1063,7 @@ static ssize_t pcistub_irq_handler_show(struct device_driver *drv, char *buf)
spin_unlock_irqrestore(&pcistub_devices_lock, flags);
return count;
}
-
-DRIVER_ATTR(irq_handlers, S_IRUSR, pcistub_irq_handler_show, NULL);
+static DRIVER_ATTR(irq_handlers, S_IRUSR, pcistub_irq_handler_show, NULL);
static ssize_t pcistub_irq_handler_switch(struct device_driver *drv,
const char *buf,
@@ -1106,7 +1099,8 @@ out:
err = count;
return err;
}
-DRIVER_ATTR(irq_handler_state, S_IWUSR, NULL, pcistub_irq_handler_switch);
+static DRIVER_ATTR(irq_handler_state, S_IWUSR, NULL,
+ pcistub_irq_handler_switch);
static ssize_t pcistub_quirk_add(struct device_driver *drv, const char *buf,
size_t count)
@@ -1170,8 +1164,8 @@ out:
return count;
}
-
-DRIVER_ATTR(quirks, S_IRUSR | S_IWUSR, pcistub_quirk_show, pcistub_quirk_add);
+static DRIVER_ATTR(quirks, S_IRUSR | S_IWUSR, pcistub_quirk_show,
+ pcistub_quirk_add);
static ssize_t permissive_add(struct device_driver *drv, const char *buf,
size_t count)
@@ -1236,8 +1230,8 @@ static ssize_t permissive_show(struct device_driver *drv, char *buf)
spin_unlock_irqrestore(&pcistub_devices_lock, flags);
return count;
}
-
-DRIVER_ATTR(permissive, S_IRUSR | S_IWUSR, permissive_show, permissive_add);
+static DRIVER_ATTR(permissive, S_IRUSR | S_IWUSR, permissive_show,
+ permissive_add);
static void pcistub_exit(void)
{
@@ -1374,3 +1368,4 @@ module_init(xen_pcibk_init);
module_exit(xen_pcibk_cleanup);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("xen-backend:pci");
diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h
index a0e131a8150..e9b4011c5f9 100644
--- a/drivers/xen/xen-pciback/pciback.h
+++ b/drivers/xen/xen-pciback/pciback.h
@@ -15,6 +15,8 @@
#include <linux/atomic.h>
#include <xen/interface/io/pciif.h>
+#define DRV_NAME "xen-pciback"
+
struct pci_dev_entry {
struct list_head list;
struct pci_dev *dev;
@@ -27,7 +29,7 @@ struct pci_dev_entry {
struct xen_pcibk_device {
void *pci_dev_data;
- spinlock_t dev_lock;
+ struct mutex dev_lock;
struct xenbus_device *xdev;
struct xenbus_watch be_watch;
u8 be_watching;
@@ -89,7 +91,7 @@ typedef int (*publish_pci_root_cb) (struct xen_pcibk_device *pdev,
* passthrough - BDFs are exactly like in the host.
*/
struct xen_pcibk_backend {
- char *name;
+ const char *name;
int (*init)(struct xen_pcibk_device *pdev);
void (*free)(struct xen_pcibk_device *pdev);
int (*find)(struct pci_dev *pcidev, struct xen_pcibk_device *pdev,
@@ -104,9 +106,9 @@ struct xen_pcibk_backend {
unsigned int devfn);
};
-extern struct xen_pcibk_backend xen_pcibk_vpci_backend;
-extern struct xen_pcibk_backend xen_pcibk_passthrough_backend;
-extern struct xen_pcibk_backend *xen_pcibk_backend;
+extern const struct xen_pcibk_backend xen_pcibk_vpci_backend;
+extern const struct xen_pcibk_backend xen_pcibk_passthrough_backend;
+extern const struct xen_pcibk_backend *xen_pcibk_backend;
static inline int xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
struct pci_dev *dev,
@@ -116,13 +118,14 @@ static inline int xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
if (xen_pcibk_backend && xen_pcibk_backend->add)
return xen_pcibk_backend->add(pdev, dev, devid, publish_cb);
return -1;
-};
+}
+
static inline void xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
struct pci_dev *dev)
{
if (xen_pcibk_backend && xen_pcibk_backend->free)
return xen_pcibk_backend->release(pdev, dev);
-};
+}
static inline struct pci_dev *
xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev, unsigned int domain,
@@ -131,7 +134,8 @@ xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev, unsigned int domain,
if (xen_pcibk_backend && xen_pcibk_backend->get)
return xen_pcibk_backend->get(pdev, domain, bus, devfn);
return NULL;
-};
+}
+
/**
* Add for domain0 PCIE-AER handling. Get guest domain/bus/devfn in xen_pcibk
* before sending aer request to pcifront, so that guest could identify
@@ -148,25 +152,29 @@ static inline int xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
return xen_pcibk_backend->find(pcidev, pdev, domain, bus,
devfn);
return -1;
-};
+}
+
static inline int xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
{
if (xen_pcibk_backend && xen_pcibk_backend->init)
return xen_pcibk_backend->init(pdev);
return -1;
-};
+}
+
static inline int xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
publish_pci_root_cb cb)
{
if (xen_pcibk_backend && xen_pcibk_backend->publish)
return xen_pcibk_backend->publish(pdev, cb);
return -1;
-};
+}
+
static inline void xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
{
if (xen_pcibk_backend && xen_pcibk_backend->free)
return xen_pcibk_backend->free(pdev);
-};
+}
+
/* Handles events from front-end */
irqreturn_t xen_pcibk_handle_event(int irq, void *dev_id);
void xen_pcibk_do_op(struct work_struct *data);
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
index 8c95c3415b7..63616d7453e 100644
--- a/drivers/xen/xen-pciback/pciback_ops.c
+++ b/drivers/xen/xen-pciback/pciback_ops.c
@@ -10,7 +10,6 @@
#include <linux/sched.h>
#include "pciback.h"
-#define DRV_NAME "xen-pciback"
int verbose_request;
module_param(verbose_request, int, 0644);
diff --git a/drivers/xen/xen-pciback/vpci.c b/drivers/xen/xen-pciback/vpci.c
index 4a42cfb0959..46d140baebd 100644
--- a/drivers/xen/xen-pciback/vpci.c
+++ b/drivers/xen/xen-pciback/vpci.c
@@ -8,16 +8,15 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
#include "pciback.h"
#define PCI_SLOT_MAX 32
-#define DRV_NAME "xen-pciback"
struct vpci_dev_data {
/* Access to dev_list must be protected by lock */
struct list_head dev_list[PCI_SLOT_MAX];
- spinlock_t lock;
+ struct mutex lock;
};
static inline struct list_head *list_first(struct list_head *head)
@@ -33,13 +32,12 @@ static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
struct pci_dev_entry *entry;
struct pci_dev *dev = NULL;
struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
- unsigned long flags;
if (domain != 0 || bus != 0)
return NULL;
if (PCI_SLOT(devfn) < PCI_SLOT_MAX) {
- spin_lock_irqsave(&vpci_dev->lock, flags);
+ mutex_lock(&vpci_dev->lock);
list_for_each_entry(entry,
&vpci_dev->dev_list[PCI_SLOT(devfn)],
@@ -50,7 +48,7 @@ static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
}
}
- spin_unlock_irqrestore(&vpci_dev->lock, flags);
+ mutex_unlock(&vpci_dev->lock);
}
return dev;
}
@@ -71,7 +69,6 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
int err = 0, slot, func = -1;
struct pci_dev_entry *t, *dev_entry;
struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
- unsigned long flags;
if ((dev->class >> 24) == PCI_BASE_CLASS_BRIDGE) {
err = -EFAULT;
@@ -90,7 +87,7 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
dev_entry->dev = dev;
- spin_lock_irqsave(&vpci_dev->lock, flags);
+ mutex_lock(&vpci_dev->lock);
/* Keep multi-function devices together on the virtual PCI bus */
for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
@@ -129,7 +126,7 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
"No more space on root virtual PCI bus");
unlock:
- spin_unlock_irqrestore(&vpci_dev->lock, flags);
+ mutex_unlock(&vpci_dev->lock);
/* Publish this device. */
if (!err)
@@ -145,14 +142,13 @@ static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
int slot;
struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
struct pci_dev *found_dev = NULL;
- unsigned long flags;
- spin_lock_irqsave(&vpci_dev->lock, flags);
+ mutex_lock(&vpci_dev->lock);
for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
- struct pci_dev_entry *e, *tmp;
- list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
- list) {
+ struct pci_dev_entry *e;
+
+ list_for_each_entry(e, &vpci_dev->dev_list[slot], list) {
if (e->dev == dev) {
list_del(&e->list);
found_dev = e->dev;
@@ -163,7 +159,7 @@ static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
}
out:
- spin_unlock_irqrestore(&vpci_dev->lock, flags);
+ mutex_unlock(&vpci_dev->lock);
if (found_dev)
pcistub_put_pci_dev(found_dev);
@@ -178,7 +174,7 @@ static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
if (!vpci_dev)
return -ENOMEM;
- spin_lock_init(&vpci_dev->lock);
+ mutex_init(&vpci_dev->lock);
for (slot = 0; slot < PCI_SLOT_MAX; slot++)
INIT_LIST_HEAD(&vpci_dev->dev_list[slot]);
@@ -222,10 +218,9 @@ static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
struct pci_dev_entry *entry;
struct pci_dev *dev = NULL;
struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
- unsigned long flags;
int found = 0, slot;
- spin_lock_irqsave(&vpci_dev->lock, flags);
+ mutex_lock(&vpci_dev->lock);
for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
list_for_each_entry(entry,
&vpci_dev->dev_list[slot],
@@ -243,11 +238,11 @@ static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
}
}
}
- spin_unlock_irqrestore(&vpci_dev->lock, flags);
+ mutex_unlock(&vpci_dev->lock);
return found;
}
-struct xen_pcibk_backend xen_pcibk_vpci_backend = {
+const struct xen_pcibk_backend xen_pcibk_vpci_backend = {
.name = "vpci",
.init = __xen_pcibk_init_devices,
.free = __xen_pcibk_release_devices,
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index 978d2c6f5dc..474d52ec337 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -13,7 +13,6 @@
#include <asm/xen/pci.h>
#include "pciback.h"
-#define DRV_NAME "xen-pciback"
#define INVALID_EVTCHN_IRQ (-1)
struct workqueue_struct *xen_pcibk_wq;
@@ -44,7 +43,7 @@ static struct xen_pcibk_device *alloc_pdev(struct xenbus_device *xdev)
pdev->xdev = xdev;
dev_set_drvdata(&xdev->dev, pdev);
- spin_lock_init(&pdev->dev_lock);
+ mutex_init(&pdev->dev_lock);
pdev->sh_info = NULL;
pdev->evtchn_irq = INVALID_EVTCHN_IRQ;
@@ -62,14 +61,12 @@ out:
static void xen_pcibk_disconnect(struct xen_pcibk_device *pdev)
{
- spin_lock(&pdev->dev_lock);
-
+ mutex_lock(&pdev->dev_lock);
/* Ensure the guest can't trigger our handler before removing devices */
if (pdev->evtchn_irq != INVALID_EVTCHN_IRQ) {
unbind_from_irqhandler(pdev->evtchn_irq, pdev);
pdev->evtchn_irq = INVALID_EVTCHN_IRQ;
}
- spin_unlock(&pdev->dev_lock);
/* If the driver domain started an op, make sure we complete it
* before releasing the shared memory */
@@ -77,13 +74,11 @@ static void xen_pcibk_disconnect(struct xen_pcibk_device *pdev)
/* Note, the workqueue does not use spinlocks at all.*/
flush_workqueue(xen_pcibk_wq);
- spin_lock(&pdev->dev_lock);
if (pdev->sh_info != NULL) {
xenbus_unmap_ring_vfree(pdev->xdev, pdev->sh_info);
pdev->sh_info = NULL;
}
- spin_unlock(&pdev->dev_lock);
-
+ mutex_unlock(&pdev->dev_lock);
}
static void free_pdev(struct xen_pcibk_device *pdev)
@@ -120,9 +115,7 @@ static int xen_pcibk_do_attach(struct xen_pcibk_device *pdev, int gnt_ref,
goto out;
}
- spin_lock(&pdev->dev_lock);
pdev->sh_info = vaddr;
- spin_unlock(&pdev->dev_lock);
err = bind_interdomain_evtchn_to_irqhandler(
pdev->xdev->otherend_id, remote_evtchn, xen_pcibk_handle_event,
@@ -132,10 +125,7 @@ static int xen_pcibk_do_attach(struct xen_pcibk_device *pdev, int gnt_ref,
"Error binding event channel to IRQ");
goto out;
}
-
- spin_lock(&pdev->dev_lock);
pdev->evtchn_irq = err;
- spin_unlock(&pdev->dev_lock);
err = 0;
dev_dbg(&pdev->xdev->dev, "Attached!\n");
@@ -150,6 +140,7 @@ static int xen_pcibk_attach(struct xen_pcibk_device *pdev)
char *magic = NULL;
+ mutex_lock(&pdev->dev_lock);
/* Make sure we only do this setup once */
if (xenbus_read_driver_state(pdev->xdev->nodename) !=
XenbusStateInitialised)
@@ -176,7 +167,7 @@ static int xen_pcibk_attach(struct xen_pcibk_device *pdev)
if (magic == NULL || strcmp(magic, XEN_PCI_MAGIC) != 0) {
xenbus_dev_fatal(pdev->xdev, -EFAULT,
"version mismatch (%s/%s) with pcifront - "
- "halting xen_pcibk",
+ "halting " DRV_NAME,
magic, XEN_PCI_MAGIC);
goto out;
}
@@ -194,6 +185,7 @@ static int xen_pcibk_attach(struct xen_pcibk_device *pdev)
dev_dbg(&pdev->xdev->dev, "Connected? %d\n", err);
out:
+ mutex_unlock(&pdev->dev_lock);
kfree(magic);
@@ -369,6 +361,7 @@ static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
dev_dbg(&pdev->xdev->dev, "Reconfiguring device ...\n");
+ mutex_lock(&pdev->dev_lock);
/* Make sure we only reconfigure once */
if (xenbus_read_driver_state(pdev->xdev->nodename) !=
XenbusStateReconfiguring)
@@ -506,6 +499,7 @@ static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
}
out:
+ mutex_unlock(&pdev->dev_lock);
return 0;
}
@@ -562,6 +556,7 @@ static int xen_pcibk_setup_backend(struct xen_pcibk_device *pdev)
char dev_str[64];
char state_str[64];
+ mutex_lock(&pdev->dev_lock);
/* It's possible we could get the call to setup twice, so make sure
* we're not already connected.
*/
@@ -642,10 +637,10 @@ static int xen_pcibk_setup_backend(struct xen_pcibk_device *pdev)
"Error switching to initialised state!");
out:
+ mutex_unlock(&pdev->dev_lock);
if (!err)
/* see if pcifront is already configured (if not, we'll wait) */
xen_pcibk_attach(pdev);
-
return err;
}
@@ -724,7 +719,7 @@ static struct xenbus_driver xenbus_xen_pcibk_driver = {
.otherend_changed = xen_pcibk_frontend_changed,
};
-struct xen_pcibk_backend *xen_pcibk_backend;
+const struct xen_pcibk_backend *__read_mostly xen_pcibk_backend;
int __init xen_pcibk_xenbus_register(void)
{
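The spinlock-to-mutex conversion running through pciback.h, vpci.c and xenbus.c above lets the connect/disconnect paths keep the device lock held across operations that can sleep (flush_workqueue(), xenbus_unmap_ring_vfree()), which the previous spin_lock()/spin_lock_irqsave() scheme had to drop the lock around. A minimal editorial sketch of the pattern, with hypothetical names only:

static struct workqueue_struct *example_wq;
static DEFINE_MUTEX(example_dev_lock);

static void example_disconnect(void)
{
	mutex_lock(&example_dev_lock);
	/* May sleep while the lock is held -- fine for a mutex,
	 * not allowed under a spinlock. */
	flush_workqueue(example_wq);
	mutex_unlock(&example_dev_lock);
}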
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c
index 6ea852e2516..d93c70857e0 100644
--- a/drivers/xen/xen-selfballoon.c
+++ b/drivers/xen/xen-selfballoon.c
@@ -68,6 +68,8 @@
*/
#include <linux/kernel.h>
+#include <linux/bootmem.h>
+#include <linux/swap.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
@@ -93,6 +95,15 @@ static unsigned int selfballoon_uphysteresis __read_mostly = 1;
/* In HZ, controls frequency of worker invocation. */
static unsigned int selfballoon_interval __read_mostly = 5;
+/*
+ * Minimum usable RAM in MB used as a floor for the selfballooning target.
+ * If non-zero, it is added to totalreserve_pages and self-ballooning
+ * will not balloon below the sum. If zero, a piecewise linear function
+ * is calculated as a minimum and added to totalreserve_pages. Note that
+ * setting this value indiscriminately may cause OOMs and crashes.
+ */
+static unsigned int selfballoon_min_usable_mb;
+
static void selfballoon_process(struct work_struct *work);
static DECLARE_DELAYED_WORK(selfballoon_worker, selfballoon_process);
@@ -189,20 +200,23 @@ static int __init xen_selfballooning_setup(char *s)
__setup("selfballooning", xen_selfballooning_setup);
#endif /* CONFIG_FRONTSWAP */
+#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
+
/*
* Use current balloon size, the goal (vm_committed_as), and hysteresis
* parameters to set a new target balloon size
*/
static void selfballoon_process(struct work_struct *work)
{
- unsigned long cur_pages, goal_pages, tgt_pages;
+ unsigned long cur_pages, goal_pages, tgt_pages, floor_pages;
+ unsigned long useful_pages;
bool reset_timer = false;
if (xen_selfballooning_enabled) {
- cur_pages = balloon_stats.current_pages;
+ cur_pages = totalram_pages;
tgt_pages = cur_pages; /* default is no change */
goal_pages = percpu_counter_read_positive(&vm_committed_as) +
- balloon_stats.current_pages - totalram_pages;
+ totalreserve_pages;
#ifdef CONFIG_FRONTSWAP
/* allow space for frontswap pages to be repatriated */
if (frontswap_selfshrinking && frontswap_enabled)
@@ -217,7 +231,26 @@ static void selfballoon_process(struct work_struct *work)
((goal_pages - cur_pages) /
selfballoon_uphysteresis);
/* else if cur_pages == goal_pages, no change */
- balloon_set_new_target(tgt_pages);
+ useful_pages = max_pfn - totalreserve_pages;
+ if (selfballoon_min_usable_mb != 0)
+ floor_pages = totalreserve_pages +
+ MB2PAGES(selfballoon_min_usable_mb);
+ /* piecewise linear function ending in ~3% slope */
+ else if (useful_pages < MB2PAGES(16))
+ floor_pages = max_pfn; /* not worth ballooning */
+ else if (useful_pages < MB2PAGES(64))
+ floor_pages = totalreserve_pages + MB2PAGES(16) +
+ ((useful_pages - MB2PAGES(16)) >> 1);
+ else if (useful_pages < MB2PAGES(512))
+ floor_pages = totalreserve_pages + MB2PAGES(40) +
+ ((useful_pages - MB2PAGES(40)) >> 3);
+ else /* useful_pages >= MB2PAGES(512) */
+ floor_pages = totalreserve_pages + MB2PAGES(99) +
+ ((useful_pages - MB2PAGES(99)) >> 5);
+ if (tgt_pages < floor_pages)
+ tgt_pages = floor_pages;
+ balloon_set_new_target(tgt_pages +
+ balloon_stats.current_pages - totalram_pages);
reset_timer = true;
}
#ifdef CONFIG_FRONTSWAP
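The floor logic added above keeps the self-balloon target from squeezing the guest below a piecewise-linear minimum of usable memory (break points at 16, 64 and 512 MB of useful RAM) unless selfballoon_min_usable_mb overrides it. The userspace sketch below (editorial, not part of the patch; 4 KiB pages and a made-up 32 MB reserve are assumptions) mirrors the same arithmetic so the break points can be checked in isolation:

#include <stdio.h>

#define PAGE_SHIFT 12
#define MB2PAGES(mb) ((unsigned long)(mb) << (20 - PAGE_SHIFT))

static unsigned long floor_pages(unsigned long reserve, unsigned long useful)
{
	if (useful < MB2PAGES(16))
		return reserve + useful;	/* == max_pfn: not worth ballooning */
	else if (useful < MB2PAGES(64))
		return reserve + MB2PAGES(16) + ((useful - MB2PAGES(16)) >> 1);
	else if (useful < MB2PAGES(512))
		return reserve + MB2PAGES(40) + ((useful - MB2PAGES(40)) >> 3);
	else
		return reserve + MB2PAGES(99) + ((useful - MB2PAGES(99)) >> 5);
}

int main(void)
{
	unsigned long sizes_mb[] = { 8, 128, 1024 };
	unsigned long reserve = MB2PAGES(32);
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("useful %4lu MB -> floor %lu pages\n", sizes_mb[i],
		       floor_pages(reserve, MB2PAGES(sizes_mb[i])));
	return 0;
}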
@@ -340,6 +373,31 @@ static ssize_t store_selfballoon_uphys(struct sys_device *dev,
static SYSDEV_ATTR(selfballoon_uphysteresis, S_IRUGO | S_IWUSR,
show_selfballoon_uphys, store_selfballoon_uphys);
+SELFBALLOON_SHOW(selfballoon_min_usable_mb, "%d\n",
+ selfballoon_min_usable_mb);
+
+static ssize_t store_selfballoon_min_usable_mb(struct sys_device *dev,
+ struct sysdev_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ unsigned long val;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ err = strict_strtoul(buf, 10, &val);
+ if (err || val == 0)
+ return -EINVAL;
+ selfballoon_min_usable_mb = val;
+ return count;
+}
+
+static SYSDEV_ATTR(selfballoon_min_usable_mb, S_IRUGO | S_IWUSR,
+ show_selfballoon_min_usable_mb,
+ store_selfballoon_min_usable_mb);
+
+
#ifdef CONFIG_FRONTSWAP
SELFBALLOON_SHOW(frontswap_selfshrinking, "%d\n", frontswap_selfshrinking);
@@ -421,6 +479,7 @@ static struct attribute *selfballoon_attrs[] = {
&attr_selfballoon_interval.attr,
&attr_selfballoon_downhysteresis.attr,
&attr_selfballoon_uphysteresis.attr,
+ &attr_selfballoon_min_usable_mb.attr,
#ifdef CONFIG_FRONTSWAP
&attr_frontswap_selfshrinking.attr,
&attr_frontswap_hysteresis.attr,
diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c
index 090c61ee8fd..2eff7a6aaa2 100644
--- a/drivers/xen/xenbus/xenbus_comms.c
+++ b/drivers/xen/xenbus/xenbus_comms.c
@@ -212,7 +212,9 @@ int xb_init_comms(void)
printk(KERN_WARNING "XENBUS response ring is not quiescent "
"(%08x:%08x): fixing up\n",
intf->rsp_cons, intf->rsp_prod);
- intf->rsp_cons = intf->rsp_prod;
+ /* breaks kdump */
+ if (!reset_devices)
+ intf->rsp_cons = intf->rsp_prod;
}
if (xenbus_irq) {
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index bd2f90c9ac8..cef9b0bf63d 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -684,64 +684,74 @@ static int __init xenbus_probe_initcall(void)
device_initcall(xenbus_probe_initcall);
-static int __init xenbus_init(void)
+/* Set up event channel for xenstored which is run as a local process
+ * (this is normally used only in dom0)
+ */
+static int __init xenstored_local_init(void)
{
int err = 0;
unsigned long page = 0;
+ struct evtchn_alloc_unbound alloc_unbound;
- DPRINTK("");
+ /* Allocate Xenstore page */
+ page = get_zeroed_page(GFP_KERNEL);
+ if (!page)
+ goto out_err;
- err = -ENODEV;
- if (!xen_domain())
- return err;
+ xen_store_mfn = xen_start_info->store_mfn =
+ pfn_to_mfn(virt_to_phys((void *)page) >>
+ PAGE_SHIFT);
- /*
- * Domain0 doesn't have a store_evtchn or store_mfn yet.
- */
- if (xen_initial_domain()) {
- struct evtchn_alloc_unbound alloc_unbound;
+ /* Next allocate a local port which xenstored can bind to */
+ alloc_unbound.dom = DOMID_SELF;
+ alloc_unbound.remote_dom = DOMID_SELF;
- /* Allocate Xenstore page */
- page = get_zeroed_page(GFP_KERNEL);
- if (!page)
- goto out_error;
+ err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
+ &alloc_unbound);
+ if (err == -ENOSYS)
+ goto out_err;
- xen_store_mfn = xen_start_info->store_mfn =
- pfn_to_mfn(virt_to_phys((void *)page) >>
- PAGE_SHIFT);
+ BUG_ON(err);
+ xen_store_evtchn = xen_start_info->store_evtchn =
+ alloc_unbound.port;
- /* Next allocate a local port which xenstored can bind to */
- alloc_unbound.dom = DOMID_SELF;
- alloc_unbound.remote_dom = 0;
+ return 0;
- err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
- &alloc_unbound);
- if (err == -ENOSYS)
- goto out_error;
+ out_err:
+ if (page != 0)
+ free_page(page);
+ return err;
+}
- BUG_ON(err);
- xen_store_evtchn = xen_start_info->store_evtchn =
- alloc_unbound.port;
+static int __init xenbus_init(void)
+{
+ int err = 0;
- xen_store_interface = mfn_to_virt(xen_store_mfn);
+ if (!xen_domain())
+ return -ENODEV;
+
+ if (xen_hvm_domain()) {
+ uint64_t v = 0;
+ err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
+ if (err)
+ goto out_error;
+ xen_store_evtchn = (int)v;
+ err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
+ if (err)
+ goto out_error;
+ xen_store_mfn = (unsigned long)v;
+ xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE);
} else {
- if (xen_hvm_domain()) {
- uint64_t v = 0;
- err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
- if (err)
- goto out_error;
- xen_store_evtchn = (int)v;
- err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
+ xen_store_evtchn = xen_start_info->store_evtchn;
+ xen_store_mfn = xen_start_info->store_mfn;
+ if (xen_store_evtchn)
+ xenstored_ready = 1;
+ else {
+ err = xenstored_local_init();
if (err)
goto out_error;
- xen_store_mfn = (unsigned long)v;
- xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE);
- } else {
- xen_store_evtchn = xen_start_info->store_evtchn;
- xen_store_mfn = xen_start_info->store_mfn;
- xen_store_interface = mfn_to_virt(xen_store_mfn);
- xenstored_ready = 1;
}
+ xen_store_interface = mfn_to_virt(xen_store_mfn);
}
/* Initialize the interface to xenstore. */
@@ -760,12 +770,7 @@ static int __init xenbus_init(void)
proc_mkdir("xen", NULL);
#endif
- return 0;
-
- out_error:
- if (page != 0)
- free_page(page);
-
+ out_error:
return err;
}
diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c
index 60adf919d78..32417b5064f 100644
--- a/drivers/xen/xenbus/xenbus_probe_backend.c
+++ b/drivers/xen/xenbus/xenbus_probe_backend.c
@@ -104,8 +104,6 @@ static int xenbus_uevent_backend(struct device *dev,
xdev = to_xenbus_device(dev);
bus = container_of(xdev->dev.bus, struct xen_bus_type, bus);
- if (xdev == NULL)
- return -ENODEV;
if (add_uevent_var(env, "MODALIAS=xen-backend:%s", xdev->devicetype))
return -ENOMEM;
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index ed2ba474a56..540587e18a9 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -248,10 +248,131 @@ int __xenbus_register_frontend(struct xenbus_driver *drv,
}
EXPORT_SYMBOL_GPL(__xenbus_register_frontend);
+static DECLARE_WAIT_QUEUE_HEAD(backend_state_wq);
+static int backend_state;
+
+static void xenbus_reset_backend_state_changed(struct xenbus_watch *w,
+ const char **v, unsigned int l)
+{
+ xenbus_scanf(XBT_NIL, v[XS_WATCH_PATH], "", "%i", &backend_state);
+ printk(KERN_DEBUG "XENBUS: backend %s %s\n",
+ v[XS_WATCH_PATH], xenbus_strstate(backend_state));
+ wake_up(&backend_state_wq);
+}
+
+static void xenbus_reset_wait_for_backend(char *be, int expected)
+{
+ long timeout;
+ timeout = wait_event_interruptible_timeout(backend_state_wq,
+ backend_state == expected, 5 * HZ);
+ if (timeout <= 0)
+ printk(KERN_INFO "XENBUS: backend %s timed out.\n", be);
+}
+
+/*
+ * Reset frontend if it is in Connected or Closed state.
+ * Wait for backend to catch up.
+ * State Connected happens during kdump, Closed after kexec.
+ */
+static void xenbus_reset_frontend(char *fe, char *be, int be_state)
+{
+ struct xenbus_watch be_watch;
+
+ printk(KERN_DEBUG "XENBUS: backend %s %s\n",
+ be, xenbus_strstate(be_state));
+
+ memset(&be_watch, 0, sizeof(be_watch));
+ be_watch.node = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/state", be);
+ if (!be_watch.node)
+ return;
+
+ be_watch.callback = xenbus_reset_backend_state_changed;
+ backend_state = XenbusStateUnknown;
+
+ printk(KERN_INFO "XENBUS: triggering reconnect on %s\n", be);
+ register_xenbus_watch(&be_watch);
+
+ /* fall through to forward backend to state XenbusStateInitialising */
+ switch (be_state) {
+ case XenbusStateConnected:
+ xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateClosing);
+ xenbus_reset_wait_for_backend(be, XenbusStateClosing);
+
+ case XenbusStateClosing:
+ xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateClosed);
+ xenbus_reset_wait_for_backend(be, XenbusStateClosed);
+
+ case XenbusStateClosed:
+ xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateInitialising);
+ xenbus_reset_wait_for_backend(be, XenbusStateInitWait);
+ }
+
+ unregister_xenbus_watch(&be_watch);
+ printk(KERN_INFO "XENBUS: reconnect done on %s\n", be);
+ kfree(be_watch.node);
+}
+
+static void xenbus_check_frontend(char *class, char *dev)
+{
+ int be_state, fe_state, err;
+ char *backend, *frontend;
+
+ frontend = kasprintf(GFP_NOIO | __GFP_HIGH, "device/%s/%s", class, dev);
+ if (!frontend)
+ return;
+
+ err = xenbus_scanf(XBT_NIL, frontend, "state", "%i", &fe_state);
+ if (err != 1)
+ goto out;
+
+ switch (fe_state) {
+ case XenbusStateConnected:
+ case XenbusStateClosed:
+ printk(KERN_DEBUG "XENBUS: frontend %s %s\n",
+ frontend, xenbus_strstate(fe_state));
+ backend = xenbus_read(XBT_NIL, frontend, "backend", NULL);
+ if (!backend || IS_ERR(backend))
+ goto out;
+ err = xenbus_scanf(XBT_NIL, backend, "state", "%i", &be_state);
+ if (err == 1)
+ xenbus_reset_frontend(frontend, backend, be_state);
+ kfree(backend);
+ break;
+ default:
+ break;
+ }
+out:
+ kfree(frontend);
+}
+
+static void xenbus_reset_state(void)
+{
+ char **devclass, **dev;
+ int devclass_n, dev_n;
+ int i, j;
+
+ devclass = xenbus_directory(XBT_NIL, "device", "", &devclass_n);
+ if (IS_ERR(devclass))
+ return;
+
+ for (i = 0; i < devclass_n; i++) {
+ dev = xenbus_directory(XBT_NIL, "device", devclass[i], &dev_n);
+ if (IS_ERR(dev))
+ continue;
+ for (j = 0; j < dev_n; j++)
+ xenbus_check_frontend(devclass[i], dev[j]);
+ kfree(dev);
+ }
+ kfree(devclass);
+}
+
static int frontend_probe_and_watch(struct notifier_block *notifier,
unsigned long event,
void *data)
{
+ /* reset devices in Connected or Closed state */
+ if (xen_hvm_domain())
+ xenbus_reset_state();
/* Enumerate devices in xenstore and watch for changes. */
xenbus_probe_devices(&xenbus_frontend);
register_xenbus_watch(&fe_watch);
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index 5534690075a..b3b8f2f3ad1 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -45,6 +45,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <xen/xenbus.h>
+#include <xen/xen.h>
#include "xenbus_comms.h"
struct xs_stored_msg {
@@ -620,6 +621,15 @@ static struct xenbus_watch *find_watch(const char *token)
return NULL;
}
+static void xs_reset_watches(void)
+{
+ int err;
+
+ err = xs_error(xs_single(XBT_NIL, XS_RESET_WATCHES, "", NULL));
+ if (err && err != -EEXIST)
+ printk(KERN_WARNING "xs_reset_watches failed: %d\n", err);
+}
+
/* Register callback to watch this node. */
int register_xenbus_watch(struct xenbus_watch *watch)
{
@@ -638,8 +648,7 @@ int register_xenbus_watch(struct xenbus_watch *watch)
err = xs_watch(watch->node, token);
- /* Ignore errors due to multiple registration. */
- if ((err != 0) && (err != -EEXIST)) {
+ if (err) {
spin_lock(&watches_lock);
list_del(&watch->list);
spin_unlock(&watches_lock);
@@ -897,5 +906,9 @@ int xs_init(void)
if (IS_ERR(task))
return PTR_ERR(task);
+ /* shutdown watches for kexec boot */
+ if (xen_hvm_domain())
+ xs_reset_watches();
+
return 0;
}
diff --git a/drivers/zorro/zorro-driver.c b/drivers/zorro/zorro-driver.c
index 7ee2b6e7178..229624f867d 100644
--- a/drivers/zorro/zorro-driver.c
+++ b/drivers/zorro/zorro-driver.c
@@ -37,6 +37,7 @@ zorro_match_device(const struct zorro_device_id *ids,
}
return NULL;
}
+EXPORT_SYMBOL(zorro_match_device);
static int zorro_device_probe(struct device *dev)
@@ -91,6 +92,7 @@ int zorro_register_driver(struct zorro_driver *drv)
/* register with core */
return driver_register(&drv->driver);
}
+EXPORT_SYMBOL(zorro_register_driver);
/**
@@ -107,6 +109,7 @@ void zorro_unregister_driver(struct zorro_driver *drv)
{
driver_unregister(&drv->driver);
}
+EXPORT_SYMBOL(zorro_unregister_driver);
/**
@@ -168,6 +171,7 @@ struct bus_type zorro_bus_type = {
.probe = zorro_device_probe,
.remove = zorro_device_remove,
};
+EXPORT_SYMBOL(zorro_bus_type);
static int __init zorro_driver_init(void)
@@ -177,7 +181,3 @@ static int __init zorro_driver_init(void)
postcore_initcall(zorro_driver_init);
-EXPORT_SYMBOL(zorro_match_device);
-EXPORT_SYMBOL(zorro_register_driver);
-EXPORT_SYMBOL(zorro_unregister_driver);
-EXPORT_SYMBOL(zorro_bus_type);
diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c
index e0c2807b097..181fa8158a8 100644
--- a/drivers/zorro/zorro.c
+++ b/drivers/zorro/zorro.c
@@ -148,10 +148,10 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, bus);
- /* Register all devices */
pr_info("Zorro: Probing AutoConfig expansion devices: %u device%s\n",
zorro_num_autocon, zorro_num_autocon == 1 ? "" : "s");
+ /* First identify all devices ... */
for (i = 0; i < zorro_num_autocon; i++) {
z = &zorro_autocon[i];
z->id = (z->rom.er_Manufacturer<<16) | (z->rom.er_Product<<8);
@@ -172,6 +172,11 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
dev_set_name(&z->dev, "%02x", i);
z->dev.parent = &bus->dev;
z->dev.bus = &zorro_bus_type;
+ }
+
+ /* ... then register them */
+ for (i = 0; i < zorro_num_autocon; i++) {
+ z = &zorro_autocon[i];
error = device_register(&z->dev);
if (error) {
dev_err(&bus->dev, "Error registering device %s\n",
diff --git a/fs/attr.c b/fs/attr.c
index 538e27959d3..7ee7ba48831 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -13,6 +13,7 @@
#include <linux/fsnotify.h>
#include <linux/fcntl.h>
#include <linux/security.h>
+#include <linux/evm.h>
/**
* inode_change_ok - check if attribute changes to an inode are allowed
@@ -237,8 +238,10 @@ int notify_change(struct dentry * dentry, struct iattr * attr)
else
error = simple_setattr(dentry, attr);
- if (!error)
+ if (!error) {
fsnotify_change(dentry, ia_valid);
+ evm_inode_post_setattr(dentry, ia_valid);
+ }
return error;
}
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 3c3abff731a..e4e57d59edb 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1036,11 +1036,13 @@ out:
* on error we return an unlocked page and the error value
* on success we return a locked page and 0
*/
-static int prepare_uptodate_page(struct page *page, u64 pos)
+static int prepare_uptodate_page(struct page *page, u64 pos,
+ bool force_uptodate)
{
int ret = 0;
- if ((pos & (PAGE_CACHE_SIZE - 1)) && !PageUptodate(page)) {
+ if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) &&
+ !PageUptodate(page)) {
ret = btrfs_readpage(NULL, page);
if (ret)
return ret;
@@ -1061,7 +1063,7 @@ static int prepare_uptodate_page(struct page *page, u64 pos)
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
struct page **pages, size_t num_pages,
loff_t pos, unsigned long first_index,
- size_t write_bytes)
+ size_t write_bytes, bool force_uptodate)
{
struct extent_state *cached_state = NULL;
int i;
@@ -1086,10 +1088,11 @@ again:
}
if (i == 0)
- err = prepare_uptodate_page(pages[i], pos);
+ err = prepare_uptodate_page(pages[i], pos,
+ force_uptodate);
if (i == num_pages - 1)
err = prepare_uptodate_page(pages[i],
- pos + write_bytes);
+ pos + write_bytes, false);
if (err) {
page_cache_release(pages[i]);
faili = i - 1;
@@ -1158,6 +1161,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
size_t num_written = 0;
int nrptrs;
int ret = 0;
+ bool force_page_uptodate = false;
nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
@@ -1200,7 +1204,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
* contents of pages from loop to loop
*/
ret = prepare_pages(root, file, pages, num_pages,
- pos, first_index, write_bytes);
+ pos, first_index, write_bytes,
+ force_page_uptodate);
if (ret) {
btrfs_delalloc_release_space(inode,
num_pages << PAGE_CACHE_SHIFT);
@@ -1217,12 +1222,15 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
if (copied < write_bytes)
nrptrs = 1;
- if (copied == 0)
+ if (copied == 0) {
+ force_page_uptodate = true;
dirty_pages = 0;
- else
+ } else {
+ force_page_uptodate = false;
dirty_pages = (copied + offset +
PAGE_CACHE_SIZE - 1) >>
PAGE_CACHE_SHIFT;
+ }
/*
* If we had a short copy we need to release the excess delaloc
@@ -1817,6 +1825,11 @@ static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int origin)
goto out;
case SEEK_DATA:
case SEEK_HOLE:
+ if (offset >= i_size_read(inode)) {
+ mutex_unlock(&inode->i_mutex);
+ return -ENXIO;
+ }
+
ret = find_desired_extent(inode, &offset, origin);
if (ret) {
mutex_unlock(&inode->i_mutex);
@@ -1825,11 +1838,11 @@ static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int origin)
}
if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) {
- ret = -EINVAL;
+ offset = -EINVAL;
goto out;
}
if (offset > inode->i_sb->s_maxbytes) {
- ret = -EINVAL;
+ offset = -EINVAL;
goto out;
}
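The llseek hunk above makes btrfs fail SEEK_DATA/SEEK_HOLE requests at or beyond i_size with -ENXIO, and stores the -EINVAL cases in offset so the error is what the function's out path actually returns. A small userspace probe (editorial sketch; needs a libc that exposes SEEK_DATA) demonstrates the expected behaviour:

#define _GNU_SOURCE
#include <stdio.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	int fd = open(argc > 1 ? argv[1] : "testfile", O_RDONLY);
	off_t end, res;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	end = lseek(fd, 0, SEEK_END);
	res = lseek(fd, end + 4096, SEEK_DATA);	/* deliberately past EOF */
	if (res < 0 && errno == ENXIO)
		printf("SEEK_DATA past EOF -> ENXIO (expected)\n");
	else
		printf("SEEK_DATA past EOF -> %lld\n", (long long)res);
	close(fd);
	return 0;
}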
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 4d14de6d121..b2d004ad66a 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -4018,7 +4018,8 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
memcpy(&location, dentry->d_fsdata, sizeof(struct btrfs_key));
kfree(dentry->d_fsdata);
dentry->d_fsdata = NULL;
- d_clear_need_lookup(dentry);
+ /* This thing is hashed, drop it for now */
+ d_drop(dentry);
} else {
ret = btrfs_inode_by_name(dir, dentry, &location);
}
@@ -4085,7 +4086,15 @@ static void btrfs_dentry_release(struct dentry *dentry)
static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
struct nameidata *nd)
{
- return d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry);
+ struct dentry *ret;
+
+ ret = d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry);
+ if (unlikely(d_need_lookup(dentry))) {
+ spin_lock(&dentry->d_lock);
+ dentry->d_flags &= ~DCACHE_NEED_LOOKUP;
+ spin_unlock(&dentry->d_lock);
+ }
+ return ret;
}
unsigned char btrfs_filetype_table[] = {
@@ -4125,7 +4134,8 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
/* special case for "." */
if (filp->f_pos == 0) {
- over = filldir(dirent, ".", 1, 1, btrfs_ino(inode), DT_DIR);
+ over = filldir(dirent, ".", 1,
+ filp->f_pos, btrfs_ino(inode), DT_DIR);
if (over)
return 0;
filp->f_pos = 1;
@@ -4134,7 +4144,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
if (filp->f_pos == 1) {
u64 pino = parent_ino(filp->f_path.dentry);
over = filldir(dirent, "..", 2,
- 2, pino, DT_DIR);
+ filp->f_pos, pino, DT_DIR);
if (over)
return 0;
filp->f_pos = 2;
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 3351b1b2457..dae5dfe41ba 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1047,7 +1047,16 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
if (!max_to_defrag)
max_to_defrag = last_index - 1;
- while (i <= last_index && defrag_count < max_to_defrag) {
+ /*
+	 * make writeback start from i, so the defrag range can be
+ * written sequentially.
+ */
+ if (i < inode->i_mapping->writeback_index)
+ inode->i_mapping->writeback_index = i;
+
+ while (i <= last_index && defrag_count < max_to_defrag &&
+ (i < (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
+ PAGE_CACHE_SHIFT)) {
/*
* make sure we stop running if someone unmounts
* the FS
@@ -2177,6 +2186,11 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
if (!(src_file->f_mode & FMODE_READ))
goto out_fput;
+ /* don't make the dst file partly checksummed */
+ if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
+ (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM))
+ goto out_fput;
+
ret = -EISDIR;
if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode))
goto out_fput;
@@ -2226,6 +2240,10 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
goto out_unlock;
}
+ /* truncate page cache pages from target inode range */
+ truncate_inode_pages_range(&inode->i_data, destoff,
+ PAGE_CACHE_ALIGN(destoff + len) - 1);
+
/* do any pending delalloc/csum calc on src, one way or
another, and lock file content */
while (1) {
@@ -2242,10 +2260,6 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
btrfs_wait_ordered_range(src, off, len);
}
- /* truncate page cache pages from target inode range */
- truncate_inode_pages_range(&inode->i_data, off,
- ALIGN(off + len, PAGE_CACHE_SIZE) - 1);
-
/* clone data */
key.objectid = btrfs_ino(src);
key.type = BTRFS_EXTENT_DATA_KEY;
@@ -2323,7 +2337,12 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
else
new_key.offset = destoff;
- trans = btrfs_start_transaction(root, 1);
+ /*
+ * 1 - adjusting old extent (we may have to split it)
+ * 1 - add new extent
+ * 1 - inode update
+ */
+ trans = btrfs_start_transaction(root, 3);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto out;
@@ -2442,7 +2461,6 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
if (endoff > inode->i_size)
btrfs_i_size_write(inode, endoff);
- BTRFS_I(inode)->flags = BTRFS_I(src)->flags;
ret = btrfs_update_inode(trans, root, inode);
BUG_ON(ret);
btrfs_end_transaction(trans, root);
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index 69565e5fc6a..426aa464f1a 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -383,36 +383,36 @@ int btrfs_removexattr(struct dentry *dentry, const char *name)
XATTR_REPLACE);
}
-int btrfs_xattr_security_init(struct btrfs_trans_handle *trans,
- struct inode *inode, struct inode *dir,
- const struct qstr *qstr)
+int btrfs_initxattrs(struct inode *inode, const struct xattr *xattr_array,
+ void *fs_info)
{
- int err;
- size_t len;
- void *value;
- char *suffix;
+ const struct xattr *xattr;
+ struct btrfs_trans_handle *trans = fs_info;
char *name;
+ int err = 0;
- err = security_inode_init_security(inode, dir, qstr, &suffix, &value,
- &len);
- if (err) {
- if (err == -EOPNOTSUPP)
- return 0;
- return err;
- }
-
- name = kmalloc(XATTR_SECURITY_PREFIX_LEN + strlen(suffix) + 1,
- GFP_NOFS);
- if (!name) {
- err = -ENOMEM;
- } else {
+ for (xattr = xattr_array; xattr->name != NULL; xattr++) {
+ name = kmalloc(XATTR_SECURITY_PREFIX_LEN +
+ strlen(xattr->name) + 1, GFP_NOFS);
+ if (!name) {
+ err = -ENOMEM;
+ break;
+ }
strcpy(name, XATTR_SECURITY_PREFIX);
- strcpy(name + XATTR_SECURITY_PREFIX_LEN, suffix);
- err = __btrfs_setxattr(trans, inode, name, value, len, 0);
+ strcpy(name + XATTR_SECURITY_PREFIX_LEN, xattr->name);
+ err = __btrfs_setxattr(trans, inode, name,
+ xattr->value, xattr->value_len, 0);
kfree(name);
+ if (err < 0)
+ break;
}
-
- kfree(suffix);
- kfree(value);
return err;
}
+
+int btrfs_xattr_security_init(struct btrfs_trans_handle *trans,
+ struct inode *inode, struct inode *dir,
+ const struct qstr *qstr)
+{
+ return security_inode_init_security(inode, dir, qstr,
+ &btrfs_initxattrs, trans);
+}
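The btrfs conversion above, like the ext2/ext3/ext4/gfs2/jffs2/jfs changes later in this diff, moves to the callback form of security_inode_init_security(): the filesystem passes an initxattrs callback plus an opaque fs_info pointer, and the callback is handed the complete array of initial security xattrs to store. A minimal editorial sketch of that shape; "examplefs" and examplefs_setxattr() are hypothetical:

static int examplefs_initxattrs(struct inode *inode,
				const struct xattr *xattr_array,
				void *fs_info)
{
	const struct xattr *xattr;
	int err = 0;

	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
		/* Store each "security.<name>" attribute on the new inode. */
		err = examplefs_setxattr(inode, xattr->name,
					 xattr->value, xattr->value_len);
		if (err < 0)
			break;
	}
	return err;
}

static int examplefs_init_security(struct inode *inode, struct inode *dir,
				   const struct qstr *qstr)
{
	return security_inode_init_security(inode, dir, qstr,
					    &examplefs_initxattrs, NULL);
}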
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index e76bfeb6826..30acd22147e 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -351,9 +351,7 @@ static int
build_avpair_blob(struct cifs_ses *ses, const struct nls_table *nls_cp)
{
unsigned int dlen;
- unsigned int wlen;
- unsigned int size = 6 * sizeof(struct ntlmssp2_name);
- __le64 curtime;
+ unsigned int size = 2 * sizeof(struct ntlmssp2_name);
char *defdmname = "WORKGROUP";
unsigned char *blobptr;
struct ntlmssp2_name *attrptr;
@@ -365,15 +363,14 @@ build_avpair_blob(struct cifs_ses *ses, const struct nls_table *nls_cp)
}
dlen = strlen(ses->domainName);
- wlen = strlen(ses->server->hostname);
- /* The length of this blob is a size which is
- * six times the size of a structure which holds name/size +
- * two times the unicode length of a domain name +
- * two times the unicode length of a server name +
- * size of a timestamp (which is 8 bytes).
+ /*
+ * The length of this blob is two times the size of a
+ * structure (av pair) which holds name/size
+	 * (for NTLMSSP_AV_NB_DOMAIN_NAME followed by NTLMSSP_AV_EOL) +
+ * unicode length of a netbios domain name
*/
- ses->auth_key.len = size + 2 * (2 * dlen) + 2 * (2 * wlen) + 8;
+ ses->auth_key.len = size + 2 * dlen;
ses->auth_key.response = kzalloc(ses->auth_key.len, GFP_KERNEL);
if (!ses->auth_key.response) {
ses->auth_key.len = 0;
@@ -384,44 +381,15 @@ build_avpair_blob(struct cifs_ses *ses, const struct nls_table *nls_cp)
blobptr = ses->auth_key.response;
attrptr = (struct ntlmssp2_name *) blobptr;
+ /*
+ * As defined in MS-NTLM 3.3.2, just this av pair field
+ * is sufficient as part of the temp
+ */
attrptr->type = cpu_to_le16(NTLMSSP_AV_NB_DOMAIN_NAME);
attrptr->length = cpu_to_le16(2 * dlen);
blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);
cifs_strtoUCS((__le16 *)blobptr, ses->domainName, dlen, nls_cp);
- blobptr += 2 * dlen;
- attrptr = (struct ntlmssp2_name *) blobptr;
-
- attrptr->type = cpu_to_le16(NTLMSSP_AV_NB_COMPUTER_NAME);
- attrptr->length = cpu_to_le16(2 * wlen);
- blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);
- cifs_strtoUCS((__le16 *)blobptr, ses->server->hostname, wlen, nls_cp);
-
- blobptr += 2 * wlen;
- attrptr = (struct ntlmssp2_name *) blobptr;
-
- attrptr->type = cpu_to_le16(NTLMSSP_AV_DNS_DOMAIN_NAME);
- attrptr->length = cpu_to_le16(2 * dlen);
- blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);
- cifs_strtoUCS((__le16 *)blobptr, ses->domainName, dlen, nls_cp);
-
- blobptr += 2 * dlen;
- attrptr = (struct ntlmssp2_name *) blobptr;
-
- attrptr->type = cpu_to_le16(NTLMSSP_AV_DNS_COMPUTER_NAME);
- attrptr->length = cpu_to_le16(2 * wlen);
- blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);
- cifs_strtoUCS((__le16 *)blobptr, ses->server->hostname, wlen, nls_cp);
-
- blobptr += 2 * wlen;
- attrptr = (struct ntlmssp2_name *) blobptr;
-
- attrptr->type = cpu_to_le16(NTLMSSP_AV_TIMESTAMP);
- attrptr->length = cpu_to_le16(sizeof(__le64));
- blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);
- curtime = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
- memcpy(blobptr, &curtime, sizeof(__le64));
-
return 0;
}
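With the trimmed target-info blob above, the buffer carries one NTLMSSP_AV_NB_DOMAIN_NAME pair plus the terminating NTLMSSP_AV_EOL pair that kzalloc() leaves zeroed. A quick length check (editorial sketch; the two-__le16 av-pair header layout is an assumption here, not taken from this hunk):

#include <stdio.h>
#include <string.h>

struct av_pair_hdr {			/* assumed layout: type + length */
	unsigned short type;
	unsigned short length;
};

int main(void)
{
	const char *domain = "WORKGROUP";
	size_t dlen = strlen(domain);
	/* NB_DOMAIN_NAME header + AV_EOL header + UCS-2 domain name */
	size_t len = 2 * sizeof(struct av_pair_hdr) + 2 * dlen;

	printf("auth_key.len = %zu bytes\n", len);	/* 26 for "WORKGROUP" */
	return 0;
}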
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index f93eb948d07..54b8f1e7da9 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -548,6 +548,12 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
struct inode *dir = dentry->d_inode;
struct dentry *child;
+ if (!dir) {
+ dput(dentry);
+ dentry = ERR_PTR(-ENOENT);
+ break;
+ }
+
/* skip separators */
while (*s == sep)
s++;
@@ -563,10 +569,6 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
mutex_unlock(&dir->i_mutex);
dput(dentry);
dentry = child;
- if (!dentry->d_inode) {
- dput(dentry);
- dentry = ERR_PTR(-ENOENT);
- }
} while (!IS_ERR(dentry));
_FreeXid(xid);
kfree(full_path);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index aac37d99a48..a80f7bd97b9 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -4079,7 +4079,8 @@ int CIFSFindNext(const int xid, struct cifs_tcon *tcon,
T2_FNEXT_RSP_PARMS *parms;
char *response_data;
int rc = 0;
- int bytes_returned, name_len;
+ int bytes_returned;
+ unsigned int name_len;
__u16 params, byte_count;
cFYI(1, "In FindNext");
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 9bb4b10f0cd..62abf9fd6ff 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1298,7 +1298,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
/* ignore */
} else if (strnicmp(data, "guest", 5) == 0) {
/* ignore */
- } else if (strnicmp(data, "rw", 2) == 0) {
+ } else if (strnicmp(data, "rw", 2) == 0 && strlen(data) == 2) {
/* ignore */
} else if (strnicmp(data, "ro", 2) == 0) {
/* ignore */
@@ -1401,7 +1401,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
vol->server_ino = 1;
} else if (strnicmp(data, "noserverino", 9) == 0) {
vol->server_ino = 0;
- } else if (strnicmp(data, "rwpidforward", 4) == 0) {
+ } else if (strnicmp(data, "rwpidforward", 12) == 0) {
vol->rwpidforward = 1;
} else if (strnicmp(data, "cifsacl", 7) == 0) {
vol->cifs_acl = 1;
@@ -2018,7 +2018,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
warned_on_ntlm = true;
cERROR(1, "default security mechanism requested. The default "
"security mechanism will be upgraded from ntlm to "
- "ntlmv2 in kernel release 3.1");
+ "ntlmv2 in kernel release 3.2");
}
ses->overrideSecFlg = volume_info->secFlg;
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 2a22fb2989e..c3230888214 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -22,6 +22,7 @@
#include <linux/fs.h>
#include <linux/posix_acl_xattr.h>
#include <linux/slab.h>
+#include <linux/xattr.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
@@ -31,16 +32,8 @@
#define MAX_EA_VALUE_SIZE 65535
#define CIFS_XATTR_DOS_ATTRIB "user.DosAttrib"
#define CIFS_XATTR_CIFS_ACL "system.cifs_acl"
-#define CIFS_XATTR_USER_PREFIX "user."
-#define CIFS_XATTR_SYSTEM_PREFIX "system."
-#define CIFS_XATTR_OS2_PREFIX "os2."
-#define CIFS_XATTR_SECURITY_PREFIX "security."
-#define CIFS_XATTR_TRUSTED_PREFIX "trusted."
-#define XATTR_TRUSTED_PREFIX_LEN 8
-#define XATTR_SECURITY_PREFIX_LEN 9
-/* BB need to add server (Samba e.g) support for security and trusted prefix */
-
+/* BB need to add server (Samba e.g) support for security and trusted prefix */
int cifs_removexattr(struct dentry *direntry, const char *ea_name)
{
@@ -76,8 +69,8 @@ int cifs_removexattr(struct dentry *direntry, const char *ea_name)
}
if (ea_name == NULL) {
cFYI(1, "Null xattr names not supported");
- } else if (strncmp(ea_name, CIFS_XATTR_USER_PREFIX, 5)
- && (strncmp(ea_name, CIFS_XATTR_OS2_PREFIX, 4))) {
+ } else if (strncmp(ea_name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)
+ && (strncmp(ea_name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN))) {
cFYI(1,
"illegal xattr request %s (only user namespace supported)",
ea_name);
@@ -88,7 +81,7 @@ int cifs_removexattr(struct dentry *direntry, const char *ea_name)
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
goto remove_ea_exit;
- ea_name += 5; /* skip past user. prefix */
+ ea_name += XATTR_USER_PREFIX_LEN; /* skip past user. prefix */
rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, NULL,
(__u16)0, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
@@ -149,21 +142,23 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
if (ea_name == NULL) {
cFYI(1, "Null xattr names not supported");
- } else if (strncmp(ea_name, CIFS_XATTR_USER_PREFIX, 5) == 0) {
+ } else if (strncmp(ea_name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)
+ == 0) {
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
goto set_ea_exit;
if (strncmp(ea_name, CIFS_XATTR_DOS_ATTRIB, 14) == 0)
cFYI(1, "attempt to set cifs inode metadata");
- ea_name += 5; /* skip past user. prefix */
+ ea_name += XATTR_USER_PREFIX_LEN; /* skip past user. prefix */
rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, ea_value,
(__u16)value_size, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
- } else if (strncmp(ea_name, CIFS_XATTR_OS2_PREFIX, 4) == 0) {
+ } else if (strncmp(ea_name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN)
+ == 0) {
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
goto set_ea_exit;
- ea_name += 4; /* skip past os2. prefix */
+ ea_name += XATTR_OS2_PREFIX_LEN; /* skip past os2. prefix */
rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, ea_value,
(__u16)value_size, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
@@ -269,7 +264,8 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
/* return alt name if available as pseudo attr */
if (ea_name == NULL) {
cFYI(1, "Null xattr names not supported");
- } else if (strncmp(ea_name, CIFS_XATTR_USER_PREFIX, 5) == 0) {
+ } else if (strncmp(ea_name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)
+ == 0) {
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
goto get_ea_exit;
@@ -277,15 +273,15 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
cFYI(1, "attempt to query cifs inode metadata");
/* revalidate/getattr then populate from inode */
} /* BB add else when above is implemented */
- ea_name += 5; /* skip past user. prefix */
+ ea_name += XATTR_USER_PREFIX_LEN; /* skip past user. prefix */
rc = CIFSSMBQAllEAs(xid, pTcon, full_path, ea_name, ea_value,
buf_size, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
- } else if (strncmp(ea_name, CIFS_XATTR_OS2_PREFIX, 4) == 0) {
+ } else if (strncmp(ea_name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) == 0) {
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
goto get_ea_exit;
- ea_name += 4; /* skip past os2. prefix */
+ ea_name += XATTR_OS2_PREFIX_LEN; /* skip past os2. prefix */
rc = CIFSSMBQAllEAs(xid, pTcon, full_path, ea_name, ea_value,
buf_size, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
@@ -339,10 +335,10 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
cFYI(1, "Query CIFS ACL not supported yet");
#endif /* CONFIG_CIFS_ACL */
} else if (strncmp(ea_name,
- CIFS_XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) == 0) {
+ XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) == 0) {
cFYI(1, "Trusted xattr namespace not supported yet");
} else if (strncmp(ea_name,
- CIFS_XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) == 0) {
+ XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) == 0) {
cFYI(1, "Security xattr namespace not supported yet");
} else
cFYI(1,
diff --git a/fs/ext2/xattr_security.c b/fs/ext2/xattr_security.c
index 5d979b4347b..c922adc8ef4 100644
--- a/fs/ext2/xattr_security.c
+++ b/fs/ext2/xattr_security.c
@@ -46,28 +46,30 @@ ext2_xattr_security_set(struct dentry *dentry, const char *name,
value, size, flags);
}
-int
-ext2_init_security(struct inode *inode, struct inode *dir,
- const struct qstr *qstr)
+int ext2_initxattrs(struct inode *inode, const struct xattr *xattr_array,
+ void *fs_info)
{
- int err;
- size_t len;
- void *value;
- char *name;
+ const struct xattr *xattr;
+ int err = 0;
- err = security_inode_init_security(inode, dir, qstr, &name, &value, &len);
- if (err) {
- if (err == -EOPNOTSUPP)
- return 0;
- return err;
+ for (xattr = xattr_array; xattr->name != NULL; xattr++) {
+ err = ext2_xattr_set(inode, EXT2_XATTR_INDEX_SECURITY,
+ xattr->name, xattr->value,
+ xattr->value_len, 0);
+ if (err < 0)
+ break;
}
- err = ext2_xattr_set(inode, EXT2_XATTR_INDEX_SECURITY,
- name, value, len, 0);
- kfree(name);
- kfree(value);
return err;
}
+int
+ext2_init_security(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr)
+{
+ return security_inode_init_security(inode, dir, qstr,
+ &ext2_initxattrs, NULL);
+}
+
const struct xattr_handler ext2_xattr_security_handler = {
.prefix = XATTR_SECURITY_PREFIX,
.list = ext2_xattr_security_list,
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 04da6acde85..12661e1deed 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -1134,7 +1134,7 @@ struct buffer_head *ext3_bread(handle_t *handle, struct inode *inode,
return bh;
if (buffer_uptodate(bh))
return bh;
- ll_rw_block(READ_META, 1, &bh);
+ ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
wait_on_buffer(bh);
if (buffer_uptodate(bh))
return bh;
@@ -2807,7 +2807,7 @@ make_io:
trace_ext3_load_inode(inode);
get_bh(bh);
bh->b_end_io = end_buffer_read_sync;
- submit_bh(READ_META, bh);
+ submit_bh(READ | REQ_META | REQ_PRIO, bh);
wait_on_buffer(bh);
if (!buffer_uptodate(bh)) {
ext3_error(inode->i_sb, "ext3_get_inode_loc",
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index 5571708b6a5..0629e09f651 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -922,7 +922,8 @@ restart:
bh = ext3_getblk(NULL, dir, b++, 0, &err);
bh_use[ra_max] = bh;
if (bh)
- ll_rw_block(READ_META, 1, &bh);
+ ll_rw_block(READ | REQ_META | REQ_PRIO,
+ 1, &bh);
}
}
if ((bh = bh_use[ra_ptr++]) == NULL)
diff --git a/fs/ext3/xattr_security.c b/fs/ext3/xattr_security.c
index b8d9f83aa5c..3c218b8a51d 100644
--- a/fs/ext3/xattr_security.c
+++ b/fs/ext3/xattr_security.c
@@ -48,28 +48,32 @@ ext3_xattr_security_set(struct dentry *dentry, const char *name,
name, value, size, flags);
}
-int
-ext3_init_security(handle_t *handle, struct inode *inode, struct inode *dir,
- const struct qstr *qstr)
+int ext3_initxattrs(struct inode *inode, const struct xattr *xattr_array,
+ void *fs_info)
{
- int err;
- size_t len;
- void *value;
- char *name;
+ const struct xattr *xattr;
+ handle_t *handle = fs_info;
+ int err = 0;
- err = security_inode_init_security(inode, dir, qstr, &name, &value, &len);
- if (err) {
- if (err == -EOPNOTSUPP)
- return 0;
- return err;
+ for (xattr = xattr_array; xattr->name != NULL; xattr++) {
+ err = ext3_xattr_set_handle(handle, inode,
+ EXT3_XATTR_INDEX_SECURITY,
+ xattr->name, xattr->value,
+ xattr->value_len, 0);
+ if (err < 0)
+ break;
}
- err = ext3_xattr_set_handle(handle, inode, EXT3_XATTR_INDEX_SECURITY,
- name, value, len, 0);
- kfree(name);
- kfree(value);
return err;
}
+int
+ext3_init_security(handle_t *handle, struct inode *inode, struct inode *dir,
+ const struct qstr *qstr)
+{
+ return security_inode_init_security(inode, dir, qstr,
+ &ext3_initxattrs, handle);
+}
+
const struct xattr_handler ext3_xattr_security_handler = {
.prefix = XATTR_SECURITY_PREFIX,
.list = ext3_xattr_security_list,
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 18d2558b762..986e2388f03 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -647,7 +647,7 @@ struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
return bh;
if (buffer_uptodate(bh))
return bh;
- ll_rw_block(READ_META, 1, &bh);
+ ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
wait_on_buffer(bh);
if (buffer_uptodate(bh))
return bh;
@@ -3298,7 +3298,7 @@ make_io:
trace_ext4_load_inode(inode);
get_bh(bh);
bh->b_end_io = end_buffer_read_sync;
- submit_bh(READ_META, bh);
+ submit_bh(READ | REQ_META | REQ_PRIO, bh);
wait_on_buffer(bh);
if (!buffer_uptodate(bh)) {
EXT4_ERROR_INODE_BLOCK(inode, block,
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index f8068c7bae9..1c924faeb6c 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -922,7 +922,8 @@ restart:
bh = ext4_getblk(NULL, dir, b++, 0, &err);
bh_use[ra_max] = bh;
if (bh)
- ll_rw_block(READ_META, 1, &bh);
+ ll_rw_block(READ | REQ_META | REQ_PRIO,
+ 1, &bh);
}
}
if ((bh = bh_use[ra_ptr++]) == NULL)
diff --git a/fs/ext4/xattr_security.c b/fs/ext4/xattr_security.c
index 007c3bfbf09..34e4350dd4d 100644
--- a/fs/ext4/xattr_security.c
+++ b/fs/ext4/xattr_security.c
@@ -48,28 +48,32 @@ ext4_xattr_security_set(struct dentry *dentry, const char *name,
name, value, size, flags);
}
-int
-ext4_init_security(handle_t *handle, struct inode *inode, struct inode *dir,
- const struct qstr *qstr)
+int ext4_initxattrs(struct inode *inode, const struct xattr *xattr_array,
+ void *fs_info)
{
- int err;
- size_t len;
- void *value;
- char *name;
+ const struct xattr *xattr;
+ handle_t *handle = fs_info;
+ int err = 0;
- err = security_inode_init_security(inode, dir, qstr, &name, &value, &len);
- if (err) {
- if (err == -EOPNOTSUPP)
- return 0;
- return err;
+ for (xattr = xattr_array; xattr->name != NULL; xattr++) {
+ err = ext4_xattr_set_handle(handle, inode,
+ EXT4_XATTR_INDEX_SECURITY,
+ xattr->name, xattr->value,
+ xattr->value_len, 0);
+ if (err < 0)
+ break;
}
- err = ext4_xattr_set_handle(handle, inode, EXT4_XATTR_INDEX_SECURITY,
- name, value, len, 0);
- kfree(name);
- kfree(value);
return err;
}
+int
+ext4_init_security(handle_t *handle, struct inode *inode, struct inode *dir,
+ const struct qstr *qstr)
+{
+ return security_inode_init_security(inode, dir, qstr,
+ &ext4_initxattrs, handle);
+}
+
const struct xattr_handler ext4_xattr_security_handler = {
.prefix = XATTR_SECURITY_PREFIX,
.list = ext4_xattr_security_list,
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 900cf986aad..6525b804d5e 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -624,31 +624,29 @@ fail:
return error;
}
-static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip,
- const struct qstr *qstr)
+int gfs2_initxattrs(struct inode *inode, const struct xattr *xattr_array,
+ void *fs_info)
{
- int err;
- size_t len;
- void *value;
- char *name;
-
- err = security_inode_init_security(&ip->i_inode, &dip->i_inode, qstr,
- &name, &value, &len);
-
- if (err) {
- if (err == -EOPNOTSUPP)
- return 0;
- return err;
+ const struct xattr *xattr;
+ int err = 0;
+
+ for (xattr = xattr_array; xattr->name != NULL; xattr++) {
+ err = __gfs2_xattr_set(inode, xattr->name, xattr->value,
+ xattr->value_len, 0,
+ GFS2_EATYPE_SECURITY);
+ if (err < 0)
+ break;
}
-
- err = __gfs2_xattr_set(&ip->i_inode, name, value, len, 0,
- GFS2_EATYPE_SECURITY);
- kfree(value);
- kfree(name);
-
return err;
}
+static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip,
+ const struct qstr *qstr)
+{
+ return security_inode_init_security(&ip->i_inode, &dip->i_inode, qstr,
+ &gfs2_initxattrs, NULL);
+}
+
/**
* gfs2_create_inode - Create a new inode
* @dir: The parent directory
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 85c62923ee2..59864643436 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -624,9 +624,9 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
bh->b_end_io = end_buffer_write_sync;
get_bh(bh);
if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
- submit_bh(WRITE_SYNC | REQ_META, bh);
+ submit_bh(WRITE_SYNC | REQ_META | REQ_PRIO, bh);
else
- submit_bh(WRITE_FLUSH_FUA | REQ_META, bh);
+ submit_bh(WRITE_FLUSH_FUA | REQ_META | REQ_PRIO, bh);
wait_on_buffer(bh);
if (!buffer_uptodate(bh))
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 747238cd9f9..be29858900f 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -37,7 +37,7 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc)
{
struct buffer_head *bh, *head;
int nr_underway = 0;
- int write_op = REQ_META |
+ int write_op = REQ_META | REQ_PRIO |
(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
BUG_ON(!PageLocked(page));
@@ -225,7 +225,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
}
bh->b_end_io = end_buffer_read_sync;
get_bh(bh);
- submit_bh(READ_SYNC | REQ_META, bh);
+ submit_bh(READ_SYNC | REQ_META | REQ_PRIO, bh);
if (!(flags & DIO_WAIT))
return 0;
@@ -435,7 +435,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
if (buffer_uptodate(first_bh))
goto out;
if (!buffer_locked(first_bh))
- ll_rw_block(READ_SYNC | REQ_META, 1, &first_bh);
+ ll_rw_block(READ_SYNC | REQ_META | REQ_PRIO, 1, &first_bh);
dblock++;
extlen--;
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 3bc073a4cf8..079587e5384 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -224,7 +224,7 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
bio->bi_end_io = end_bio_io_page;
bio->bi_private = page;
- submit_bio(READ_SYNC | REQ_META, bio);
+ submit_bio(READ_SYNC | REQ_META | REQ_PRIO, bio);
wait_on_page_locked(page);
bio_put(bio);
if (!PageUptodate(page)) {
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 42e8d23bc04..0e8bb13381e 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -709,7 +709,7 @@ get_a_page:
set_buffer_uptodate(bh);
if (!buffer_uptodate(bh)) {
- ll_rw_block(READ_META, 1, &bh);
+ ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
wait_on_buffer(bh);
if (!buffer_uptodate(bh))
goto unlock_out;
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index c106ca22e81..d24a9b666a2 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -344,6 +344,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
struct inode *root, *inode;
struct qstr str;
struct nls_table *nls = NULL;
+ u64 last_fs_block, last_fs_page;
int err;
err = -EINVAL;
@@ -399,9 +400,13 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
if (!sbi->rsrc_clump_blocks)
sbi->rsrc_clump_blocks = 1;
- err = generic_check_addressable(sbi->alloc_blksz_shift,
- sbi->total_blocks);
- if (err) {
+ err = -EFBIG;
+ last_fs_block = sbi->total_blocks - 1;
+ last_fs_page = (last_fs_block << sbi->alloc_blksz_shift) >>
+ PAGE_CACHE_SHIFT;
+
+ if ((last_fs_block > (sector_t)(~0ULL) >> (sbi->alloc_blksz_shift - 9)) ||
+ (last_fs_page > (pgoff_t)(~0ULL))) {
printk(KERN_ERR "hfs: filesystem size too large.\n");
goto out_free_vhdr;
}
@@ -525,8 +530,8 @@ out_close_cat_tree:
out_close_ext_tree:
hfs_btree_close(sbi->ext_tree);
out_free_vhdr:
- kfree(sbi->s_vhdr);
- kfree(sbi->s_backup_vhdr);
+ kfree(sbi->s_vhdr_buf);
+ kfree(sbi->s_backup_vhdr_buf);
out_unload_nls:
unload_nls(sbi->nls);
unload_nls(nls);
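The open-coded check above bounds the last allocation block against sector_t (shifting by alloc_blksz_shift - 9 to convert blocks to 512-byte sectors) and the last page-cache index against pgoff_t, replacing generic_check_addressable(). The sketch below (editorial; 4 KiB allocation blocks and a 32-bit sector_t are assumptions) shows the block-count ceiling the first comparison enforces:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int alloc_blksz_shift = 12;		/* 4 KiB blocks (assumed) */
	uint64_t max_sector = 0xffffffffULL;		/* 32-bit sector_t (assumed) */
	uint64_t max_blocks = max_sector >> (alloc_blksz_shift - 9);
	uint64_t max_bytes = max_blocks << alloc_blksz_shift;

	printf("last addressable block: %llu (~%llu GiB)\n",
	       (unsigned long long)max_blocks,
	       (unsigned long long)(max_bytes >> 30));
	return 0;
}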
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
index 10e515a0d45..7daf4b852d1 100644
--- a/fs/hfsplus/wrapper.c
+++ b/fs/hfsplus/wrapper.c
@@ -272,9 +272,9 @@ reread:
return 0;
out_free_backup_vhdr:
- kfree(sbi->s_backup_vhdr);
+ kfree(sbi->s_backup_vhdr_buf);
out_free_vhdr:
- kfree(sbi->s_vhdr);
+ kfree(sbi->s_vhdr_buf);
out:
return error;
}
diff --git a/fs/jffs2/security.c b/fs/jffs2/security.c
index cfeb7164b08..0f20208df60 100644
--- a/fs/jffs2/security.c
+++ b/fs/jffs2/security.c
@@ -22,26 +22,29 @@
#include <linux/security.h>
#include "nodelist.h"
-/* ---- Initial Security Label Attachment -------------- */
-int jffs2_init_security(struct inode *inode, struct inode *dir,
- const struct qstr *qstr)
+/* ---- Initial Security Label(s) Attachment callback --- */
+int jffs2_initxattrs(struct inode *inode, const struct xattr *xattr_array,
+ void *fs_info)
{
- int rc;
- size_t len;
- void *value;
- char *name;
+ const struct xattr *xattr;
+ int err = 0;
- rc = security_inode_init_security(inode, dir, qstr, &name, &value, &len);
- if (rc) {
- if (rc == -EOPNOTSUPP)
- return 0;
- return rc;
+ for (xattr = xattr_array; xattr->name != NULL; xattr++) {
+ err = do_jffs2_setxattr(inode, JFFS2_XPREFIX_SECURITY,
+ xattr->name, xattr->value,
+ xattr->value_len, 0);
+ if (err < 0)
+ break;
}
- rc = do_jffs2_setxattr(inode, JFFS2_XPREFIX_SECURITY, name, value, len, 0);
+ return err;
+}
- kfree(name);
- kfree(value);
- return rc;
+/* ---- Initial Security Label(s) Attachment ----------- */
+int jffs2_init_security(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr)
+{
+ return security_inode_init_security(inode, dir, qstr,
+ &jffs2_initxattrs, NULL);
}
/* ---- XATTR Handler for "security.*" ----------------- */
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index e87fedef23d..26683e15b3a 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -1089,38 +1089,37 @@ int jfs_removexattr(struct dentry *dentry, const char *name)
}
#ifdef CONFIG_JFS_SECURITY
-int jfs_init_security(tid_t tid, struct inode *inode, struct inode *dir,
- const struct qstr *qstr)
+int jfs_initxattrs(struct inode *inode, const struct xattr *xattr_array,
+ void *fs_info)
{
- int rc;
- size_t len;
- void *value;
- char *suffix;
+ const struct xattr *xattr;
+ tid_t *tid = fs_info;
char *name;
-
- rc = security_inode_init_security(inode, dir, qstr, &suffix, &value,
- &len);
- if (rc) {
- if (rc == -EOPNOTSUPP)
- return 0;
- return rc;
- }
- name = kmalloc(XATTR_SECURITY_PREFIX_LEN + 1 + strlen(suffix),
- GFP_NOFS);
- if (!name) {
- rc = -ENOMEM;
- goto kmalloc_failed;
+ int err = 0;
+
+ for (xattr = xattr_array; xattr->name != NULL; xattr++) {
+ name = kmalloc(XATTR_SECURITY_PREFIX_LEN +
+ strlen(xattr->name) + 1, GFP_NOFS);
+ if (!name) {
+ err = -ENOMEM;
+ break;
+ }
+ strcpy(name, XATTR_SECURITY_PREFIX);
+ strcpy(name + XATTR_SECURITY_PREFIX_LEN, xattr->name);
+
+ err = __jfs_setxattr(*tid, inode, name,
+ xattr->value, xattr->value_len, 0);
+ kfree(name);
+ if (err < 0)
+ break;
}
- strcpy(name, XATTR_SECURITY_PREFIX);
- strcpy(name + XATTR_SECURITY_PREFIX_LEN, suffix);
-
- rc = __jfs_setxattr(tid, inode, name, value, len, 0);
-
- kfree(name);
-kmalloc_failed:
- kfree(suffix);
- kfree(value);
+ return err;
+}
- return rc;
+int jfs_init_security(tid_t tid, struct inode *inode, struct inode *dir,
+ const struct qstr *qstr)
+{
+ return security_inode_init_security(inode, dir, qstr,
+ &jfs_initxattrs, &tid);
}
#endif
diff --git a/fs/namei.c b/fs/namei.c
index f4788365ea2..0b3138de2a3 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -721,12 +721,6 @@ static int follow_automount(struct path *path, unsigned flags,
if (!path->dentry->d_op || !path->dentry->d_op->d_automount)
return -EREMOTE;
- /* We don't want to mount if someone supplied AT_NO_AUTOMOUNT
- * and this is the terminal part of the path.
- */
- if ((flags & LOOKUP_NO_AUTOMOUNT) && !(flags & LOOKUP_PARENT))
- return -EISDIR; /* we actually want to stop here */
-
/* We don't want to mount if someone's just doing a stat -
* unless they're stat'ing a directory and appended a '/' to
* the name.
@@ -739,7 +733,7 @@ static int follow_automount(struct path *path, unsigned flags,
* of the daemon to instantiate them before they can be used.
*/
if (!(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
- LOOKUP_OPEN | LOOKUP_CREATE)) &&
+ LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_AUTOMOUNT)) &&
path->dentry->d_inode)
return -EISDIR;
diff --git a/fs/namespace.c b/fs/namespace.c
index 22bfe8273c6..b4febb29d3b 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1757,7 +1757,7 @@ static int do_loopback(struct path *path, char *old_name,
return err;
if (!old_name || !*old_name)
return -EINVAL;
- err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
+ err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
if (err)
return err;
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 1ec1a85fa71..3e93e9a1bee 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -56,6 +56,9 @@ enum nfs4_session_state {
NFS4_SESSION_DRAINING,
};
+#define NFS4_RENEW_TIMEOUT 0x01
+#define NFS4_RENEW_DELEGATION_CB 0x02
+
struct nfs4_minor_version_ops {
u32 minor_version;
@@ -225,7 +228,7 @@ struct nfs4_state_recovery_ops {
};
struct nfs4_state_maintenance_ops {
- int (*sched_state_renewal)(struct nfs_client *, struct rpc_cred *);
+ int (*sched_state_renewal)(struct nfs_client *, struct rpc_cred *, unsigned);
struct rpc_cred * (*get_state_renewal_cred_locked)(struct nfs_client *);
int (*renew_lease)(struct nfs_client *, struct rpc_cred *);
};
@@ -237,8 +240,6 @@ extern const struct inode_operations nfs4_dir_inode_operations;
extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *, struct nfs4_setclientid_res *);
extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct nfs4_setclientid_res *arg, struct rpc_cred *);
extern int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred);
-extern int nfs4_proc_async_renew(struct nfs_client *, struct rpc_cred *);
-extern int nfs4_proc_renew(struct nfs_client *, struct rpc_cred *);
extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *);
extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *);
extern int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc);
@@ -349,6 +350,7 @@ extern void nfs4_close_sync(struct nfs4_state *, fmode_t);
extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t);
extern void nfs4_schedule_lease_recovery(struct nfs_client *);
extern void nfs4_schedule_state_manager(struct nfs_client *);
+extern void nfs4_schedule_path_down_recovery(struct nfs_client *clp);
extern void nfs4_schedule_stateid_recovery(const struct nfs_server *, struct nfs4_state *);
extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags);
extern void nfs41_handle_recall_slot(struct nfs_client *clp);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 8c77039e7a8..4700fae1ada 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3374,9 +3374,13 @@ static void nfs4_renew_done(struct rpc_task *task, void *calldata)
if (task->tk_status < 0) {
/* Unless we're shutting down, schedule state recovery! */
- if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) != 0)
+ if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
+ return;
+ if (task->tk_status != NFS4ERR_CB_PATH_DOWN) {
nfs4_schedule_lease_recovery(clp);
- return;
+ return;
+ }
+ nfs4_schedule_path_down_recovery(clp);
}
do_renew_lease(clp, timestamp);
}
@@ -3386,7 +3390,7 @@ static const struct rpc_call_ops nfs4_renew_ops = {
.rpc_release = nfs4_renew_release,
};
-int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred)
+static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
{
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
@@ -3395,9 +3399,11 @@ int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred)
};
struct nfs4_renewdata *data;
+ if (renew_flags == 0)
+ return 0;
if (!atomic_inc_not_zero(&clp->cl_count))
return -EIO;
- data = kmalloc(sizeof(*data), GFP_KERNEL);
+ data = kmalloc(sizeof(*data), GFP_NOFS);
if (data == NULL)
return -ENOMEM;
data->client = clp;
@@ -3406,7 +3412,7 @@ int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred)
&nfs4_renew_ops, data);
}
-int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
+static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
{
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
@@ -5504,11 +5510,13 @@ static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_
return rpc_run_task(&task_setup_data);
}
-static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred)
+static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
{
struct rpc_task *task;
int ret = 0;
+ if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
+ return 0;
task = _nfs41_proc_sequence(clp, cred);
if (IS_ERR(task))
ret = PTR_ERR(task);
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index df8e7f3ca56..dc484c0eae7 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -60,6 +60,7 @@ nfs4_renew_state(struct work_struct *work)
struct rpc_cred *cred;
long lease;
unsigned long last, now;
+ unsigned renew_flags = 0;
ops = clp->cl_mvops->state_renewal_ops;
dprintk("%s: start\n", __func__);
@@ -72,18 +73,23 @@ nfs4_renew_state(struct work_struct *work)
last = clp->cl_last_renewal;
now = jiffies;
/* Are we close to a lease timeout? */
- if (time_after(now, last + lease/3)) {
+ if (time_after(now, last + lease/3))
+ renew_flags |= NFS4_RENEW_TIMEOUT;
+ if (nfs_delegations_present(clp))
+ renew_flags |= NFS4_RENEW_DELEGATION_CB;
+
+ if (renew_flags != 0) {
cred = ops->get_state_renewal_cred_locked(clp);
spin_unlock(&clp->cl_lock);
if (cred == NULL) {
- if (!nfs_delegations_present(clp)) {
+ if (!(renew_flags & NFS4_RENEW_DELEGATION_CB)) {
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
goto out;
}
nfs_expire_all_delegations(clp);
} else {
/* Queue an asynchronous RENEW. */
- ops->sched_state_renewal(clp, cred);
+ ops->sched_state_renewal(clp, cred, renew_flags);
put_rpccred(cred);
goto out_exp;
}
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 72ab97ef3d6..39914be40b0 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1038,6 +1038,12 @@ void nfs4_schedule_lease_recovery(struct nfs_client *clp)
nfs4_schedule_state_manager(clp);
}
+void nfs4_schedule_path_down_recovery(struct nfs_client *clp)
+{
+ nfs_handle_cb_pathdown(clp);
+ nfs4_schedule_state_manager(clp);
+}
+
static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state)
{
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index b961ceac66b..5b19b6aabe1 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -2035,9 +2035,6 @@ static inline void nfs_initialise_sb(struct super_block *sb)
sb->s_blocksize = nfs_block_bits(server->wsize,
&sb->s_blocksize_bits);
- if (server->flags & NFS_MOUNT_NOAC)
- sb->s_flags |= MS_SYNCHRONOUS;
-
sb->s_bdi = &server->backing_dev_info;
nfs_super_set_maxbytes(sb, server->maxfilesize);
@@ -2249,6 +2246,10 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
if (server->flags & NFS_MOUNT_UNSHARED)
compare_super = NULL;
+ /* -o noac implies -o sync */
+ if (server->flags & NFS_MOUNT_NOAC)
+ sb_mntdata.mntflags |= MS_SYNCHRONOUS;
+
/* Get a superblock - note that we may end up sharing one that already exists */
s = sget(fs_type, compare_super, nfs_set_super, &sb_mntdata);
if (IS_ERR(s)) {
@@ -2361,6 +2362,10 @@ nfs_xdev_mount(struct file_system_type *fs_type, int flags,
if (server->flags & NFS_MOUNT_UNSHARED)
compare_super = NULL;
+ /* -o noac implies -o sync */
+ if (server->flags & NFS_MOUNT_NOAC)
+ sb_mntdata.mntflags |= MS_SYNCHRONOUS;
+
/* Get a superblock - note that we may end up sharing one that already exists */
s = sget(&nfs_fs_type, compare_super, nfs_set_super, &sb_mntdata);
if (IS_ERR(s)) {
@@ -2628,6 +2633,10 @@ nfs4_remote_mount(struct file_system_type *fs_type, int flags,
if (server->flags & NFS4_MOUNT_UNSHARED)
compare_super = NULL;
+ /* -o noac implies -o sync */
+ if (server->flags & NFS_MOUNT_NOAC)
+ sb_mntdata.mntflags |= MS_SYNCHRONOUS;
+
/* Get a superblock - note that we may end up sharing one that already exists */
s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata);
if (IS_ERR(s)) {
@@ -2789,7 +2798,7 @@ static struct dentry *nfs_follow_remote_path(struct vfsmount *root_mnt,
goto out_put_mnt_ns;
ret = vfs_path_lookup(root_mnt->mnt_root, root_mnt,
- export_path, LOOKUP_FOLLOW, &path);
+ export_path, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
nfs_referral_loop_unprotect();
put_mnt_ns(ns_private);
@@ -2916,6 +2925,10 @@ nfs4_xdev_mount(struct file_system_type *fs_type, int flags,
if (server->flags & NFS4_MOUNT_UNSHARED)
compare_super = NULL;
+ /* -o noac implies -o sync */
+ if (server->flags & NFS_MOUNT_NOAC)
+ sb_mntdata.mntflags |= MS_SYNCHRONOUS;
+
/* Get a superblock - note that we may end up sharing one that already exists */
s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata);
if (IS_ERR(s)) {
@@ -3003,6 +3016,10 @@ nfs4_remote_referral_mount(struct file_system_type *fs_type, int flags,
if (server->flags & NFS4_MOUNT_UNSHARED)
compare_super = NULL;
+ /* -o noac implies -o sync */
+ if (server->flags & NFS_MOUNT_NOAC)
+ sb_mntdata.mntflags |= MS_SYNCHRONOUS;
+
/* Get a superblock - note that we may end up sharing one that already exists */
s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata);
if (IS_ERR(s)) {
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index b39b37f8091..c9bd2a6b7d4 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -958,7 +958,7 @@ static int nfs_flush_multi(struct nfs_pageio_descriptor *desc, struct list_head
if (!data)
goto out_bad;
data->pagevec[0] = page;
- nfs_write_rpcsetup(req, data, wsize, offset, desc->pg_ioflags);
+ nfs_write_rpcsetup(req, data, len, offset, desc->pg_ioflags);
list_add(&data->list, res);
requests++;
nbytes -= len;
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 81ecf9c0bf0..194fb22ef79 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -7185,20 +7185,9 @@ int ocfs2_init_security_and_acl(struct inode *dir,
{
int ret = 0;
struct buffer_head *dir_bh = NULL;
- struct ocfs2_security_xattr_info si = {
- .enable = 1,
- };
- ret = ocfs2_init_security_get(inode, dir, qstr, &si);
+ ret = ocfs2_init_security_get(inode, dir, qstr, NULL);
if (!ret) {
- ret = ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_SECURITY,
- si.name, si.value, si.value_len,
- XATTR_CREATE);
- if (ret) {
- mlog_errno(ret);
- goto leave;
- }
- } else if (ret != -EOPNOTSUPP) {
mlog_errno(ret);
goto leave;
}
@@ -7255,6 +7244,22 @@ static int ocfs2_xattr_security_set(struct dentry *dentry, const char *name,
name, value, size, flags);
}
+int ocfs2_initxattrs(struct inode *inode, const struct xattr *xattr_array,
+ void *fs_info)
+{
+ const struct xattr *xattr;
+ int err = 0;
+
+ for (xattr = xattr_array; xattr->name != NULL; xattr++) {
+ err = ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_SECURITY,
+ xattr->name, xattr->value,
+ xattr->value_len, XATTR_CREATE);
+ if (err)
+ break;
+ }
+ return err;
+}
+
int ocfs2_init_security_get(struct inode *inode,
struct inode *dir,
const struct qstr *qstr,
@@ -7263,8 +7268,13 @@ int ocfs2_init_security_get(struct inode *inode,
/* check whether ocfs2 support feature xattr */
if (!ocfs2_supports_xattr(OCFS2_SB(dir->i_sb)))
return -EOPNOTSUPP;
- return security_inode_init_security(inode, dir, qstr, &si->name,
- &si->value, &si->value_len);
+ if (si)
+ return security_old_inode_init_security(inode, dir, qstr,
+ &si->name, &si->value,
+ &si->value_len);
+
+ return security_inode_init_security(inode, dir, qstr,
+ &ocfs2_initxattrs, NULL);
}
int ocfs2_init_security_set(handle_t *handle,
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 25b6a887adb..5afaa58a863 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -877,30 +877,54 @@ struct numa_maps_private {
struct numa_maps md;
};
-static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty)
+static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
+ unsigned long nr_pages)
{
int count = page_mapcount(page);
- md->pages++;
+ md->pages += nr_pages;
if (pte_dirty || PageDirty(page))
- md->dirty++;
+ md->dirty += nr_pages;
if (PageSwapCache(page))
- md->swapcache++;
+ md->swapcache += nr_pages;
if (PageActive(page) || PageUnevictable(page))
- md->active++;
+ md->active += nr_pages;
if (PageWriteback(page))
- md->writeback++;
+ md->writeback += nr_pages;
if (PageAnon(page))
- md->anon++;
+ md->anon += nr_pages;
if (count > md->mapcount_max)
md->mapcount_max = count;
- md->node[page_to_nid(page)]++;
+ md->node[page_to_nid(page)] += nr_pages;
+}
+
+static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ struct page *page;
+ int nid;
+
+ if (!pte_present(pte))
+ return NULL;
+
+ page = vm_normal_page(vma, addr, pte);
+ if (!page)
+ return NULL;
+
+ if (PageReserved(page))
+ return NULL;
+
+ nid = page_to_nid(page);
+ if (!node_isset(nid, node_states[N_HIGH_MEMORY]))
+ return NULL;
+
+ return page;
}
static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
@@ -912,26 +936,32 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
pte_t *pte;
md = walk->private;
- orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
- do {
- struct page *page;
- int nid;
+ spin_lock(&walk->mm->page_table_lock);
+ if (pmd_trans_huge(*pmd)) {
+ if (pmd_trans_splitting(*pmd)) {
+ spin_unlock(&walk->mm->page_table_lock);
+ wait_split_huge_page(md->vma->anon_vma, pmd);
+ } else {
+ pte_t huge_pte = *(pte_t *)pmd;
+ struct page *page;
- if (!pte_present(*pte))
- continue;
+ page = can_gather_numa_stats(huge_pte, md->vma, addr);
+ if (page)
+ gather_stats(page, md, pte_dirty(huge_pte),
+ HPAGE_PMD_SIZE/PAGE_SIZE);
+ spin_unlock(&walk->mm->page_table_lock);
+ return 0;
+ }
+ } else {
+ spin_unlock(&walk->mm->page_table_lock);
+ }
- page = vm_normal_page(md->vma, addr, *pte);
+ orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+ do {
+ struct page *page = can_gather_numa_stats(*pte, md->vma, addr);
if (!page)
continue;
-
- if (PageReserved(page))
- continue;
-
- nid = page_to_nid(page);
- if (!node_isset(nid, node_states[N_HIGH_MEMORY]))
- continue;
-
- gather_stats(page, md, pte_dirty(*pte));
+ gather_stats(page, md, pte_dirty(*pte), 1);
} while (pte++, addr += PAGE_SIZE, addr != end);
pte_unmap_unlock(orig_pte, ptl);
@@ -952,7 +982,7 @@ static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
return 0;
md = walk->private;
- gather_stats(page, md, pte_dirty(*pte));
+ gather_stats(page, md, pte_dirty(*pte), 1);
return 0;
}
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index b34bdb25490..10b6be3ca28 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -355,7 +355,7 @@ SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special,
* resolution (think about autofs) and thus deadlocks could arise.
*/
if (cmds == Q_QUOTAON) {
- ret = user_path_at(AT_FDCWD, addr, LOOKUP_FOLLOW, &path);
+ ret = user_path_at(AT_FDCWD, addr, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
if (ret)
pathp = ERR_PTR(ret);
else
diff --git a/fs/reiserfs/xattr_security.c b/fs/reiserfs/xattr_security.c
index ef66c18a933..534668fa41b 100644
--- a/fs/reiserfs/xattr_security.c
+++ b/fs/reiserfs/xattr_security.c
@@ -66,8 +66,8 @@ int reiserfs_security_init(struct inode *dir, struct inode *inode,
if (IS_PRIVATE(dir))
return 0;
- error = security_inode_init_security(inode, dir, qstr, &sec->name,
- &sec->value, &sec->length);
+ error = security_old_inode_init_security(inode, dir, qstr, &sec->name,
+ &sec->value, &sec->length);
if (error) {
if (error == -EOPNOTSUPP)
error = 0;
diff --git a/fs/stat.c b/fs/stat.c
index ba5316ffac6..78a3aa83c7e 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -81,8 +81,6 @@ int vfs_fstatat(int dfd, const char __user *filename, struct kstat *stat,
if (!(flag & AT_SYMLINK_NOFOLLOW))
lookup_flags |= LOOKUP_FOLLOW;
- if (flag & AT_NO_AUTOMOUNT)
- lookup_flags |= LOOKUP_NO_AUTOMOUNT;
if (flag & AT_EMPTY_PATH)
lookup_flags |= LOOKUP_EMPTY;
diff --git a/fs/xattr.c b/fs/xattr.c
index f060663ab70..67583de8218 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -14,6 +14,7 @@
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/security.h>
+#include <linux/evm.h>
#include <linux/syscalls.h>
#include <linux/module.h>
#include <linux/fsnotify.h>
@@ -166,6 +167,64 @@ out_noalloc:
}
EXPORT_SYMBOL_GPL(xattr_getsecurity);
+/*
+ * vfs_getxattr_alloc - allocate memory, if necessary, before calling getxattr
+ *
+ * Allocate memory, if not already allocated, or re-allocate correct size,
+ * before retrieving the extended attribute.
+ *
+ * Returns the result of alloc, if failed, or the getxattr operation.
+ */
+ssize_t
+vfs_getxattr_alloc(struct dentry *dentry, const char *name, char **xattr_value,
+ size_t xattr_size, gfp_t flags)
+{
+ struct inode *inode = dentry->d_inode;
+ char *value = *xattr_value;
+ int error;
+
+ error = xattr_permission(inode, name, MAY_READ);
+ if (error)
+ return error;
+
+ if (!inode->i_op->getxattr)
+ return -EOPNOTSUPP;
+
+ error = inode->i_op->getxattr(dentry, name, NULL, 0);
+ if (error < 0)
+ return error;
+
+ if (!value || (error > xattr_size)) {
+ value = krealloc(*xattr_value, error + 1, flags);
+ if (!value)
+ return -ENOMEM;
+ memset(value, 0, error + 1);
+ }
+
+ error = inode->i_op->getxattr(dentry, name, value, error);
+ *xattr_value = value;
+ return error;
+}
+
+/* Compare an extended attribute value with the given value */
+int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
+ const char *value, size_t size, gfp_t flags)
+{
+ char *xattr_value = NULL;
+ int rc;
+
+ rc = vfs_getxattr_alloc(dentry, xattr_name, &xattr_value, 0, flags);
+ if (rc < 0)
+ return rc;
+
+ if ((rc != size) || (memcmp(xattr_value, value, rc) != 0))
+ rc = -EINVAL;
+ else
+ rc = 0;
+ kfree(xattr_value);
+ return rc;
+}
+
ssize_t
vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
{
@@ -243,8 +302,10 @@ vfs_removexattr(struct dentry *dentry, const char *name)
error = inode->i_op->removexattr(dentry, name);
mutex_unlock(&inode->i_mutex);
- if (!error)
+ if (!error) {
fsnotify_xattr(dentry);
+ evm_inode_post_removexattr(dentry, name);
+ }
return error;
}
EXPORT_SYMBOL_GPL(vfs_removexattr);
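The vfs_getxattr_alloc() and vfs_xattr_cmp() helpers added above are aimed at callers (EVM in this series) that do not know the xattr size up front. A hedged usage sketch, using the signatures introduced in this hunk:

/*
 * Illustrative sketch only, not part of the patch: fetch an xattr whose size
 * is not known in advance, then compare another one against an expected value.
 */
static int example_check_xattr(struct dentry *dentry,
			       const char *expected, size_t expected_len)
{
	char *buf = NULL;	/* (re)allocated by vfs_getxattr_alloc() */
	ssize_t len;
	int rc;

	len = vfs_getxattr_alloc(dentry, XATTR_NAME_SELINUX, &buf, 0, GFP_NOFS);
	if (len < 0)
		return len;
	/* ... use buf[0..len) ... */
	kfree(buf);

	/* One-shot read-and-compare: 0 on match, negative errno otherwise. */
	rc = vfs_xattr_cmp(dentry, XATTR_NAME_EVM, expected, expected_len,
			   GFP_NOFS);
	return rc;
}
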
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index cac2ecfa674..ef43fce519a 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -629,7 +629,7 @@ xfs_buf_item_push(
* the xfsbufd to get this buffer written. We have to unlock the buffer
* to allow the xfsbufd to write it, too.
*/
-STATIC void
+STATIC bool
xfs_buf_item_pushbuf(
struct xfs_log_item *lip)
{
@@ -643,6 +643,7 @@ xfs_buf_item_pushbuf(
xfs_buf_delwri_promote(bp);
xfs_buf_relse(bp);
+ return true;
}
STATIC void
diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c
index 9e0e2fa3f2c..bb3f71d236d 100644
--- a/fs/xfs/xfs_dquot_item.c
+++ b/fs/xfs/xfs_dquot_item.c
@@ -183,13 +183,14 @@ xfs_qm_dqunpin_wait(
* search the buffer cache can be a time consuming thing, and AIL lock is a
* spinlock.
*/
-STATIC void
+STATIC bool
xfs_qm_dquot_logitem_pushbuf(
struct xfs_log_item *lip)
{
struct xfs_dq_logitem *qlip = DQUOT_ITEM(lip);
struct xfs_dquot *dqp = qlip->qli_dquot;
struct xfs_buf *bp;
+ bool ret = true;
ASSERT(XFS_DQ_IS_LOCKED(dqp));
@@ -201,17 +202,20 @@ xfs_qm_dquot_logitem_pushbuf(
if (completion_done(&dqp->q_flush) ||
!(lip->li_flags & XFS_LI_IN_AIL)) {
xfs_dqunlock(dqp);
- return;
+ return true;
}
bp = xfs_incore(dqp->q_mount->m_ddev_targp, qlip->qli_format.qlf_blkno,
dqp->q_mount->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
xfs_dqunlock(dqp);
if (!bp)
- return;
+ return true;
if (XFS_BUF_ISDELAYWRITE(bp))
xfs_buf_delwri_promote(bp);
+ if (xfs_buf_ispinned(bp))
+ ret = false;
xfs_buf_relse(bp);
+ return ret;
}
/*
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 588406dc6a3..836ad80d4f2 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -708,13 +708,14 @@ xfs_inode_item_committed(
* marked delayed write. If that's the case, we'll promote it and that will
* allow the caller to write the buffer by triggering the xfsbufd to run.
*/
-STATIC void
+STATIC bool
xfs_inode_item_pushbuf(
struct xfs_log_item *lip)
{
struct xfs_inode_log_item *iip = INODE_ITEM(lip);
struct xfs_inode *ip = iip->ili_inode;
struct xfs_buf *bp;
+ bool ret = true;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));
@@ -725,7 +726,7 @@ xfs_inode_item_pushbuf(
if (completion_done(&ip->i_flush) ||
!(lip->li_flags & XFS_LI_IN_AIL)) {
xfs_iunlock(ip, XFS_ILOCK_SHARED);
- return;
+ return true;
}
bp = xfs_incore(ip->i_mount->m_ddev_targp, iip->ili_format.ilf_blkno,
@@ -733,10 +734,13 @@ xfs_inode_item_pushbuf(
xfs_iunlock(ip, XFS_ILOCK_SHARED);
if (!bp)
- return;
+ return true;
if (XFS_BUF_ISDELAYWRITE(bp))
xfs_buf_delwri_promote(bp);
+ if (xfs_buf_ispinned(bp))
+ ret = false;
xfs_buf_relse(bp);
+ return ret;
}
/*
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 673704fab74..28856accb4f 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -102,37 +102,38 @@ xfs_mark_inode_dirty(
}
+
+int xfs_initxattrs(struct inode *inode, const struct xattr *xattr_array,
+ void *fs_info)
+{
+ const struct xattr *xattr;
+ struct xfs_inode *ip = XFS_I(inode);
+ int error = 0;
+
+ for (xattr = xattr_array; xattr->name != NULL; xattr++) {
+ error = xfs_attr_set(ip, xattr->name, xattr->value,
+ xattr->value_len, ATTR_SECURE);
+ if (error < 0)
+ break;
+ }
+ return error;
+}
+
/*
* Hook in SELinux. This is not quite correct yet, what we really need
* here (as we do for default ACLs) is a mechanism by which creation of
* these attrs can be journalled at inode creation time (along with the
* inode, of course, such that log replay can't cause these to be lost).
*/
+
STATIC int
xfs_init_security(
struct inode *inode,
struct inode *dir,
const struct qstr *qstr)
{
- struct xfs_inode *ip = XFS_I(inode);
- size_t length;
- void *value;
- unsigned char *name;
- int error;
-
- error = security_inode_init_security(inode, dir, qstr, (char **)&name,
- &value, &length);
- if (error) {
- if (error == -EOPNOTSUPP)
- return 0;
- return -error;
- }
-
- error = xfs_attr_set(ip, name, value, length, ATTR_SECURE);
-
- kfree(name);
- kfree(value);
- return error;
+ return security_inode_init_security(inode, dir, qstr,
+ &xfs_initxattrs, NULL);
}
static void
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index 1e8a45e74c3..828662f70d6 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -68,6 +68,8 @@
#include <linux/ctype.h>
#include <linux/writeback.h>
#include <linux/capability.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
#include <linux/list_sort.h>
#include <asm/page.h>
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 2366c54cc4f..5cf06b85fd9 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1652,24 +1652,13 @@ xfs_init_workqueues(void)
*/
xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_CPU_INTENSIVE, 8);
if (!xfs_syncd_wq)
- goto out;
-
- xfs_ail_wq = alloc_workqueue("xfsail", WQ_CPU_INTENSIVE, 8);
- if (!xfs_ail_wq)
- goto out_destroy_syncd;
-
+ return -ENOMEM;
return 0;
-
-out_destroy_syncd:
- destroy_workqueue(xfs_syncd_wq);
-out:
- return -ENOMEM;
}
STATIC void
xfs_destroy_workqueues(void)
{
- destroy_workqueue(xfs_ail_wq);
destroy_workqueue(xfs_syncd_wq);
}
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 06a9759b635..53597f4db9b 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -350,7 +350,7 @@ typedef struct xfs_item_ops {
void (*iop_unlock)(xfs_log_item_t *);
xfs_lsn_t (*iop_committed)(xfs_log_item_t *, xfs_lsn_t);
void (*iop_push)(xfs_log_item_t *);
- void (*iop_pushbuf)(xfs_log_item_t *);
+ bool (*iop_pushbuf)(xfs_log_item_t *);
void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t);
} xfs_item_ops_t;
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index c15aa29fa16..3a1e7ca54c2 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -28,8 +28,6 @@
#include "xfs_trans_priv.h"
#include "xfs_error.h"
-struct workqueue_struct *xfs_ail_wq; /* AIL workqueue */
-
#ifdef DEBUG
/*
* Check that the list is sorted as it should be.
@@ -356,16 +354,10 @@ xfs_ail_delete(
xfs_trans_ail_cursor_clear(ailp, lip);
}
-/*
- * xfs_ail_worker does the work of pushing on the AIL. It will requeue itself
- * to run at a later time if there is more work to do to complete the push.
- */
-STATIC void
-xfs_ail_worker(
- struct work_struct *work)
+static long
+xfsaild_push(
+ struct xfs_ail *ailp)
{
- struct xfs_ail *ailp = container_of(to_delayed_work(work),
- struct xfs_ail, xa_work);
xfs_mount_t *mp = ailp->xa_mount;
struct xfs_ail_cursor cur;
xfs_log_item_t *lip;
@@ -427,8 +419,13 @@ xfs_ail_worker(
case XFS_ITEM_PUSHBUF:
XFS_STATS_INC(xs_push_ail_pushbuf);
- IOP_PUSHBUF(lip);
- ailp->xa_last_pushed_lsn = lsn;
+
+ if (!IOP_PUSHBUF(lip)) {
+ stuck++;
+ flush_log = 1;
+ } else {
+ ailp->xa_last_pushed_lsn = lsn;
+ }
push_xfsbufd = 1;
break;
@@ -440,7 +437,6 @@ xfs_ail_worker(
case XFS_ITEM_LOCKED:
XFS_STATS_INC(xs_push_ail_locked);
- ailp->xa_last_pushed_lsn = lsn;
stuck++;
break;
@@ -501,20 +497,6 @@ out_done:
/* We're past our target or empty, so idle */
ailp->xa_last_pushed_lsn = 0;
- /*
- * We clear the XFS_AIL_PUSHING_BIT first before checking
- * whether the target has changed. If the target has changed,
- * this pushes the requeue race directly onto the result of the
- * atomic test/set bit, so we are guaranteed that either the
- * the pusher that changed the target or ourselves will requeue
- * the work (but not both).
- */
- clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags);
- smp_rmb();
- if (XFS_LSN_CMP(ailp->xa_target, target) == 0 ||
- test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
- return;
-
tout = 50;
} else if (XFS_LSN_CMP(lsn, target) >= 0) {
/*
@@ -537,9 +519,30 @@ out_done:
tout = 20;
}
- /* There is more to do, requeue us. */
- queue_delayed_work(xfs_syncd_wq, &ailp->xa_work,
- msecs_to_jiffies(tout));
+ return tout;
+}
+
+static int
+xfsaild(
+ void *data)
+{
+ struct xfs_ail *ailp = data;
+ long tout = 0; /* milliseconds */
+
+ while (!kthread_should_stop()) {
+ if (tout && tout <= 20)
+ __set_current_state(TASK_KILLABLE);
+ else
+ __set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(tout ?
+ msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);
+
+ try_to_freeze();
+
+ tout = xfsaild_push(ailp);
+ }
+
+ return 0;
}
/*
@@ -574,8 +577,9 @@ xfs_ail_push(
*/
smp_wmb();
xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn);
- if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
- queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0);
+ smp_wmb();
+
+ wake_up_process(ailp->xa_task);
}
/*
@@ -813,9 +817,18 @@ xfs_trans_ail_init(
INIT_LIST_HEAD(&ailp->xa_ail);
INIT_LIST_HEAD(&ailp->xa_cursors);
spin_lock_init(&ailp->xa_lock);
- INIT_DELAYED_WORK(&ailp->xa_work, xfs_ail_worker);
+
+ ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
+ ailp->xa_mount->m_fsname);
+ if (IS_ERR(ailp->xa_task))
+ goto out_free_ailp;
+
mp->m_ail = ailp;
return 0;
+
+out_free_ailp:
+ kmem_free(ailp);
+ return ENOMEM;
}
void
@@ -824,6 +837,6 @@ xfs_trans_ail_destroy(
{
struct xfs_ail *ailp = mp->m_ail;
- cancel_delayed_work_sync(&ailp->xa_work);
+ kthread_stop(ailp->xa_task);
kmem_free(ailp);
}
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
index 212946b9723..22750b5e4a8 100644
--- a/fs/xfs/xfs_trans_priv.h
+++ b/fs/xfs/xfs_trans_priv.h
@@ -64,23 +64,17 @@ struct xfs_ail_cursor {
*/
struct xfs_ail {
struct xfs_mount *xa_mount;
+ struct task_struct *xa_task;
struct list_head xa_ail;
xfs_lsn_t xa_target;
struct list_head xa_cursors;
spinlock_t xa_lock;
- struct delayed_work xa_work;
xfs_lsn_t xa_last_pushed_lsn;
- unsigned long xa_flags;
};
-#define XFS_AIL_PUSHING_BIT 0
-
/*
* From xfs_trans_ail.c
*/
-
-extern struct workqueue_struct *xfs_ail_wq; /* AIL workqueue */
-
void xfs_trans_ail_update_bulk(struct xfs_ail *ailp,
struct xfs_ail_cursor *cur,
struct xfs_log_item **log_items, int nr_items,
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index db22d136ad0..b5e2e4c6b01 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -222,7 +222,6 @@
VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
*(__tracepoints_ptrs) /* Tracepoints: pointer array */\
VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .; \
- *(__markers_strings) /* Markers: strings */ \
*(__tracepoints_strings)/* Tracepoints: strings */ \
} \
\
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 32f0076e844..71fc53bb8f1 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -124,6 +124,7 @@ enum rq_flag_bits {
__REQ_SYNC, /* request is sync (sync write or read) */
__REQ_META, /* metadata io request */
+ __REQ_PRIO, /* boost priority in cfq */
__REQ_DISCARD, /* request to discard sectors */
__REQ_SECURE, /* secure discard (used with __REQ_DISCARD) */
@@ -161,14 +162,15 @@ enum rq_flag_bits {
#define REQ_FAILFAST_DRIVER (1 << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC (1 << __REQ_SYNC)
#define REQ_META (1 << __REQ_META)
+#define REQ_PRIO (1 << __REQ_PRIO)
#define REQ_DISCARD (1 << __REQ_DISCARD)
#define REQ_NOIDLE (1 << __REQ_NOIDLE)
#define REQ_FAILFAST_MASK \
(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
#define REQ_COMMON_MASK \
- (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_DISCARD | \
- REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE)
+ (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \
+ REQ_DISCARD | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE)
#define REQ_CLONE_MASK REQ_COMMON_MASK
#define REQ_RAHEAD (1 << __REQ_RAHEAD)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 84b15d54f8c..7fbaa910334 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -873,7 +873,6 @@ struct blk_plug {
struct list_head list;
struct list_head cb_list;
unsigned int should_sort;
- unsigned int count;
};
#define BLK_MAX_REQUEST_COUNT 16
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 3fa1f3d90ce..99e3e50b5c5 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -197,6 +197,11 @@ struct dm_target {
* whether or not its underlying devices have support.
*/
unsigned discards_supported:1;
+
+ /*
+ * Set if this target does not return zeroes on discarded blocks.
+ */
+ unsigned discard_zeroes_data_unsupported:1;
};
/* Each target can link one of these into the table */
diff --git a/include/linux/evm.h b/include/linux/evm.h
new file mode 100644
index 00000000000..9fc13a76092
--- /dev/null
+++ b/include/linux/evm.h
@@ -0,0 +1,100 @@
+/*
+ * evm.h
+ *
+ * Copyright (c) 2009 IBM Corporation
+ * Author: Mimi Zohar <zohar@us.ibm.com>
+ */
+
+#ifndef _LINUX_EVM_H
+#define _LINUX_EVM_H
+
+#include <linux/integrity.h>
+#include <linux/xattr.h>
+
+struct integrity_iint_cache;
+
+#ifdef CONFIG_EVM
+extern enum integrity_status evm_verifyxattr(struct dentry *dentry,
+ const char *xattr_name,
+ void *xattr_value,
+ size_t xattr_value_len,
+ struct integrity_iint_cache *iint);
+extern int evm_inode_setattr(struct dentry *dentry, struct iattr *attr);
+extern void evm_inode_post_setattr(struct dentry *dentry, int ia_valid);
+extern int evm_inode_setxattr(struct dentry *dentry, const char *name,
+ const void *value, size_t size);
+extern void evm_inode_post_setxattr(struct dentry *dentry,
+ const char *xattr_name,
+ const void *xattr_value,
+ size_t xattr_value_len);
+extern int evm_inode_removexattr(struct dentry *dentry, const char *xattr_name);
+extern void evm_inode_post_removexattr(struct dentry *dentry,
+ const char *xattr_name);
+extern int evm_inode_init_security(struct inode *inode,
+ const struct xattr *xattr_array,
+ struct xattr *evm);
+#ifdef CONFIG_FS_POSIX_ACL
+extern int posix_xattr_acl(const char *xattrname);
+#else
+static inline int posix_xattr_acl(const char *xattrname)
+{
+ return 0;
+}
+#endif
+#else
+#ifdef CONFIG_INTEGRITY
+static inline enum integrity_status evm_verifyxattr(struct dentry *dentry,
+ const char *xattr_name,
+ void *xattr_value,
+ size_t xattr_value_len,
+ struct integrity_iint_cache *iint)
+{
+ return INTEGRITY_UNKNOWN;
+}
+#endif
+
+static inline int evm_inode_setattr(struct dentry *dentry, struct iattr *attr)
+{
+ return 0;
+}
+
+static inline void evm_inode_post_setattr(struct dentry *dentry, int ia_valid)
+{
+ return;
+}
+
+static inline int evm_inode_setxattr(struct dentry *dentry, const char *name,
+ const void *value, size_t size)
+{
+ return 0;
+}
+
+static inline void evm_inode_post_setxattr(struct dentry *dentry,
+ const char *xattr_name,
+ const void *xattr_value,
+ size_t xattr_value_len)
+{
+ return;
+}
+
+static inline int evm_inode_removexattr(struct dentry *dentry,
+ const char *xattr_name)
+{
+ return 0;
+}
+
+static inline void evm_inode_post_removexattr(struct dentry *dentry,
+ const char *xattr_name)
+{
+ return;
+}
+
+static inline int evm_inode_init_security(struct inode *inode,
+ const struct xattr *xattr_array,
+ struct xattr *evm)
+{
+ return 0;
+}
+
+#endif /* CONFIG_EVM_H */
+#endif /* LINUX_EVM_H */
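The #else branch above supplies no-op stubs so that generic code can call the EVM hooks without #ifdef CONFIG_EVM guards, which is how fs/xattr.c now calls evm_inode_post_removexattr() unconditionally. A small sketch of a caller relying on that, with example_lower_removexattr() as a hypothetical stand-in:

/*
 * Illustrative sketch only: with the stubs above, the post hook compiles to
 * a no-op when CONFIG_EVM is not set, so no conditional compilation is needed.
 */
static int example_remove_xattr(struct dentry *dentry, const char *name)
{
	int error = example_lower_removexattr(dentry, name);	/* hypothetical */

	if (!error) {
		fsnotify_xattr(dentry);
		evm_inode_post_removexattr(dentry, name);
	}
	return error;
}
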
diff --git a/include/linux/fs.h b/include/linux/fs.h
index c2bd68f2277..277f497923a 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -162,10 +162,8 @@ struct inodes_stat_t {
#define READA RWA_MASK
#define READ_SYNC (READ | REQ_SYNC)
-#define READ_META (READ | REQ_META)
#define WRITE_SYNC (WRITE | REQ_SYNC | REQ_NOIDLE)
#define WRITE_ODIRECT (WRITE | REQ_SYNC)
-#define WRITE_META (WRITE | REQ_META)
#define WRITE_FLUSH (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH)
#define WRITE_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FUA)
#define WRITE_FLUSH_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 9cf8e7ae745..deed5f9a1e1 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -71,6 +71,7 @@
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/input.h>
+#include <linux/semaphore.h>
/*
* We parse each description item into this structure. Short items data
@@ -312,6 +313,7 @@ struct hid_item {
#define HID_QUIRK_BADPAD 0x00000020
#define HID_QUIRK_MULTI_INPUT 0x00000040
#define HID_QUIRK_HIDINPUT_FORCE 0x00000080
+#define HID_QUIRK_MULTITOUCH 0x00000100
#define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000
#define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000
#define HID_QUIRK_NO_INIT_REPORTS 0x20000000
@@ -475,6 +477,7 @@ struct hid_device { /* device report descriptor */
unsigned country; /* HID country */
struct hid_report_enum report_enum[HID_REPORT_TYPES];
+ struct semaphore driver_lock; /* protects the current driver */
struct device dev; /* device */
struct hid_driver *driver;
struct hid_ll_driver *ll_driver;
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 09e6e62f995..6ac8e50c6cf 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -15,8 +15,6 @@ struct linux_binprm;
#ifdef CONFIG_IMA
extern int ima_bprm_check(struct linux_binprm *bprm);
-extern int ima_inode_alloc(struct inode *inode);
-extern void ima_inode_free(struct inode *inode);
extern int ima_file_check(struct file *file, int mask);
extern void ima_file_free(struct file *file);
extern int ima_file_mmap(struct file *file, unsigned long prot);
@@ -27,16 +25,6 @@ static inline int ima_bprm_check(struct linux_binprm *bprm)
return 0;
}
-static inline int ima_inode_alloc(struct inode *inode)
-{
- return 0;
-}
-
-static inline void ima_inode_free(struct inode *inode)
-{
- return;
-}
-
static inline int ima_file_check(struct file *file, int mask)
{
return 0;
@@ -51,6 +39,5 @@ static inline int ima_file_mmap(struct file *file, unsigned long prot)
{
return 0;
}
-
#endif /* CONFIG_IMA_H */
#endif /* _LINUX_IMA_H */
diff --git a/include/linux/integrity.h b/include/linux/integrity.h
new file mode 100644
index 00000000000..a0c41256cb9
--- /dev/null
+++ b/include/linux/integrity.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2009 IBM Corporation
+ * Author: Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ */
+
+#ifndef _LINUX_INTEGRITY_H
+#define _LINUX_INTEGRITY_H
+
+#include <linux/fs.h>
+
+enum integrity_status {
+ INTEGRITY_PASS = 0,
+ INTEGRITY_FAIL,
+ INTEGRITY_NOLABEL,
+ INTEGRITY_NOXATTRS,
+ INTEGRITY_UNKNOWN,
+};
+
+/* List of EVM protected security xattrs */
+#ifdef CONFIG_INTEGRITY
+extern int integrity_inode_alloc(struct inode *inode);
+extern void integrity_inode_free(struct inode *inode);
+
+#else
+static inline int integrity_inode_alloc(struct inode *inode)
+{
+ return 0;
+}
+
+static inline void integrity_inode_free(struct inode *inode)
+{
+ return;
+}
+#endif /* CONFIG_INTEGRITY_H */
+#endif /* _LINUX_INTEGRITY_H */
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index e807ad687a0..3ad553e8eae 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -80,6 +80,7 @@ extern void irq_domain_del(struct irq_domain *domain);
#endif /* CONFIG_IRQ_DOMAIN */
#if defined(CONFIG_IRQ_DOMAIN) && defined(CONFIG_OF_IRQ)
+extern struct irq_domain_ops irq_domain_simple_ops;
extern void irq_domain_add_simple(struct device_node *controller, int irq_base);
extern void irq_domain_generate_simple(const struct of_device_id *match,
u64 phys_base, unsigned int irq_start);
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 46ac9a50528..8eefcf7e95e 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -382,7 +382,7 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
}
extern int hex_to_bin(char ch);
-extern void hex2bin(u8 *dst, const char *src, size_t count);
+extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
/*
* General tracing related utility functions - trace_printk(),
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 2c366b52f50..aace6b8691a 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -553,6 +553,7 @@ struct kvm_ppc_pvinfo {
#define KVM_CAP_SPAPR_TCE 63
#define KVM_CAP_PPC_SMT 64
#define KVM_CAP_PPC_RMA 65
+#define KVM_CAP_S390_GMAP 71
#ifdef KVM_CAP_IRQ_ROUTING
diff --git a/include/linux/mfd/wm8994/pdata.h b/include/linux/mfd/wm8994/pdata.h
index d12f8d635a8..97cf4f27d64 100644
--- a/include/linux/mfd/wm8994/pdata.h
+++ b/include/linux/mfd/wm8994/pdata.h
@@ -26,7 +26,7 @@ struct wm8994_ldo_pdata {
struct regulator_init_data *init_data;
};
-#define WM8994_CONFIGURE_GPIO 0x8000
+#define WM8994_CONFIGURE_GPIO 0x10000
#define WM8994_DRC_REGS 5
#define WM8994_EQ_REGS 20
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 76fe2c62ae7..409328d1cbb 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -48,11 +48,12 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
*/
#define LOOKUP_FOLLOW 0x0001
#define LOOKUP_DIRECTORY 0x0002
+#define LOOKUP_AUTOMOUNT 0x0004
#define LOOKUP_PARENT 0x0010
#define LOOKUP_REVAL 0x0020
#define LOOKUP_RCU 0x0040
-#define LOOKUP_NO_AUTOMOUNT 0x0080
+
/*
* Intent data
*/
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 8c230cbcbb4..9fc01226055 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -621,8 +621,9 @@ struct pci_driver {
extern void pcie_bus_configure_settings(struct pci_bus *bus, u8 smpss);
enum pcie_bus_config_types {
- PCIE_BUS_PERFORMANCE,
+ PCIE_BUS_TUNE_OFF,
PCIE_BUS_SAFE,
+ PCIE_BUS_PERFORMANCE,
PCIE_BUS_PEER2PEER,
};
diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h
index e07e2742a86..1dc420ba213 100644
--- a/include/linux/ptp_classify.h
+++ b/include/linux/ptp_classify.h
@@ -51,6 +51,7 @@
#define PTP_CLASS_V2_VLAN (PTP_CLASS_V2 | PTP_CLASS_VLAN)
#define PTP_EV_PORT 319
+#define PTP_GEN_BIT 0x08 /* indicates general message, if set in message type */
#define OFF_ETYPE 12
#define OFF_IHL 14
@@ -116,14 +117,20 @@ static inline int ptp_filter_init(struct sock_filter *f, int len)
{OP_OR, 0, 0, PTP_CLASS_IPV6 }, /* */ \
{OP_RETA, 0, 0, 0 }, /* */ \
/*L3x*/ {OP_RETK, 0, 0, PTP_CLASS_NONE }, /* */ \
-/*L40*/ {OP_JEQ, 0, 6, ETH_P_8021Q }, /* f goto L50 */ \
+/*L40*/ {OP_JEQ, 0, 9, ETH_P_8021Q }, /* f goto L50 */ \
{OP_LDH, 0, 0, OFF_ETYPE + 4 }, /* */ \
- {OP_JEQ, 0, 9, ETH_P_1588 }, /* f goto L60 */ \
+ {OP_JEQ, 0, 15, ETH_P_1588 }, /* f goto L60 */ \
+ {OP_LDB, 0, 0, ETH_HLEN + VLAN_HLEN }, /* */ \
+ {OP_AND, 0, 0, PTP_GEN_BIT }, /* */ \
+ {OP_JEQ, 0, 12, 0 }, /* f goto L6x */ \
{OP_LDH, 0, 0, ETH_HLEN + VLAN_HLEN }, /* */ \
{OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \
{OP_OR, 0, 0, PTP_CLASS_VLAN }, /* */ \
{OP_RETA, 0, 0, 0 }, /* */ \
-/*L50*/ {OP_JEQ, 0, 4, ETH_P_1588 }, /* f goto L61 */ \
+/*L50*/ {OP_JEQ, 0, 7, ETH_P_1588 }, /* f goto L61 */ \
+ {OP_LDB, 0, 0, ETH_HLEN }, /* */ \
+ {OP_AND, 0, 0, PTP_GEN_BIT }, /* */ \
+ {OP_JEQ, 0, 4, 0 }, /* f goto L6x */ \
{OP_LDH, 0, 0, ETH_HLEN }, /* */ \
{OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \
{OP_OR, 0, 0, PTP_CLASS_L2 }, /* */ \
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4ac2c0578e0..41d0237fd44 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1956,7 +1956,6 @@ static inline void disable_sched_clock_irqtime(void) {}
extern unsigned long long
task_sched_runtime(struct task_struct *task);
-extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
/* sched_exec is called by processes performing an exec */
#ifdef CONFIG_SMP
diff --git a/include/linux/security.h b/include/linux/security.h
index ebd2a53a3d0..19d8e04e168 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -36,6 +36,7 @@
#include <linux/key.h>
#include <linux/xfrm.h>
#include <linux/slab.h>
+#include <linux/xattr.h>
#include <net/flow.h>
/* Maximum number of letters for an LSM name string */
@@ -147,6 +148,10 @@ extern int mmap_min_addr_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
#endif
+/* security_inode_init_security callback function to write xattrs */
+typedef int (*initxattrs) (struct inode *inode,
+ const struct xattr *xattr_array, void *fs_data);
+
#ifdef CONFIG_SECURITY
struct security_mnt_opts {
@@ -1367,7 +1372,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @inode_getsecctx:
* Returns a string containing all relavent security context information
*
- * @inode we wish to set the security context of.
+ * @inode we wish to get the security context of.
* @ctx is a pointer in which to place the allocated security context.
* @ctxlen points to the place to put the length of @ctx.
* This is the main security structure.
@@ -1655,6 +1660,8 @@ struct security_operations {
extern int security_init(void);
extern int security_module_enable(struct security_operations *ops);
extern int register_security(struct security_operations *ops);
+extern void __init security_fixup_ops(struct security_operations *ops);
+
/* Security operations */
int security_ptrace_access_check(struct task_struct *child, unsigned int mode);
@@ -1704,8 +1711,11 @@ int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts);
int security_inode_alloc(struct inode *inode);
void security_inode_free(struct inode *inode);
int security_inode_init_security(struct inode *inode, struct inode *dir,
- const struct qstr *qstr, char **name,
- void **value, size_t *len);
+ const struct qstr *qstr,
+ initxattrs initxattrs, void *fs_data);
+int security_old_inode_init_security(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr, char **name,
+ void **value, size_t *len);
int security_inode_create(struct inode *dir, struct dentry *dentry, int mode);
int security_inode_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry);
@@ -2034,11 +2044,19 @@ static inline void security_inode_free(struct inode *inode)
static inline int security_inode_init_security(struct inode *inode,
struct inode *dir,
const struct qstr *qstr,
- char **name,
- void **value,
- size_t *len)
+ initxattrs initxattrs,
+ void *fs_data)
{
- return -EOPNOTSUPP;
+ return 0;
+}
+
+static inline int security_old_inode_init_security(struct inode *inode,
+ struct inode *dir,
+ const struct qstr *qstr,
+ char **name, void **value,
+ size_t *len)
+{
+ return 0;
}
static inline int security_inode_create(struct inode *dir,
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 7b996ed86d5..8bd383caa36 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -524,6 +524,7 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);
extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
+extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
gfp_t priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb,
diff --git a/include/linux/snmp.h b/include/linux/snmp.h
index 12b2b18e50c..e16557a357e 100644
--- a/include/linux/snmp.h
+++ b/include/linux/snmp.h
@@ -231,6 +231,8 @@ enum
LINUX_MIB_TCPDEFERACCEPTDROP,
LINUX_MIB_IPRPFILTER, /* IP Reverse Path Filter (rp_filter) */
LINUX_MIB_TCPTIMEWAITOVERFLOW, /* TCPTimeWaitOverflow */
+ LINUX_MIB_TCPREQQFULLDOCOOKIES, /* TCPReqQFullDoCookies */
+ LINUX_MIB_TCPREQQFULLDROP, /* TCPReqQFullDrop */
__LINUX_MIB_MAX
};
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index aed54c50aa6..e5d12203154 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -30,6 +30,9 @@
#define XATTR_USER_PREFIX_LEN (sizeof (XATTR_USER_PREFIX) - 1)
/* Security namespace */
+#define XATTR_EVM_SUFFIX "evm"
+#define XATTR_NAME_EVM XATTR_SECURITY_PREFIX XATTR_EVM_SUFFIX
+
#define XATTR_SELINUX_SUFFIX "selinux"
#define XATTR_NAME_SELINUX XATTR_SECURITY_PREFIX XATTR_SELINUX_SUFFIX
@@ -49,6 +52,11 @@
#define XATTR_CAPS_SUFFIX "capability"
#define XATTR_NAME_CAPS XATTR_SECURITY_PREFIX XATTR_CAPS_SUFFIX
+#define XATTR_POSIX_ACL_ACCESS "posix_acl_access"
+#define XATTR_NAME_POSIX_ACL_ACCESS XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_ACCESS
+#define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
+#define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
+
#ifdef __KERNEL__
#include <linux/types.h>
@@ -67,6 +75,12 @@ struct xattr_handler {
size_t size, int flags, int handler_flags);
};
+struct xattr {
+ char *name;
+ void *value;
+ size_t value_len;
+};
+
ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
@@ -78,7 +92,10 @@ ssize_t generic_getxattr(struct dentry *dentry, const char *name, void *buffer,
ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size);
int generic_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags);
int generic_removexattr(struct dentry *dentry, const char *name);
-
+ssize_t vfs_getxattr_alloc(struct dentry *dentry, const char *name,
+ char **xattr_value, size_t size, gfp_t flags);
+int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
+ const char *value, size_t size, gfp_t flags);
#endif /* __KERNEL__ */
#endif /* _LINUX_XATTR_H */
diff --git a/include/net/flow.h b/include/net/flow.h
index 78113daadd6..a09447749e2 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -7,6 +7,7 @@
#ifndef _NET_FLOW_H
#define _NET_FLOW_H
+#include <linux/socket.h>
#include <linux/in6.h>
#include <linux/atomic.h>
@@ -68,7 +69,7 @@ struct flowi4 {
#define fl4_ipsec_spi uli.spi
#define fl4_mh_type uli.mht.type
#define fl4_gre_key uli.gre_key
-};
+} __attribute__((__aligned__(BITS_PER_LONG/8)));
static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
__u32 mark, __u8 tos, __u8 scope,
@@ -112,7 +113,7 @@ struct flowi6 {
#define fl6_ipsec_spi uli.spi
#define fl6_mh_type uli.mht.type
#define fl6_gre_key uli.gre_key
-};
+} __attribute__((__aligned__(BITS_PER_LONG/8)));
struct flowidn {
struct flowi_common __fl_common;
@@ -127,7 +128,7 @@ struct flowidn {
union flowi_uli uli;
#define fld_sport uli.ports.sport
#define fld_dport uli.ports.dport
-};
+} __attribute__((__aligned__(BITS_PER_LONG/8)));
struct flowi {
union {
@@ -161,6 +162,24 @@ static inline struct flowi *flowidn_to_flowi(struct flowidn *fldn)
return container_of(fldn, struct flowi, u.dn);
}
+typedef unsigned long flow_compare_t;
+
+static inline size_t flow_key_size(u16 family)
+{
+ switch (family) {
+ case AF_INET:
+ BUILD_BUG_ON(sizeof(struct flowi4) % sizeof(flow_compare_t));
+ return sizeof(struct flowi4) / sizeof(flow_compare_t);
+ case AF_INET6:
+ BUILD_BUG_ON(sizeof(struct flowi6) % sizeof(flow_compare_t));
+ return sizeof(struct flowi6) / sizeof(flow_compare_t);
+ case AF_DECnet:
+ BUILD_BUG_ON(sizeof(struct flowidn) % sizeof(flow_compare_t));
+ return sizeof(struct flowidn) / sizeof(flow_compare_t);
+ }
+ return 0;
+}
+
#define FLOW_DIR_IN 0
#define FLOW_DIR_OUT 1
#define FLOW_DIR_FWD 2
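The new alignment attributes and flow_key_size() helper above allow flow keys to be treated as arrays of flow_compare_t words. A hedged sketch of the word-wise comparison this enables (the flow cache code does something along these lines):

/*
 * Illustrative sketch only: compare two flow keys of the same family by
 * walking them as flow_compare_t words, which the __aligned__ attributes
 * above make safe.
 */
static int example_flow_key_equal(const struct flowi *k1,
				  const struct flowi *k2, u16 family)
{
	const flow_compare_t *p1 = (const flow_compare_t *)k1;
	const flow_compare_t *p2 = (const flow_compare_t *)k2;
	size_t n = flow_key_size(family);	/* 0 for unknown families */
	size_t i;

	if (n == 0)
		return 0;
	for (i = 0; i < n; i++)
		if (p1[i] != p2[i])
			return 0;
	return 1;
}
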
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 1aaf915656f..8fa4430f99c 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -900,6 +900,7 @@ struct netns_ipvs {
volatile int sync_state;
volatile int master_syncid;
volatile int backup_syncid;
+ struct mutex sync_mutex;
/* multicast interface name */
char master_mcast_ifn[IP_VS_IFNAME_MAXLEN];
char backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 99e6e19b57c..4c0766e201e 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -96,7 +96,8 @@ extern int sysctl_max_syn_backlog;
*/
struct listen_sock {
u8 max_qlen_log;
- /* 3 bytes hole, try to use */
+ u8 synflood_warned;
+ /* 2 bytes hole, try to use */
int qlen;
int qlen_young;
int clock_hand;
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
index 6506458ccd3..712b3bebeda 100644
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -109,6 +109,7 @@ typedef enum {
SCTP_CMD_SEND_MSG, /* Send the whole use message */
SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */
SCTP_CMD_PURGE_ASCONF_QUEUE, /* Purge all asconf queues.*/
+ SCTP_CMD_SET_ASOC, /* Restore association context */
SCTP_CMD_LAST
} sctp_verb_t;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 149a415d1e0..acc620a4a45 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -431,17 +431,34 @@ extern int tcp_disconnect(struct sock *sk, int flags);
extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
struct ip_options *opt);
+#ifdef CONFIG_SYN_COOKIES
extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
__u16 *mss);
+#else
+static inline __u32 cookie_v4_init_sequence(struct sock *sk,
+ struct sk_buff *skb,
+ __u16 *mss)
+{
+ return 0;
+}
+#endif
extern __u32 cookie_init_timestamp(struct request_sock *req);
extern bool cookie_check_timestamp(struct tcp_options_received *opt, bool *);
/* From net/ipv6/syncookies.c */
extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
+#ifdef CONFIG_SYN_COOKIES
extern __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb,
__u16 *mss);
-
+#else
+static inline __u32 cookie_v6_init_sequence(struct sock *sk,
+ struct sk_buff *skb,
+ __u16 *mss)
+{
+ return 0;
+}
+#endif
/* tcp_output.c */
extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
@@ -460,6 +477,9 @@ extern int tcp_write_wakeup(struct sock *);
extern void tcp_send_fin(struct sock *sk);
extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
extern int tcp_send_synack(struct sock *);
+extern int tcp_syn_flood_action(struct sock *sk,
+ const struct sk_buff *skb,
+ const char *proto);
extern void tcp_push_one(struct sock *, unsigned int mss_now);
extern void tcp_send_ack(struct sock *sk);
extern void tcp_send_delayed_ack(struct sock *sk);
diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h
index 5271a741c3a..498433dd067 100644
--- a/include/net/transp_v6.h
+++ b/include/net/transp_v6.h
@@ -39,6 +39,7 @@ extern int datagram_recv_ctl(struct sock *sk,
struct sk_buff *skb);
extern int datagram_send_ctl(struct net *net,
+ struct sock *sk,
struct msghdr *msg,
struct flowi6 *fl6,
struct ipv6_txoptions *opt,
diff --git a/include/net/udplite.h b/include/net/udplite.h
index 673a024c6b2..5f097ca7d5c 100644
--- a/include/net/udplite.h
+++ b/include/net/udplite.h
@@ -66,40 +66,34 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
return 0;
}
-static inline int udplite_sender_cscov(struct udp_sock *up, struct udphdr *uh)
+/* Slow-path computation of checksum. Socket is locked. */
+static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb)
{
+ const struct udp_sock *up = udp_sk(skb->sk);
int cscov = up->len;
+ __wsum csum = 0;
- /*
- * Sender has set `partial coverage' option on UDP-Lite socket
- */
- if (up->pcflag & UDPLITE_SEND_CC) {
+ if (up->pcflag & UDPLITE_SEND_CC) {
+ /*
+ * Sender has set `partial coverage' option on UDP-Lite socket.
+ * The special case "up->pcslen == 0" signifies full coverage.
+ */
if (up->pcslen < up->len) {
- /* up->pcslen == 0 means that full coverage is required,
- * partial coverage only if 0 < up->pcslen < up->len */
- if (0 < up->pcslen) {
- cscov = up->pcslen;
- }
- uh->len = htons(up->pcslen);
+ if (0 < up->pcslen)
+ cscov = up->pcslen;
+ udp_hdr(skb)->len = htons(up->pcslen);
}
- /*
- * NOTE: Causes for the error case `up->pcslen > up->len':
- * (i) Application error (will not be penalized).
- * (ii) Payload too big for send buffer: data is split
- * into several packets, each with its own header.
- * In this case (e.g. last segment), coverage may
- * exceed packet length.
- * Since packets with coverage length > packet length are
- * illegal, we fall back to the defaults here.
- */
+ /*
+ * NOTE: Causes for the error case `up->pcslen > up->len':
+ * (i) Application error (will not be penalized).
+ * (ii) Payload too big for send buffer: data is split
+ * into several packets, each with its own header.
+ * In this case (e.g. last segment), coverage may
+ * exceed packet length.
+ * Since packets with coverage length > packet length are
+ * illegal, we fall back to the defaults here.
+ */
}
- return cscov;
-}
-
-static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb)
-{
- int cscov = udplite_sender_cscov(udp_sk(sk), udp_hdr(skb));
- __wsum csum = 0;
skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */
@@ -115,16 +109,21 @@ static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb)
return csum;
}
+/* Fast-path computation of checksum. Socket may not be locked. */
static inline __wsum udplite_csum(struct sk_buff *skb)
{
- struct sock *sk = skb->sk;
- int cscov = udplite_sender_cscov(udp_sk(sk), udp_hdr(skb));
+ const struct udp_sock *up = udp_sk(skb->sk);
const int off = skb_transport_offset(skb);
- const int len = skb->len - off;
+ int len = skb->len - off;
+ if ((up->pcflag & UDPLITE_SEND_CC) && up->pcslen < len) {
+ if (0 < up->pcslen)
+ len = up->pcslen;
+ udp_hdr(skb)->len = htons(up->pcslen);
+ }
skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */
- return skb_checksum(skb, off, min(cscov, len), 0);
+ return skb_checksum(skb, off, len, 0);
}
extern void udplite4_register(void);
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 27040653005..35aa786f93d 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -10,10 +10,7 @@
#include <net/tcp.h>
#define TARGET_CORE_MOD_VERSION "v4.1.0-rc1-ml"
-#define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGABRT))
-/* Used by transport_generic_allocate_iovecs() */
-#define TRANSPORT_IOV_DATA_BUFFER 5
/* Maximum Number of LUNs per Target Portal Group */
/* Don't raise above 511 or REPORT_LUNS needs to handle >1 page */
#define TRANSPORT_MAX_LUNS_PER_TPG 256
@@ -75,32 +72,26 @@ enum transport_tpg_type_table {
};
/* Used for generate timer flags */
-enum timer_flags_table {
- TF_RUNNING = 0x01,
- TF_STOP = 0x02,
+enum se_task_flags {
+ TF_ACTIVE = (1 << 0),
+ TF_SENT = (1 << 1),
+ TF_REQUEST_STOP = (1 << 2),
};
/* Special transport agnostic struct se_cmd->t_states */
enum transport_state_table {
TRANSPORT_NO_STATE = 0,
TRANSPORT_NEW_CMD = 1,
- TRANSPORT_DEFERRED_CMD = 2,
TRANSPORT_WRITE_PENDING = 3,
TRANSPORT_PROCESS_WRITE = 4,
TRANSPORT_PROCESSING = 5,
- TRANSPORT_COMPLETE_OK = 6,
- TRANSPORT_COMPLETE_FAILURE = 7,
- TRANSPORT_COMPLETE_TIMEOUT = 8,
+ TRANSPORT_COMPLETE = 6,
TRANSPORT_PROCESS_TMR = 9,
- TRANSPORT_TMR_COMPLETE = 10,
TRANSPORT_ISTATE_PROCESSING = 11,
- TRANSPORT_ISTATE_PROCESSED = 12,
- TRANSPORT_KILL = 13,
- TRANSPORT_REMOVE = 14,
- TRANSPORT_FREE = 15,
TRANSPORT_NEW_CMD_MAP = 16,
TRANSPORT_FREE_CMD_INTR = 17,
TRANSPORT_COMPLETE_QF_WP = 18,
+ TRANSPORT_COMPLETE_QF_OK = 19,
};
/* Used for struct se_cmd->se_cmd_flags */
@@ -125,7 +116,6 @@ enum se_cmd_flags_table {
SCF_UNUSED = 0x00100000,
SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00400000,
SCF_EMULATE_CDB_ASYNC = 0x01000000,
- SCF_EMULATE_QUEUE_FULL = 0x02000000,
};
/* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
@@ -401,34 +391,22 @@ struct se_queue_obj {
} ____cacheline_aligned;
struct se_task {
- unsigned char task_sense;
- struct scatterlist *task_sg;
- u32 task_sg_nents;
- struct scatterlist *task_sg_bidi;
- u8 task_scsi_status;
- u8 task_flags;
- int task_error_status;
- int task_state_flags;
- bool task_padded_sg;
unsigned long long task_lba;
- u32 task_no;
- u32 task_sectors;
- u32 task_size;
+ u32 task_sectors;
+ u32 task_size;
+ struct se_cmd *task_se_cmd;
+ struct scatterlist *task_sg;
+ u32 task_sg_nents;
+ u16 task_flags;
+ u8 task_sense;
+ u8 task_scsi_status;
+ int task_error_status;
enum dma_data_direction task_data_direction;
- struct se_cmd *task_se_cmd;
- struct se_device *se_dev;
+ atomic_t task_state_active;
+ struct list_head t_list;
+ struct list_head t_execute_list;
+ struct list_head t_state_list;
struct completion task_stop_comp;
- atomic_t task_active;
- atomic_t task_execute_queue;
- atomic_t task_timeout;
- atomic_t task_sent;
- atomic_t task_stop;
- atomic_t task_state_active;
- struct timer_list task_timer;
- struct se_device *se_obj_ptr;
- struct list_head t_list;
- struct list_head t_execute_list;
- struct list_head t_state_list;
} ____cacheline_aligned;
struct se_cmd {
@@ -446,8 +424,6 @@ struct se_cmd {
int sam_task_attr;
/* Transport protocol dependent state, see transport_state_table */
enum transport_state_table t_state;
- /* Transport protocol dependent state for out of order CmdSNs */
- int deferred_t_state;
/* Transport specific error status */
int transport_error_status;
/* See se_cmd_flags_table */
@@ -461,7 +437,6 @@ struct se_cmd {
u32 orig_fe_lun;
/* Persistent Reservation key */
u64 pr_res_key;
- atomic_t transport_sent;
/* Used for sense data */
void *sense_buffer;
struct list_head se_delayed_node;
@@ -479,10 +454,7 @@ struct se_cmd {
struct list_head se_queue_node;
struct target_core_fabric_ops *se_tfo;
int (*transport_emulate_cdb)(struct se_cmd *);
- void (*transport_split_cdb)(unsigned long long, u32, unsigned char *);
- void (*transport_wait_for_tasks)(struct se_cmd *, int, int);
void (*transport_complete_callback)(struct se_cmd *);
- int (*transport_qf_callback)(struct se_cmd *);
unsigned char *t_task_cdb;
unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
@@ -495,7 +467,6 @@ struct se_cmd {
atomic_t t_se_count;
atomic_t t_task_cdbs_left;
atomic_t t_task_cdbs_ex_left;
- atomic_t t_task_cdbs_timeout_left;
atomic_t t_task_cdbs_sent;
atomic_t t_transport_aborted;
atomic_t t_transport_active;
@@ -503,7 +474,6 @@ struct se_cmd {
atomic_t t_transport_queue_active;
atomic_t t_transport_sent;
atomic_t t_transport_stop;
- atomic_t t_transport_timeout;
atomic_t transport_dev_active;
atomic_t transport_lun_active;
atomic_t transport_lun_fe_stop;
@@ -514,6 +484,8 @@ struct se_cmd {
struct completion transport_lun_stop_comp;
struct scatterlist *t_tasks_sg_chained;
+ struct work_struct work;
+
/*
* Used for pre-registered fabric SGL passthrough WRITE and READ
* with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop
@@ -670,7 +642,6 @@ struct se_dev_attrib {
u32 optimal_sectors;
u32 hw_queue_depth;
u32 queue_depth;
- u32 task_timeout;
u32 max_unmap_lba_count;
u32 max_unmap_block_desc_count;
u32 unmap_granularity;
diff --git a/include/target/target_core_tmr.h b/include/target/target_core_tmr.h
index bd559680747..d5876e17d3f 100644
--- a/include/target/target_core_tmr.h
+++ b/include/target/target_core_tmr.h
@@ -27,7 +27,7 @@ enum tcm_tmrsp_table {
extern struct kmem_cache *se_tmr_req_cache;
-extern struct se_tmr_req *core_tmr_alloc_req(struct se_cmd *, void *, u8);
+extern struct se_tmr_req *core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t);
extern void core_tmr_release_req(struct se_tmr_req *);
extern int core_tmr_lun_reset(struct se_device *, struct se_tmr_req *,
struct list_head *, struct se_cmd *);
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h
index 46aae4f94ed..a037a1a6fbb 100644
--- a/include/target/target_core_transport.h
+++ b/include/target/target_core_transport.h
@@ -22,10 +22,9 @@
#define PYX_TRANSPORT_LU_COMM_FAILURE -7
#define PYX_TRANSPORT_UNKNOWN_MODE_PAGE -8
#define PYX_TRANSPORT_WRITE_PROTECTED -9
-#define PYX_TRANSPORT_TASK_TIMEOUT -10
-#define PYX_TRANSPORT_RESERVATION_CONFLICT -11
-#define PYX_TRANSPORT_ILLEGAL_REQUEST -12
-#define PYX_TRANSPORT_USE_SENSE_REASON -13
+#define PYX_TRANSPORT_RESERVATION_CONFLICT -10
+#define PYX_TRANSPORT_ILLEGAL_REQUEST -11
+#define PYX_TRANSPORT_USE_SENSE_REASON -12
#ifndef SAM_STAT_RESERVATION_CONFLICT
#define SAM_STAT_RESERVATION_CONFLICT 0x18
@@ -38,16 +37,6 @@
#define TRANSPORT_PLUGIN_VHBA_PDEV 2
#define TRANSPORT_PLUGIN_VHBA_VDEV 3
-/* For SE OBJ Plugins, in seconds */
-#define TRANSPORT_TIMEOUT_TUR 10
-#define TRANSPORT_TIMEOUT_TYPE_DISK 60
-#define TRANSPORT_TIMEOUT_TYPE_ROM 120
-#define TRANSPORT_TIMEOUT_TYPE_TAPE 600
-#define TRANSPORT_TIMEOUT_TYPE_OTHER 300
-
-/* For se_task->task_state_flags */
-#define TSF_EXCEPTION_CLEARED 0x01
-
/*
* struct se_subsystem_dev->su_dev_flags
*/
@@ -64,8 +53,6 @@
#define DF_SPC2_RESERVATIONS_WITH_ISID 0x00000004
/* struct se_dev_attrib sanity values */
-/* 10 Minutes */
-#define DA_TASK_TIMEOUT_MAX 600
/* Default max_unmap_lba_count */
#define DA_MAX_UNMAP_LBA_COUNT 0
/* Default max_unmap_block_desc_count */
@@ -110,16 +97,13 @@
#define MOD_MAX_SECTORS(ms, bs) (ms % (PAGE_SIZE / bs))
-struct se_mem;
struct se_subsystem_api;
-extern struct kmem_cache *se_mem_cache;
-
extern int init_se_kmem_caches(void);
extern void release_se_kmem_caches(void);
extern u32 scsi_get_new_index(scsi_index_t);
extern void transport_init_queue_obj(struct se_queue_obj *);
-extern int transport_subsystem_check_init(void);
+extern void transport_subsystem_check_init(void);
extern int transport_subsystem_register(struct se_subsystem_api *);
extern void transport_subsystem_release(struct se_subsystem_api *);
extern void transport_load_plugins(void);
@@ -134,7 +118,6 @@ extern void transport_free_session(struct se_session *);
extern void transport_deregister_session_configfs(struct se_session *);
extern void transport_deregister_session(struct se_session *);
extern void transport_cmd_finish_abort(struct se_cmd *, int);
-extern void transport_cmd_finish_abort_tmr(struct se_cmd *);
extern void transport_complete_sync_cache(struct se_cmd *, int);
extern void transport_complete_task(struct se_task *, int);
extern void transport_add_task_to_execute_queue(struct se_task *,
@@ -142,6 +125,8 @@ extern void transport_add_task_to_execute_queue(struct se_task *,
struct se_device *);
extern void transport_remove_task_from_execute_queue(struct se_task *,
struct se_device *);
+extern void __transport_remove_task_from_execute_queue(struct se_task *,
+ struct se_device *);
unsigned char *transport_dump_cmd_direction(struct se_cmd *);
extern void transport_dump_dev_state(struct se_device *, char *, int *);
extern void transport_dump_dev_info(struct se_device *, struct se_lun *,
@@ -169,29 +154,24 @@ extern void transport_init_se_cmd(struct se_cmd *,
unsigned char *);
void *transport_kmap_first_data_page(struct se_cmd *cmd);
void transport_kunmap_first_data_page(struct se_cmd *cmd);
-extern void transport_free_se_cmd(struct se_cmd *);
extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *);
-extern int transport_generic_handle_cdb(struct se_cmd *);
extern int transport_handle_cdb_direct(struct se_cmd *);
extern int transport_generic_handle_cdb_map(struct se_cmd *);
extern int transport_generic_handle_data(struct se_cmd *);
extern void transport_new_cmd_failure(struct se_cmd *);
extern int transport_generic_handle_tmr(struct se_cmd *);
extern void transport_generic_free_cmd_intr(struct se_cmd *);
-extern void __transport_stop_task_timer(struct se_task *, unsigned long *);
+extern bool target_stop_task(struct se_task *task, unsigned long *flags);
extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32,
struct scatterlist *, u32);
extern int transport_clear_lun_from_sessions(struct se_lun *);
+extern void transport_wait_for_tasks(struct se_cmd *);
extern int transport_check_aborted_status(struct se_cmd *, int);
extern int transport_send_check_condition_and_sense(struct se_cmd *, u8, int);
extern void transport_send_task_abort(struct se_cmd *);
extern void transport_release_cmd(struct se_cmd *);
-extern void transport_generic_free_cmd(struct se_cmd *, int, int);
+extern void transport_generic_free_cmd(struct se_cmd *, int);
extern void transport_generic_wait_for_cmds(struct se_cmd *, int);
-extern int transport_init_task_sg(struct se_task *, struct se_mem *, u32);
-extern int transport_map_mem_to_sg(struct se_task *, struct list_head *,
- struct scatterlist *, struct se_mem *,
- struct se_mem **, u32 *, u32 *);
extern void transport_do_task_sg_chain(struct se_cmd *);
extern void transport_generic_process_write(struct se_cmd *);
extern int transport_generic_new_cmd(struct se_cmd *);
@@ -200,6 +180,7 @@ extern int transport_generic_do_tmr(struct se_cmd *);
extern int core_alua_check_nonop_delay(struct se_cmd *);
/* From target_core_cdb.c */
extern int transport_emulate_control_cdb(struct se_task *);
+extern void target_get_task_cdb(struct se_task *task, unsigned char *cdb);
/*
* Each se_transport_task_t can have N number of possible struct se_task's
@@ -227,6 +208,10 @@ struct se_subsystem_api {
* Transport Type.
*/
u8 transport_type;
+
+ unsigned int fua_write_emulated : 1;
+ unsigned int write_cache_emulated : 1;
+
/*
* struct module for struct se_hba references
*/
@@ -236,18 +221,6 @@ struct se_subsystem_api {
*/
struct list_head sub_api_list;
/*
- * For SCF_SCSI_NON_DATA_CDB
- */
- int (*cdb_none)(struct se_task *);
- /*
- * For SCF_SCSI_DATA_SG_IO_CDB
- */
- int (*map_data_SG)(struct se_task *);
- /*
- * For SCF_SCSI_CONTROL_SG_IO_CDB
- */
- int (*map_control_SG)(struct se_task *);
- /*
* attach_hba():
*/
int (*attach_hba)(struct se_hba *, u32);
@@ -275,22 +248,6 @@ struct se_subsystem_api {
void (*free_device)(void *);
/*
- * dpo_emulated():
- */
- int (*dpo_emulated)(struct se_device *);
- /*
- * fua_write_emulated():
- */
- int (*fua_write_emulated)(struct se_device *);
- /*
- * fua_read_emulated():
- */
- int (*fua_read_emulated)(struct se_device *);
- /*
- * write_cache_emulated():
- */
- int (*write_cache_emulated)(struct se_device *);
- /*
* transport_complete():
*
* Use transport_generic_complete() for majority of DAS transport
@@ -331,10 +288,6 @@ struct se_subsystem_api {
ssize_t (*show_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *,
char *);
/*
- * get_cdb():
- */
- unsigned char *(*get_cdb)(struct se_task *);
- /*
* get_device_rev():
*/
u32 (*get_device_rev)(struct se_device *);
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 6bca4cc0063..5f172703eb4 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -298,7 +298,7 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
__array(char, name, 32)
__field(unsigned long, ino)
__field(unsigned long, state)
- __field(unsigned long, age)
+ __field(unsigned long, dirtied_when)
__field(unsigned long, writeback_index)
__field(long, nr_to_write)
__field(unsigned long, wrote)
@@ -309,19 +309,19 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
dev_name(inode->i_mapping->backing_dev_info->dev), 32);
__entry->ino = inode->i_ino;
__entry->state = inode->i_state;
- __entry->age = (jiffies - inode->dirtied_when) *
- 1000 / HZ;
+ __entry->dirtied_when = inode->dirtied_when;
__entry->writeback_index = inode->i_mapping->writeback_index;
__entry->nr_to_write = nr_to_write;
__entry->wrote = nr_to_write - wbc->nr_to_write;
),
- TP_printk("bdi %s: ino=%lu state=%s age=%lu "
+ TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
"index=%lu to_write=%ld wrote=%lu",
__entry->name,
__entry->ino,
show_inode_state(__entry->state),
- __entry->age,
+ __entry->dirtied_when,
+ (jiffies - __entry->dirtied_when) / HZ,
__entry->writeback_index,
__entry->nr_to_write,
__entry->wrote
diff --git a/include/xen/balloon.h b/include/xen/balloon.h
index 76f7538bb33..d29c153705b 100644
--- a/include/xen/balloon.h
+++ b/include/xen/balloon.h
@@ -25,8 +25,9 @@ extern struct balloon_stats balloon_stats;
void balloon_set_new_target(unsigned long target);
-int alloc_xenballooned_pages(int nr_pages, struct page** pages);
-void free_xenballooned_pages(int nr_pages, struct page** pages);
+int alloc_xenballooned_pages(int nr_pages, struct page **pages,
+ bool highmem);
+void free_xenballooned_pages(int nr_pages, struct page **pages);
struct sys_device;
#ifdef CONFIG_XEN_SELFBALLOONING
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index b1fab6b5b3e..6b99bfbd785 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -156,6 +156,7 @@ unsigned int gnttab_max_grant_frames(void);
#define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
+ struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count);
int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
struct page **pages, unsigned int count);
diff --git a/include/xen/interface/io/xs_wire.h b/include/xen/interface/io/xs_wire.h
index 99fcffb372d..f0b6890370b 100644
--- a/include/xen/interface/io/xs_wire.h
+++ b/include/xen/interface/io/xs_wire.h
@@ -26,7 +26,11 @@ enum xsd_sockmsg_type
XS_SET_PERMS,
XS_WATCH_EVENT,
XS_ERROR,
- XS_IS_DOMAIN_INTRODUCED
+ XS_IS_DOMAIN_INTRODUCED,
+ XS_RESUME,
+ XS_SET_TARGET,
+ XS_RESTRICT,
+ XS_RESET_WATCHES
};
#define XS_WRITE_NONE "NONE"
diff --git a/include/xen/interface/physdev.h b/include/xen/interface/physdev.h
index 534cac89a77..c1080d9c705 100644
--- a/include/xen/interface/physdev.h
+++ b/include/xen/interface/physdev.h
@@ -109,6 +109,7 @@ struct physdev_irq {
#define MAP_PIRQ_TYPE_MSI 0x0
#define MAP_PIRQ_TYPE_GSI 0x1
#define MAP_PIRQ_TYPE_UNKNOWN 0x2
+#define MAP_PIRQ_TYPE_MSI_SEG 0x3
#define PHYSDEVOP_map_pirq 13
struct physdev_map_pirq {
@@ -119,7 +120,7 @@ struct physdev_map_pirq {
int index;
/* IN or OUT */
int pirq;
- /* IN */
+ /* IN - high 16 bits hold segment for MAP_PIRQ_TYPE_MSI_SEG */
int bus;
/* IN */
int devfn;
@@ -198,6 +199,37 @@ struct physdev_get_free_pirq {
uint32_t pirq;
};
+#define XEN_PCI_DEV_EXTFN 0x1
+#define XEN_PCI_DEV_VIRTFN 0x2
+#define XEN_PCI_DEV_PXM 0x4
+
+#define PHYSDEVOP_pci_device_add 25
+struct physdev_pci_device_add {
+ /* IN */
+ uint16_t seg;
+ uint8_t bus;
+ uint8_t devfn;
+ uint32_t flags;
+ struct {
+ uint8_t bus;
+ uint8_t devfn;
+ } physfn;
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ uint32_t optarr[];
+#elif defined(__GNUC__)
+ uint32_t optarr[0];
+#endif
+};
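The optarr[] declaration above is the usual flexible-array-member idiom (C99 `[]`, GNU `[0]` as fallback); purely as an illustration, a structure ending this way is sized at allocation time, roughly like so (names are made up, not part of this patch):

/* Hedged sketch of allocating a struct that ends in a flexible array
 * member; "example_hdr" is an illustrative stand-in.
 */
#include <stdlib.h>
#include <stdint.h>

struct example_hdr {
	uint16_t seg;
	uint8_t  bus;
	uint8_t  devfn;
	uint32_t optarr[];	/* flexible array member */
};

static struct example_hdr *example_alloc(size_t nopts)
{
	/* room for the fixed header plus nopts trailing u32 entries */
	return malloc(sizeof(struct example_hdr) + nopts * sizeof(uint32_t));
}
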
+
+#define PHYSDEVOP_pci_device_remove 26
+#define PHYSDEVOP_restore_msi_ext 27
+struct physdev_pci_device {
+ /* IN */
+ uint16_t seg;
+ uint8_t bus;
+ uint8_t devfn;
+};
+
/*
* Notify that some PIRQ-bound event channels have been unmasked.
* ** This command is obsolete since interface version 0x00030202 and is **
diff --git a/include/xen/page.h b/include/xen/page.h
index 0be36b976f4..12765b6f951 100644
--- a/include/xen/page.h
+++ b/include/xen/page.h
@@ -3,6 +3,16 @@
#include <asm/xen/page.h>
-extern phys_addr_t xen_extra_mem_start, xen_extra_mem_size;
+struct xen_memory_region {
+ phys_addr_t start;
+ phys_addr_t size;
+};
+
+#define XEN_EXTRA_MEM_MAX_REGIONS 128 /* == E820MAX */
+
+extern __initdata
+struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS];
+
+extern unsigned long xen_released_pages;
#endif /* _XEN_PAGE_H */
diff --git a/init/main.c b/init/main.c
index 9c51ee7adf3..03b408dff82 100644
--- a/init/main.c
+++ b/init/main.c
@@ -209,8 +209,19 @@ early_param("quiet", quiet_kernel);
static int __init loglevel(char *str)
{
- get_option(&str, &console_loglevel);
- return 0;
+ int newlevel;
+
+ /*
+ * Only update loglevel value when a correct setting was passed,
+ * to prevent blind crashes (when loglevel is set to 0) that
+ * are quite hard to debug.
+ */
+ if (get_option(&str, &newlevel)) {
+ console_loglevel = newlevel;
+ return 0;
+ }
+
+ return -EINVAL;
}
early_param("loglevel", loglevel);
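The hunk above commits console_loglevel only once get_option() reports a successful parse; the same validate-then-commit shape, sketched here in plain C with strtol() standing in for get_option() (and "current_level" for console_loglevel), looks like this:

/* Hedged userspace sketch of the validate-before-commit pattern used above. */
#include <errno.h>
#include <stdlib.h>

static int current_level = 7;

static int set_level(const char *str)
{
	char *end;
	long val;

	errno = 0;
	val = strtol(str, &end, 10);
	if (errno || end == str || *end != '\0')
		return -EINVAL;	/* bad input: leave current_level untouched */

	current_level = (int)val;
	return 0;
}
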
@@ -370,9 +381,6 @@ static noinline void __init_refok rest_init(void)
preempt_enable_no_resched();
schedule();
- /* At this point, we can enable user mode helper functionality */
- usermodehelper_enable();
-
/* Call into cpu_idle with preempt disabled */
preempt_disable();
cpu_idle();
@@ -722,6 +730,7 @@ static void __init do_basic_setup(void)
driver_init();
init_irq_proc();
do_ctors();
+ usermodehelper_enable();
do_initcalls();
}
diff --git a/kernel/cred.c b/kernel/cred.c
index 8ef31f53c44..bb55d052d85 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -644,6 +644,9 @@ void __init cred_init(void)
*/
struct cred *prepare_kernel_cred(struct task_struct *daemon)
{
+#ifdef CONFIG_KEYS
+ struct thread_group_cred *tgcred;
+#endif
const struct cred *old;
struct cred *new;
@@ -651,6 +654,14 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
if (!new)
return NULL;
+#ifdef CONFIG_KEYS
+ tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
+ if (!tgcred) {
+ kmem_cache_free(cred_jar, new);
+ return NULL;
+ }
+#endif
+
kdebug("prepare_kernel_cred() alloc %p", new);
if (daemon)
@@ -667,8 +678,11 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
get_group_info(new->group_info);
#ifdef CONFIG_KEYS
- atomic_inc(&init_tgcred.usage);
- new->tgcred = &init_tgcred;
+ atomic_set(&tgcred->usage, 1);
+ spin_lock_init(&tgcred->lock);
+ tgcred->process_keyring = NULL;
+ tgcred->session_keyring = NULL;
+ new->tgcred = tgcred;
new->request_key_auth = NULL;
new->thread_keyring = NULL;
new->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
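The CONFIG_KEYS hunk above allocates tgcred up front and frees the partially built cred when that allocation fails; as a generic illustration of that allocate-then-unwind shape (object names are made up, not the kernel's):

/* Hedged sketch: allocate two objects, unwinding the first if the
 * second allocation fails.
 */
#include <stdlib.h>

struct obj_a { int x; };
struct obj_b { int y; };

struct pair { struct obj_a *a; struct obj_b *b; };

static int pair_init(struct pair *p)
{
	p->a = malloc(sizeof(*p->a));
	if (!p->a)
		return -1;

	p->b = malloc(sizeof(*p->b));
	if (!p->b) {
		free(p->a);	/* unwind the earlier allocation */
		p->a = NULL;
		return -1;
	}
	return 0;
}
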
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index d5a3009da71..dc5114b4c16 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -178,7 +178,7 @@ void irq_shutdown(struct irq_desc *desc)
desc->depth = 1;
if (desc->irq_data.chip->irq_shutdown)
desc->irq_data.chip->irq_shutdown(&desc->irq_data);
- if (desc->irq_data.chip->irq_disable)
+ else if (desc->irq_data.chip->irq_disable)
desc->irq_data.chip->irq_disable(&desc->irq_data);
else
desc->irq_data.chip->irq_mask(&desc->irq_data);
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index d5828da3fd3..b57a3776de4 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -29,7 +29,11 @@ void irq_domain_add(struct irq_domain *domain)
*/
for (hwirq = 0; hwirq < domain->nr_irq; hwirq++) {
d = irq_get_irq_data(irq_domain_to_irq(domain, hwirq));
- if (d || d->domain) {
+ if (!d) {
+ WARN(1, "error: assigning domain to non-existent irq_desc");
+ return;
+ }
+ if (d->domain) {
/* things are broken; just report, don't clean up */
WARN(1, "error: irq_desc already assigned to a domain");
return;
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 58f405b581e..640ded8f5c4 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -250,7 +250,7 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
do {
times->utime = cputime_add(times->utime, t->utime);
times->stime = cputime_add(times->stime, t->stime);
- times->sum_exec_runtime += t->se.sum_exec_runtime;
+ times->sum_exec_runtime += task_sched_runtime(t);
} while_each_thread(tsk, t);
out:
rcu_read_unlock();
@@ -274,9 +274,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
struct task_cputime sum;
unsigned long flags;
- spin_lock_irqsave(&cputimer->lock, flags);
if (!cputimer->running) {
- cputimer->running = 1;
/*
* The POSIX timer interface allows for absolute time expiry
* values through the TIMER_ABSTIME flag, therefore we have
@@ -284,8 +282,11 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
* it.
*/
thread_group_cputime(tsk, &sum);
+ spin_lock_irqsave(&cputimer->lock, flags);
+ cputimer->running = 1;
update_gt_cputime(&cputimer->cputime, &sum);
- }
+ } else
+ spin_lock_irqsave(&cputimer->lock, flags);
*times = cputimer->cputime;
spin_unlock_irqrestore(&cputimer->lock, flags);
}
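The reordering above computes the expensive cputime sum before taking cputimer->lock and only publishes the result under the lock; a minimal userspace sketch of that compute-outside-the-lock shape (pthreads, unrelated to the actual kernel types):

/* Hedged sketch: do the slow computation without the lock held and
 * take the lock only to publish the result.
 */
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static long published;

static long slow_sum(void)
{
	long sum = 0;
	for (int i = 0; i < 1000000; i++)	/* stand-in for an expensive walk */
		sum += i;
	return sum;
}

static void update(void)
{
	long sum = slow_sum();		/* no lock held here */

	pthread_mutex_lock(&lock);
	published = sum;		/* short critical section */
	pthread_mutex_unlock(&lock);
}
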
@@ -312,7 +313,8 @@ static int cpu_clock_sample_group(const clockid_t which_clock,
cpu->cpu = cputime.utime;
break;
case CPUCLOCK_SCHED:
- cpu->sched = thread_group_sched_runtime(p);
+ thread_group_cputime(p, &cputime);
+ cpu->sched = cputime.sum_exec_runtime;
break;
}
return 0;
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 9de3ecfd20f..a70d2a5d8c7 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -744,20 +744,17 @@ int ptrace_request(struct task_struct *child, long request,
break;
si = child->last_siginfo;
- if (unlikely(!si || si->si_code >> 8 != PTRACE_EVENT_STOP))
- break;
-
- child->jobctl |= JOBCTL_LISTENING;
-
- /*
- * If NOTIFY is set, it means event happened between start
- * of this trap and now. Trigger re-trap immediately.
- */
- if (child->jobctl & JOBCTL_TRAP_NOTIFY)
- signal_wake_up(child, true);
-
+ if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
+ child->jobctl |= JOBCTL_LISTENING;
+ /*
+ * If NOTIFY is set, it means event happened between
+ * start of this trap and now. Trigger re-trap.
+ */
+ if (child->jobctl & JOBCTL_TRAP_NOTIFY)
+ signal_wake_up(child, true);
+ ret = 0;
+ }
unlock_task_sighand(child, &flags);
- ret = 0;
break;
case PTRACE_DETACH: /* detach a process that was attached. */
diff --git a/kernel/resource.c b/kernel/resource.c
index 3b3cedc5259..c8dc249da5c 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -419,6 +419,9 @@ static int __find_resource(struct resource *root, struct resource *old,
else
tmp.end = root->end;
+ if (tmp.end < tmp.start)
+ goto next;
+
resource_clip(&tmp, constraint->min, constraint->max);
arch_remove_reservations(&tmp);
@@ -436,8 +439,10 @@ static int __find_resource(struct resource *root, struct resource *old,
return 0;
}
}
- if (!this)
+
+next: if (!this || this->end == root->end)
break;
+
if (this != old)
tmp.start = this->end + 1;
this = this->sibling;
diff --git a/kernel/sched.c b/kernel/sched.c
index aecd83b4ffe..8aa00803c1e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3725,30 +3725,6 @@ unsigned long long task_sched_runtime(struct task_struct *p)
}
/*
- * Return sum_exec_runtime for the thread group.
- * In case the task is currently running, return the sum plus current's
- * pending runtime that have not been accounted yet.
- *
- * Note that the thread group might have other running tasks as well,
- * so the return value not includes other pending runtime that other
- * running tasks might have.
- */
-unsigned long long thread_group_sched_runtime(struct task_struct *p)
-{
- struct task_cputime totals;
- unsigned long flags;
- struct rq *rq;
- u64 ns;
-
- rq = task_rq_lock(p, &flags);
- thread_group_cputime(p, &totals);
- ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
- task_rq_unlock(rq, p, &flags);
-
- return ns;
-}
-
-/*
* Account user cpu time to a process.
* @p: the process that the cpu time gets accounted to
* @cputime: the cpu time spent in user space since the last update
@@ -4372,7 +4348,7 @@ static inline void sched_submit_work(struct task_struct *tsk)
blk_schedule_flush_plug(tsk);
}
-asmlinkage void schedule(void)
+asmlinkage void __sched schedule(void)
{
struct task_struct *tsk = current;
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 97540f0c9e4..af1177858be 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1050,7 +1050,7 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
*/
if (curr && unlikely(rt_task(curr)) &&
(curr->rt.nr_cpus_allowed < 2 ||
- curr->prio < p->prio) &&
+ curr->prio <= p->prio) &&
(p->rt.nr_cpus_allowed > 1)) {
int target = find_lowest_rq(p);
@@ -1581,7 +1581,7 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p)
p->rt.nr_cpus_allowed > 1 &&
rt_task(rq->curr) &&
(rq->curr->rt.nr_cpus_allowed < 2 ||
- rq->curr->prio < p->prio))
+ rq->curr->prio <= p->prio))
push_rt_tasks(rq);
}
diff --git a/kernel/sys.c b/kernel/sys.c
index 18ee1d2f647..1dbbe695a5e 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1172,7 +1172,7 @@ DECLARE_RWSEM(uts_sem);
static int override_release(char __user *release, int len)
{
int ret = 0;
- char buf[len];
+ char buf[65];
if (current->personality & UNAME26) {
char *rest = UTS_RELEASE;
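The change above replaces an on-stack variable-length array whose size came from the caller with a fixed 65-byte buffer; a hedged, stand-alone illustration of bounding the copy instead of trusting a supplied length (helper name is invented):

/* Hedged sketch: never size an on-stack buffer from a caller-supplied
 * length; use a fixed bound and clamp the copy instead.
 */
#include <stdio.h>
#include <string.h>

static void copy_release(char *dst, size_t dst_len, const char *src)
{
	char buf[65];	/* fixed upper bound, as in the hunk above */
	size_t n;

	snprintf(buf, sizeof(buf), "%s", src);
	n = strlen(buf) + 1;
	if (n > dst_len)
		n = dst_len;
	memcpy(dst, buf, n);
}
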
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index e19ce1454ee..e66046456f4 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -655,6 +655,7 @@ static struct genl_ops taskstats_ops = {
.cmd = TASKSTATS_CMD_GET,
.doit = taskstats_user_cmd,
.policy = taskstats_cmd_get_policy,
+ .flags = GENL_ADMIN_PERM,
};
static struct genl_ops cgroupstats_ops = {
diff --git a/kernel/tsacct.c b/kernel/tsacct.c
index 24dc60d9fa1..5bbfac85866 100644
--- a/kernel/tsacct.c
+++ b/kernel/tsacct.c
@@ -78,6 +78,7 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk)
#define KB 1024
#define MB (1024*KB)
+#define KB_MASK (~(KB-1))
/*
* fill in extended accounting fields
*/
@@ -95,14 +96,14 @@ void xacct_add_tsk(struct taskstats *stats, struct task_struct *p)
stats->hiwater_vm = get_mm_hiwater_vm(mm) * PAGE_SIZE / KB;
mmput(mm);
}
- stats->read_char = p->ioac.rchar;
- stats->write_char = p->ioac.wchar;
- stats->read_syscalls = p->ioac.syscr;
- stats->write_syscalls = p->ioac.syscw;
+ stats->read_char = p->ioac.rchar & KB_MASK;
+ stats->write_char = p->ioac.wchar & KB_MASK;
+ stats->read_syscalls = p->ioac.syscr & KB_MASK;
+ stats->write_syscalls = p->ioac.syscw & KB_MASK;
#ifdef CONFIG_TASK_IO_ACCOUNTING
- stats->read_bytes = p->ioac.read_bytes;
- stats->write_bytes = p->ioac.write_bytes;
- stats->cancelled_write_bytes = p->ioac.cancelled_write_bytes;
+ stats->read_bytes = p->ioac.read_bytes & KB_MASK;
+ stats->write_bytes = p->ioac.write_bytes & KB_MASK;
+ stats->cancelled_write_bytes = p->ioac.cancelled_write_bytes & KB_MASK;
#else
stats->read_bytes = 0;
stats->write_bytes = 0;
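KB_MASK simply rounds a byte counter down to a whole-KiB boundary; for instance:

/* Hedged sketch of what the KB_MASK masking above does. */
#include <stdio.h>

#define KB      1024
#define KB_MASK (~(KB - 1))

int main(void)
{
	unsigned long long bytes = 5000;

	/* 5000 & ~1023 == 4096, i.e. rounded down to a 1 KiB multiple */
	printf("%llu -> %llu\n", bytes, bytes & KB_MASK);
	return 0;
}
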
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index d0324986a3b..75330bd8756 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1081,7 +1081,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER
depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
depends on !X86_64
select STACKTRACE
- select FRAME_POINTER if !PPC && !S390 && !MICROBLAZE
+ select FRAME_POINTER if !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
help
Provide stacktrace filter for fault-injection capabilities
@@ -1091,7 +1091,7 @@ config LATENCYTOP
depends on DEBUG_KERNEL
depends on STACKTRACE_SUPPORT
depends on PROC_FS
- select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
+ select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
select KALLSYMS
select KALLSYMS_ALL
select STACKTRACE
diff --git a/lib/hexdump.c b/lib/hexdump.c
index f5fe6ba7a3a..51d5ae21024 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -38,14 +38,21 @@ EXPORT_SYMBOL(hex_to_bin);
* @dst: binary result
* @src: ascii hexadecimal string
* @count: result length
+ *
+ * Return 0 on success, -1 in case of bad input.
*/
-void hex2bin(u8 *dst, const char *src, size_t count)
+int hex2bin(u8 *dst, const char *src, size_t count)
{
while (count--) {
- *dst = hex_to_bin(*src++) << 4;
- *dst += hex_to_bin(*src++);
- dst++;
+ int hi = hex_to_bin(*src++);
+ int lo = hex_to_bin(*src++);
+
+ if ((hi < 0) || (lo < 0))
+ return -1;
+
+ *dst++ = (hi << 4) | lo;
}
+ return 0;
}
EXPORT_SYMBOL(hex2bin);
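With the change above, hex2bin() reports bad input instead of silently writing garbage, so callers are expected to check the return value; a hedged userspace sketch of such a caller, using local re-implementations with the signatures shown in the hunk:

/* Hedged sketch: hex_to_bin()/hex2bin() below are local copies for
 * illustration, matching the signatures in the hunk above.
 */
#include <stdio.h>
#include <stddef.h>
#include <ctype.h>

typedef unsigned char u8;

static int hex_to_bin(char ch)
{
	if (ch >= '0' && ch <= '9')
		return ch - '0';
	ch = tolower(ch);
	if (ch >= 'a' && ch <= 'f')
		return ch - 'a' + 10;
	return -1;
}

static int hex2bin(u8 *dst, const char *src, size_t count)
{
	while (count--) {
		int hi = hex_to_bin(*src++);
		int lo = hex_to_bin(*src++);

		if ((hi < 0) || (lo < 0))
			return -1;
		*dst++ = (hi << 4) | lo;
	}
	return 0;
}

int main(void)
{
	u8 key[4];

	if (hex2bin(key, "deadbeef", sizeof(key)) < 0)
		fprintf(stderr, "bad hex string\n");
	else
		printf("%02x%02x%02x%02x\n", key[0], key[1], key[2], key[3]);
	return 0;
}
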
diff --git a/lib/xz/xz_dec_bcj.c b/lib/xz/xz_dec_bcj.c
index e51e2558ca9..a768e6d28bb 100644
--- a/lib/xz/xz_dec_bcj.c
+++ b/lib/xz/xz_dec_bcj.c
@@ -441,8 +441,12 @@ XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
* next filter in the chain. Apply the BCJ filter on the new data
* in the output buffer. If everything cannot be filtered, copy it
* to temp and rewind the output buffer position accordingly.
+ *
+ * This always needs to be run when temp.size == 0 to handle a special
+ * case where the output buffer is full and the next filter has no
+ * more output coming but hasn't returned XZ_STREAM_END yet.
*/
- if (s->temp.size < b->out_size - b->out_pos) {
+ if (s->temp.size < b->out_size - b->out_pos || s->temp.size == 0) {
out_start = b->out_pos;
memcpy(b->out + b->out_pos, s->temp.buf, s->temp.size);
b->out_pos += s->temp.size;
@@ -465,16 +469,25 @@ XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
s->temp.size = b->out_pos - out_start;
b->out_pos -= s->temp.size;
memcpy(s->temp.buf, b->out + b->out_pos, s->temp.size);
+
+ /*
+ * If there wasn't enough input to the next filter to fill
+ * the output buffer with unfiltered data, there's no point
+ * in trying to decode more data into temp.
+ */
+ if (b->out_pos + s->temp.size < b->out_size)
+ return XZ_OK;
}
/*
- * If we have unfiltered data in temp, try to fill by decoding more
- * data from the next filter. Apply the BCJ filter on temp. Then we
- * hopefully can fill the actual output buffer by copying filtered
- * data from temp. A mix of filtered and unfiltered data may be left
- * in temp; it will be taken care on the next call to this function.
+ * We have unfiltered data in temp. If the output buffer isn't full
+ * yet, try to fill the temp buffer by decoding more data from the
+ * next filter. Apply the BCJ filter on temp. Then we hopefully can
+ * fill the actual output buffer by copying filtered data from temp.
+ * A mix of filtered and unfiltered data may be left in temp; it will
+ * be taken care of on the next call to this function.
*/
- if (s->temp.size > 0) {
+ if (b->out_pos < b->out_size) {
/* Make b->out{,_pos,_size} temporarily point to s->temp. */
s->out = b->out;
s->out_pos = b->out_pos;
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index d6edf8d14f9..a87da524a4a 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -359,6 +359,17 @@ static unsigned long bdi_longest_inactive(void)
return max(5UL * 60 * HZ, interval);
}
+/*
+ * Clear the pending bit and wake up anybody waiting for flusher thread
+ * creation or shutdown.
+ */
+static void bdi_clear_pending(struct backing_dev_info *bdi)
+{
+ clear_bit(BDI_pending, &bdi->state);
+ smp_mb__after_clear_bit();
+ wake_up_bit(&bdi->state, BDI_pending);
+}
+
static int bdi_forker_thread(void *ptr)
{
struct bdi_writeback *me = ptr;
@@ -390,6 +401,13 @@ static int bdi_forker_thread(void *ptr)
}
spin_lock_bh(&bdi_lock);
+ /*
+ * In the following loop we are going to check whether we have
+ * some work to do without any synchronization with tasks
+ * waking us up to do work for them. So we have to set the task
+ * state here in advance, so that we don't miss wakeups that arrive
+ * after we have checked a condition.
+ */
set_current_state(TASK_INTERRUPTIBLE);
list_for_each_entry(bdi, &bdi_list, bdi_list) {
@@ -469,11 +487,13 @@ static int bdi_forker_thread(void *ptr)
spin_unlock_bh(&bdi->wb_lock);
wake_up_process(task);
}
+ bdi_clear_pending(bdi);
break;
case KILL_THREAD:
__set_current_state(TASK_RUNNING);
kthread_stop(task);
+ bdi_clear_pending(bdi);
break;
case NO_ACTION:
@@ -489,16 +509,8 @@ static int bdi_forker_thread(void *ptr)
else
schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
try_to_freeze();
- /* Back to the main loop */
- continue;
+ break;
}
-
- /*
- * Clear pending bit and wakeup anybody waiting to tear us down.
- */
- clear_bit(BDI_pending, &bdi->state);
- smp_mb__after_clear_bit();
- wake_up_bit(&bdi->state, BDI_pending);
}
return 0;
diff --git a/mm/migrate.c b/mm/migrate.c
index 666e4e67741..14d0a6a632f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -120,10 +120,10 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
ptep = pte_offset_map(pmd, addr);
- if (!is_swap_pte(*ptep)) {
- pte_unmap(ptep);
- goto out;
- }
+ /*
+ * Peek to check is_swap_pte() before taking ptlock? No, we
+ * can race mremap's move_ptes(), which skips anon_vma lock.
+ */
ptl = pte_lockptr(mm, pmd);
}
diff --git a/mm/shmem.c b/mm/shmem.c
index 32f6763f16f..2d357729529 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1458,7 +1458,7 @@ shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
if (inode) {
error = security_inode_init_security(inode, dir,
- &dentry->d_name, NULL,
+ &dentry->d_name,
NULL, NULL);
if (error) {
if (error != -EOPNOTSUPP) {
@@ -1598,7 +1598,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
if (!inode)
return -ENOSPC;
- error = security_inode_init_security(inode, dir, &dentry->d_name, NULL,
+ error = security_inode_init_security(inode, dir, &dentry->d_name,
NULL, NULL);
if (error) {
if (error != -EOPNOTSUPP) {
diff --git a/mm/slub.c b/mm/slub.c
index 9f662d70eb4..7c54fe83a90 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2377,7 +2377,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
*/
if (unlikely(!prior)) {
remove_full(s, page);
- add_partial(n, page, 0);
+ add_partial(n, page, 1);
stat(s, FREE_ADD_PARTIAL);
}
}
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 3e2f91ffa4e..05dd35114a2 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -565,7 +565,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
struct orig_node *orig_node = NULL;
int data_len = skb->len, ret;
short vid = -1;
- bool do_bcast = false;
+ bool do_bcast;
if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
goto dropped;
@@ -598,15 +598,15 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
tt_local_add(soft_iface, ethhdr->h_source);
orig_node = transtable_search(bat_priv, ethhdr->h_dest);
- if (is_multicast_ether_addr(ethhdr->h_dest) ||
- (orig_node && orig_node->gw_flags)) {
+ do_bcast = is_multicast_ether_addr(ethhdr->h_dest);
+ if (do_bcast || (orig_node && orig_node->gw_flags)) {
ret = gw_is_target(bat_priv, skb, orig_node);
if (ret < 0)
goto dropped;
- if (ret == 0)
- do_bcast = true;
+ if (ret)
+ do_bcast = false;
}
/* ethernet packet should be broadcasted */
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index a40170e022e..7ef4eb4435f 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -58,8 +58,8 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
if (status)
return;
- if (test_bit(HCI_MGMT, &hdev->flags) &&
- test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
+ if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
+ test_bit(HCI_MGMT, &hdev->flags))
mgmt_discovering(hdev->id, 0);
hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
@@ -76,8 +76,8 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
if (status)
return;
- if (test_bit(HCI_MGMT, &hdev->flags) &&
- test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
+ if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
+ test_bit(HCI_MGMT, &hdev->flags))
mgmt_discovering(hdev->id, 0);
hci_conn_check_pending(hdev);
@@ -959,9 +959,8 @@ static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
return;
}
- if (test_bit(HCI_MGMT, &hdev->flags) &&
- !test_and_set_bit(HCI_INQUIRY,
- &hdev->flags))
+ if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags) &&
+ test_bit(HCI_MGMT, &hdev->flags))
mgmt_discovering(hdev->id, 1);
}
@@ -1340,8 +1339,8 @@ static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff
BT_DBG("%s status %d", hdev->name, status);
- if (test_bit(HCI_MGMT, &hdev->flags) &&
- test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
+ if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
+ test_bit(HCI_MGMT, &hdev->flags))
mgmt_discovering(hdev->id, 0);
hci_req_complete(hdev, HCI_OP_INQUIRY, status);
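The operand reordering above matters because && short-circuits: with the old order, test_and_clear_bit() never ran when HCI_MGMT was unset, so HCI_INQUIRY could be left set. A tiny stand-alone illustration of the side effect being skipped:

/* Hedged sketch of why the operand order above matters: the second
 * operand of && never runs when the first is false.
 */
#include <stdio.h>
#include <stdbool.h>

static bool clear_flag(bool *flag)	/* has a side effect, like test_and_clear_bit() */
{
	bool was_set = *flag;
	*flag = false;
	return was_set;
}

int main(void)
{
	bool mgmt = false, inquiry = true;

	if (mgmt && clear_flag(&inquiry))	/* old order: clear_flag() is skipped */
		;
	printf("old order leaves inquiry=%d\n", inquiry);	/* still 1 */

	inquiry = true;
	if (clear_flag(&inquiry) && mgmt)	/* new order: flag always cleared */
		;
	printf("new order leaves inquiry=%d\n", inquiry);	/* now 0 */
	return 0;
}
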
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 61f1f623091..e8292369cdc 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -26,6 +26,8 @@
/* Bluetooth L2CAP sockets. */
+#include <linux/security.h>
+
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
@@ -933,6 +935,8 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
chan->force_reliable = pchan->force_reliable;
chan->flushable = pchan->flushable;
chan->force_active = pchan->force_active;
+
+ security_sk_clone(parent, sk);
} else {
switch (sk->sk_type) {
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 482722bbc7a..5417f612732 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -42,6 +42,7 @@
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <linux/security.h>
#include <net/sock.h>
#include <asm/system.h>
@@ -264,6 +265,8 @@ static void rfcomm_sock_init(struct sock *sk, struct sock *parent)
pi->sec_level = rfcomm_pi(parent)->sec_level;
pi->role_switch = rfcomm_pi(parent)->role_switch;
+
+ security_sk_clone(parent, sk);
} else {
pi->dlc->defer_setup = 0;
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 8270f05e3f1..a324b009e34 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -41,6 +41,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/list.h>
+#include <linux/security.h>
#include <net/sock.h>
#include <asm/system.h>
@@ -403,8 +404,10 @@ static void sco_sock_init(struct sock *sk, struct sock *parent)
{
BT_DBG("sk %p", sk);
- if (parent)
+ if (parent) {
sk->sk_type = parent->sk_type;
+ security_sk_clone(parent, sk);
+ }
}
static struct proto sco_proto = {
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 32b8f9f7f79..ff3ed6086ce 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -91,7 +91,6 @@ static int br_dev_open(struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
- netif_carrier_off(dev);
netdev_update_features(dev);
netif_start_queue(dev);
br_stp_enable_bridge(br);
@@ -108,8 +107,6 @@ static int br_dev_stop(struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
- netif_carrier_off(dev);
-
br_stp_disable_bridge(br);
br_multicast_stop(br);
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index e73815456ad..1d420f64ff2 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -161,9 +161,10 @@ static void del_nbp(struct net_bridge_port *p)
call_rcu(&p->rcu, destroy_nbp_rcu);
}
-/* called with RTNL */
-static void del_br(struct net_bridge *br, struct list_head *head)
+/* Delete bridge device */
+void br_dev_delete(struct net_device *dev, struct list_head *head)
{
+ struct net_bridge *br = netdev_priv(dev);
struct net_bridge_port *p, *n;
list_for_each_entry_safe(p, n, &br->port_list, list) {
@@ -268,7 +269,7 @@ int br_del_bridge(struct net *net, const char *name)
}
else
- del_br(netdev_priv(dev), NULL);
+ br_dev_delete(dev, NULL);
rtnl_unlock();
return ret;
@@ -449,7 +450,7 @@ void __net_exit br_net_exit(struct net *net)
rtnl_lock();
for_each_netdev(net, dev)
if (dev->priv_flags & IFF_EBRIDGE)
- del_br(netdev_priv(dev), &list);
+ br_dev_delete(dev, &list);
unregister_netdevice_many(&list);
rtnl_unlock();
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 5b1ed1ba9aa..e5f9ece3c9a 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -210,6 +210,7 @@ static struct rtnl_link_ops br_link_ops __read_mostly = {
.priv_size = sizeof(struct net_bridge),
.setup = br_dev_setup,
.validate = br_validate,
+ .dellink = br_dev_delete,
};
int __init br_netlink_init(void)
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 78cc364997d..857a021deea 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -294,6 +294,7 @@ static inline int br_is_root_bridge(const struct net_bridge *br)
/* br_device.c */
extern void br_dev_setup(struct net_device *dev);
+extern void br_dev_delete(struct net_device *dev, struct list_head *list);
extern netdev_tx_t br_dev_xmit(struct sk_buff *skb,
struct net_device *dev);
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/net/bridge/netfilter/Kconfig b/net/bridge/netfilter/Kconfig
index ba6f73eb06c..a9aff9c7d02 100644
--- a/net/bridge/netfilter/Kconfig
+++ b/net/bridge/netfilter/Kconfig
@@ -4,7 +4,7 @@
menuconfig BRIDGE_NF_EBTABLES
tristate "Ethernet Bridge tables (ebtables) support"
- depends on BRIDGE && BRIDGE_NETFILTER
+ depends on BRIDGE && NETFILTER
select NETFILTER_XTABLES
help
ebtables is a general, extensible frame/packet identification
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 7c2fa0a0814..7f9ac0742d1 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -93,10 +93,14 @@ static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
caifdevs = caif_device_list(dev_net(dev));
BUG_ON(!caifdevs);
- caifd = kzalloc(sizeof(*caifd), GFP_ATOMIC);
+ caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
if (!caifd)
return NULL;
caifd->pcpu_refcnt = alloc_percpu(int);
+ if (!caifd->pcpu_refcnt) {
+ kfree(caifd);
+ return NULL;
+ }
caifd->netdev = dev;
dev_hold(dev);
return caifd;
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 8ce926d3b2c..9b0c32a2690 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -857,7 +857,7 @@ static __exit void can_exit(void)
struct net_device *dev;
if (stats_timer)
- del_timer(&can_stattimer);
+ del_timer_sync(&can_stattimer);
can_remove_proc();
diff --git a/net/can/bcm.c b/net/can/bcm.c
index d6c8ae5b2e6..c84963d2dee 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -344,6 +344,18 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
}
}
+static void bcm_tx_start_timer(struct bcm_op *op)
+{
+ if (op->kt_ival1.tv64 && op->count)
+ hrtimer_start(&op->timer,
+ ktime_add(ktime_get(), op->kt_ival1),
+ HRTIMER_MODE_ABS);
+ else if (op->kt_ival2.tv64)
+ hrtimer_start(&op->timer,
+ ktime_add(ktime_get(), op->kt_ival2),
+ HRTIMER_MODE_ABS);
+}
+
static void bcm_tx_timeout_tsklet(unsigned long data)
{
struct bcm_op *op = (struct bcm_op *)data;
@@ -365,26 +377,12 @@ static void bcm_tx_timeout_tsklet(unsigned long data)
bcm_send_to_user(op, &msg_head, NULL, 0);
}
- }
-
- if (op->kt_ival1.tv64 && (op->count > 0)) {
-
- /* send (next) frame */
bcm_can_tx(op);
- hrtimer_start(&op->timer,
- ktime_add(ktime_get(), op->kt_ival1),
- HRTIMER_MODE_ABS);
- } else {
- if (op->kt_ival2.tv64) {
+ } else if (op->kt_ival2.tv64)
+ bcm_can_tx(op);
- /* send (next) frame */
- bcm_can_tx(op);
- hrtimer_start(&op->timer,
- ktime_add(ktime_get(), op->kt_ival2),
- HRTIMER_MODE_ABS);
- }
- }
+ bcm_tx_start_timer(op);
}
/*
@@ -964,23 +962,20 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
hrtimer_cancel(&op->timer);
}
- if ((op->flags & STARTTIMER) &&
- ((op->kt_ival1.tv64 && op->count) || op->kt_ival2.tv64)) {
-
+ if (op->flags & STARTTIMER) {
+ hrtimer_cancel(&op->timer);
/* spec: send can_frame when starting timer */
op->flags |= TX_ANNOUNCE;
-
- if (op->kt_ival1.tv64 && (op->count > 0)) {
- /* op->count-- is done in bcm_tx_timeout_handler */
- hrtimer_start(&op->timer, op->kt_ival1,
- HRTIMER_MODE_REL);
- } else
- hrtimer_start(&op->timer, op->kt_ival2,
- HRTIMER_MODE_REL);
}
- if (op->flags & TX_ANNOUNCE)
+ if (op->flags & TX_ANNOUNCE) {
bcm_can_tx(op);
+ if (op->count)
+ op->count--;
+ }
+
+ if (op->flags & STARTTIMER)
+ bcm_tx_start_timer(op);
return msg_head->nframes * CFSIZ + MHSIZ;
}
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 132963abc26..2883ea01e68 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -232,6 +232,7 @@ void ceph_destroy_options(struct ceph_options *opt)
ceph_crypto_key_destroy(opt->key);
kfree(opt->key);
}
+ kfree(opt->mon_addr);
kfree(opt);
}
EXPORT_SYMBOL(ceph_destroy_options);
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index c340e2e0765..9918e9eb276 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -2307,6 +2307,7 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags)
m->front_max = front_len;
m->front_is_vmalloc = false;
m->more_to_follow = false;
+ m->ack_stamp = 0;
m->pool = NULL;
/* middle */
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 16836a7df7a..88ad8a2501b 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -217,6 +217,7 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
INIT_LIST_HEAD(&req->r_unsafe_item);
INIT_LIST_HEAD(&req->r_linger_item);
INIT_LIST_HEAD(&req->r_linger_osd);
+ INIT_LIST_HEAD(&req->r_req_lru_item);
req->r_flags = flags;
WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0);
@@ -816,13 +817,10 @@ static void __register_request(struct ceph_osd_client *osdc,
{
req->r_tid = ++osdc->last_tid;
req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
- INIT_LIST_HEAD(&req->r_req_lru_item);
-
dout("__register_request %p tid %lld\n", req, req->r_tid);
__insert_request(osdc, req);
ceph_osdc_get_request(req);
osdc->num_requests++;
-
if (osdc->num_requests == 1) {
dout(" first request, scheduling timeout\n");
__schedule_osd_timeout(osdc);
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index e97c3588c3e..fd863fe7693 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -339,6 +339,7 @@ static int __insert_pg_mapping(struct ceph_pg_mapping *new,
struct ceph_pg_mapping *pg = NULL;
int c;
+ dout("__insert_pg_mapping %llx %p\n", *(u64 *)&new->pgid, new);
while (*p) {
parent = *p;
pg = rb_entry(parent, struct ceph_pg_mapping, node);
@@ -366,16 +367,33 @@ static struct ceph_pg_mapping *__lookup_pg_mapping(struct rb_root *root,
while (n) {
pg = rb_entry(n, struct ceph_pg_mapping, node);
c = pgid_cmp(pgid, pg->pgid);
- if (c < 0)
+ if (c < 0) {
n = n->rb_left;
- else if (c > 0)
+ } else if (c > 0) {
n = n->rb_right;
- else
+ } else {
+ dout("__lookup_pg_mapping %llx got %p\n",
+ *(u64 *)&pgid, pg);
return pg;
+ }
}
return NULL;
}
+static int __remove_pg_mapping(struct rb_root *root, struct ceph_pg pgid)
+{
+ struct ceph_pg_mapping *pg = __lookup_pg_mapping(root, pgid);
+
+ if (pg) {
+ dout("__remove_pg_mapping %llx %p\n", *(u64 *)&pgid, pg);
+ rb_erase(&pg->node, root);
+ kfree(pg);
+ return 0;
+ }
+ dout("__remove_pg_mapping %llx dne\n", *(u64 *)&pgid);
+ return -ENOENT;
+}
+
/*
* rbtree of pg pool info
*/
@@ -711,7 +729,6 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
void *start = *p;
int err = -EINVAL;
u16 version;
- struct rb_node *rbp;
ceph_decode_16_safe(p, end, version, bad);
if (version > CEPH_OSDMAP_INC_VERSION) {
@@ -861,7 +878,6 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
}
/* new_pg_temp */
- rbp = rb_first(&map->pg_temp);
ceph_decode_32_safe(p, end, len, bad);
while (len--) {
struct ceph_pg_mapping *pg;
@@ -872,18 +888,6 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
ceph_decode_copy(p, &pgid, sizeof(pgid));
pglen = ceph_decode_32(p);
- /* remove any? */
- while (rbp && pgid_cmp(rb_entry(rbp, struct ceph_pg_mapping,
- node)->pgid, pgid) <= 0) {
- struct ceph_pg_mapping *cur =
- rb_entry(rbp, struct ceph_pg_mapping, node);
-
- rbp = rb_next(rbp);
- dout(" removed pg_temp %llx\n", *(u64 *)&cur->pgid);
- rb_erase(&cur->node, &map->pg_temp);
- kfree(cur);
- }
-
if (pglen) {
/* insert */
ceph_decode_need(p, end, pglen*sizeof(u32), bad);
@@ -903,17 +907,11 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
}
dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid,
pglen);
+ } else {
+ /* remove */
+ __remove_pg_mapping(&map->pg_temp, pgid);
}
}
- while (rbp) {
- struct ceph_pg_mapping *cur =
- rb_entry(rbp, struct ceph_pg_mapping, node);
-
- rbp = rb_next(rbp);
- dout(" removed pg_temp %llx\n", *(u64 *)&cur->pgid);
- rb_erase(&cur->node, &map->pg_temp);
- kfree(cur);
- }
/* ignore the rest */
*p = end;
@@ -1046,10 +1044,25 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
struct ceph_pg_mapping *pg;
struct ceph_pg_pool_info *pool;
int ruleno;
- unsigned poolid, ps, pps;
+ unsigned poolid, ps, pps, t;
int preferred;
+ poolid = le32_to_cpu(pgid.pool);
+ ps = le16_to_cpu(pgid.ps);
+ preferred = (s16)le16_to_cpu(pgid.preferred);
+
+ pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
+ if (!pool)
+ return NULL;
+
/* pg_temp? */
+ if (preferred >= 0)
+ t = ceph_stable_mod(ps, le32_to_cpu(pool->v.lpg_num),
+ pool->lpgp_num_mask);
+ else
+ t = ceph_stable_mod(ps, le32_to_cpu(pool->v.pg_num),
+ pool->pgp_num_mask);
+ pgid.ps = cpu_to_le16(t);
pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
if (pg) {
*num = pg->len;
@@ -1057,18 +1070,6 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
}
/* crush */
- poolid = le32_to_cpu(pgid.pool);
- ps = le16_to_cpu(pgid.ps);
- preferred = (s16)le16_to_cpu(pgid.preferred);
-
- /* don't forcefeed bad device ids to crush */
- if (preferred >= osdmap->max_osd ||
- preferred >= osdmap->crush->max_devices)
- preferred = -1;
-
- pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
- if (!pool)
- return NULL;
ruleno = crush_find_rule(osdmap->crush, pool->v.crush_ruleset,
pool->v.type, pool->v.size);
if (ruleno < 0) {
@@ -1078,6 +1079,11 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
return NULL;
}
+ /* don't forcefeed bad device ids to crush */
+ if (preferred >= osdmap->max_osd ||
+ preferred >= osdmap->crush->max_devices)
+ preferred = -1;
+
if (preferred >= 0)
pps = ceph_stable_mod(ps,
le32_to_cpu(pool->v.lpgp_num),
diff --git a/net/core/dev.c b/net/core/dev.c
index 17d67b579be..b10ff0a7185 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1515,6 +1515,14 @@ static inline bool is_skb_forwardable(struct net_device *dev,
*/
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
+ if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
+ if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
+ atomic_long_inc(&dev->rx_dropped);
+ kfree_skb(skb);
+ return NET_RX_DROP;
+ }
+ }
+
skb_orphan(skb);
nf_reset(skb);
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index e7ab0c0285b..27071ee2a4e 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -384,8 +384,8 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
*/
list_for_each_entry(r, &ops->rules_list, list) {
if (r->action == FR_ACT_GOTO &&
- r->target == rule->pref) {
- BUG_ON(rtnl_dereference(r->ctarget) != NULL);
+ r->target == rule->pref &&
+ rtnl_dereference(r->ctarget) == NULL) {
rcu_assign_pointer(r->ctarget, rule);
if (--ops->unresolved_rules == 0)
break;
@@ -475,8 +475,11 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
list_del_rcu(&rule->list);
- if (rule->action == FR_ACT_GOTO)
+ if (rule->action == FR_ACT_GOTO) {
ops->nr_goto_rules--;
+ if (rtnl_dereference(rule->ctarget) == NULL)
+ ops->unresolved_rules--;
+ }
/*
* Check if this rule is a target to any of them. If so,
diff --git a/net/core/flow.c b/net/core/flow.c
index bf32c33cad3..555a456efb0 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -30,6 +30,7 @@ struct flow_cache_entry {
struct hlist_node hlist;
struct list_head gc_list;
} u;
+ struct net *net;
u16 family;
u8 dir;
u32 genid;
@@ -172,29 +173,26 @@ static void flow_new_hash_rnd(struct flow_cache *fc,
static u32 flow_hash_code(struct flow_cache *fc,
struct flow_cache_percpu *fcp,
- const struct flowi *key)
+ const struct flowi *key,
+ size_t keysize)
{
const u32 *k = (const u32 *) key;
+ const u32 length = keysize * sizeof(flow_compare_t) / sizeof(u32);
- return jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd)
+ return jhash2(k, length, fcp->hash_rnd)
& (flow_cache_hash_size(fc) - 1);
}
-typedef unsigned long flow_compare_t;
-
/* I hear what you're saying, use memcmp. But memcmp cannot make
- * important assumptions that we can here, such as alignment and
- * constant size.
+ * important assumptions that we can here, such as alignment.
*/
-static int flow_key_compare(const struct flowi *key1, const struct flowi *key2)
+static int flow_key_compare(const struct flowi *key1, const struct flowi *key2,
+ size_t keysize)
{
const flow_compare_t *k1, *k1_lim, *k2;
- const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t);
-
- BUILD_BUG_ON(sizeof(struct flowi) % sizeof(flow_compare_t));
k1 = (const flow_compare_t *) key1;
- k1_lim = k1 + n_elem;
+ k1_lim = k1 + keysize;
k2 = (const flow_compare_t *) key2;
@@ -215,6 +213,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
struct flow_cache_entry *fle, *tfle;
struct hlist_node *entry;
struct flow_cache_object *flo;
+ size_t keysize;
unsigned int hash;
local_bh_disable();
@@ -222,6 +221,11 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
fle = NULL;
flo = NULL;
+
+ keysize = flow_key_size(family);
+ if (!keysize)
+ goto nocache;
+
/* Packet really early in init? Making flow_cache_init a
* pre-smp initcall would solve this. --RR */
if (!fcp->hash_table)
@@ -230,11 +234,12 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
if (fcp->hash_rnd_recalc)
flow_new_hash_rnd(fc, fcp);
- hash = flow_hash_code(fc, fcp, key);
+ hash = flow_hash_code(fc, fcp, key, keysize);
hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) {
- if (tfle->family == family &&
+ if (tfle->net == net &&
+ tfle->family == family &&
tfle->dir == dir &&
- flow_key_compare(key, &tfle->key) == 0) {
+ flow_key_compare(key, &tfle->key, keysize) == 0) {
fle = tfle;
break;
}
@@ -246,9 +251,10 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
if (fle) {
+ fle->net = net;
fle->family = family;
fle->dir = dir;
- memcpy(&fle->key, key, sizeof(*key));
+ memcpy(&fle->key, key, keysize * sizeof(flow_compare_t));
fle->object = NULL;
hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
fcp->hash_count++;
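The lookup above now hashes and compares only flow_key_size(family) words of the key instead of the whole struct flowi; as a loose stand-alone illustration (a trivial mixing loop standing in for jhash2(), not the real thing):

/* Hedged sketch: hash only the first "keysize" 32-bit words of a key,
 * as the flow cache now does.  The types and mixing loop are toy
 * stand-ins, not kernel code.
 */
#include <stdint.h>
#include <stddef.h>

static uint32_t toy_hash_words(const uint32_t *k, size_t nwords, uint32_t seed)
{
	uint32_t h = seed;

	for (size_t i = 0; i < nwords; i++) {
		h ^= k[i];
		h *= 0x9e3779b1u;	/* arbitrary odd multiplier */
	}
	return h;
}

struct toy_key {
	uint32_t words[16];	/* stand-in for struct flowi */
};

static uint32_t toy_key_hash(const struct toy_key *key, size_t keysize,
			     uint32_t seed, size_t table_size)
{
	/* only the family-specific prefix of the key participates;
	 * table_size is assumed to be a power of two */
	return toy_hash_words(key->words, keysize, seed) & (table_size - 1);
}
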
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 27002dffe7e..387703f56fc 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -611,8 +611,21 @@ struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
}
EXPORT_SYMBOL_GPL(skb_morph);
-/* skb frags copy userspace buffers to kernel */
-static int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
+/* skb_copy_ubufs - copy userspace skb frags buffers to kernel
+ * @skb: the skb to modify
+ * @gfp_mask: allocation priority
+ *
+ * This must be called on SKBTX_DEV_ZEROCOPY skb.
+ * It will copy all frags into kernel and drop the reference
+ * to userspace pages.
+ *
+ * If this function is called from an interrupt, @gfp_mask must be
+ * %GFP_ATOMIC.
+ *
+ * Returns 0 on success or a negative error code on failure
+ * to allocate kernel memory to copy to.
+ */
+int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
{
int i;
int num_frags = skb_shinfo(skb)->nr_frags;
@@ -652,6 +665,8 @@ static int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
skb_shinfo(skb)->frags[i - 1].page = head;
head = (struct page *)head->private;
}
+
+ skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
return 0;
}
@@ -677,7 +692,6 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
if (skb_copy_ubufs(skb, gfp_mask))
return NULL;
- skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
}
n = skb + 1;
@@ -803,7 +817,6 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
n = NULL;
goto out;
}
- skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
@@ -896,7 +909,6 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
if (skb_copy_ubufs(skb, gfp_mask))
goto nofrags;
- skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
get_page(skb_shinfo(skb)->frags[i].page);
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 27997d35ebd..a2468363978 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -340,7 +340,7 @@ void ether_setup(struct net_device *dev)
dev->addr_len = ETH_ALEN;
dev->tx_queue_len = 1000; /* Ethernet wants good queues */
dev->flags = IFF_BROADCAST|IFF_MULTICAST;
- dev->priv_flags = IFF_TX_SKB_SHARING;
+ dev->priv_flags |= IFF_TX_SKB_SHARING;
memset(dev->broadcast, 0xFF, ETH_ALEN);
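
An illustrative user-space sketch (not part of the patch above) of why the |= matters: a plain assignment throws away any priv_flags a caller set before ether_setup(), while OR-ing preserves them. The flag values below are made up for the demo.

#include <stdio.h>

/* Made-up flag values, standing in for IFF_* bits in this demo only. */
#define DEMO_LIVE_ADDR_CHANGE 0x1
#define DEMO_TX_SKB_SHARING   0x2

int main(void)
{
	unsigned int assigned = DEMO_LIVE_ADDR_CHANGE;
	unsigned int ored     = DEMO_LIVE_ADDR_CHANGE;

	assigned = DEMO_TX_SKB_SHARING;   /* old code: the earlier bit is lost */
	ored    |= DEMO_TX_SKB_SHARING;   /* patched code: both bits survive */

	printf("plain '=' : 0x%x\n", assigned);   /* prints 0x2 */
	printf("'|='      : 0x%x\n", ored);       /* prints 0x3 */
	return 0;
}
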
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 1b745d412cf..dd2b9478ddd 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -466,8 +466,13 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
goto out;
if (addr->sin_family != AF_INET) {
+ /* Compatibility games: accept AF_UNSPEC (mapped to AF_INET)
+ * only if s_addr is INADDR_ANY.
+ */
err = -EAFNOSUPPORT;
- goto out;
+ if (addr->sin_family != AF_UNSPEC ||
+ addr->sin_addr.s_addr != htonl(INADDR_ANY))
+ goto out;
}
chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
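
An illustrative user-space sketch (not part of the patch above) of the compatibility rule described in the comment: on a kernel with this change, binding an AF_INET socket with sin_family set to AF_UNSPEC is accepted only when the address is the INADDR_ANY wildcard; any other address still fails with EAFNOSUPPORT.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>

int main(void)
{
	struct sockaddr_in sin;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_UNSPEC;              /* deliberately not AF_INET */
	sin.sin_addr.s_addr = htonl(INADDR_ANY); /* wildcard: accepted */
	sin.sin_port = 0;                        /* any free port */

	if (bind(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0)
		perror("bind(AF_UNSPEC, INADDR_ANY)");
	else
		printf("bind(AF_UNSPEC, INADDR_ANY) accepted\n");

	close(fd);
	return 0;
}
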
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 33e2c35b74b..80106d89d54 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -142,6 +142,14 @@ const struct fib_prop fib_props[RTN_MAX + 1] = {
};
/* Release a nexthop info record */
+static void free_fib_info_rcu(struct rcu_head *head)
+{
+ struct fib_info *fi = container_of(head, struct fib_info, rcu);
+
+ if (fi->fib_metrics != (u32 *) dst_default_metrics)
+ kfree(fi->fib_metrics);
+ kfree(fi);
+}
void free_fib_info(struct fib_info *fi)
{
@@ -156,7 +164,7 @@ void free_fib_info(struct fib_info *fi)
} endfor_nexthops(fi);
fib_info_cnt--;
release_net(fi->fib_net);
- kfree_rcu(fi, rcu);
+ call_rcu(&fi->rcu, free_fib_info_rcu);
}
void fib_release_info(struct fib_info *fi)
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index 5c9b9d96391..e59aabd0eae 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -218,6 +218,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
return skb;
nlmsg_failure:
+ kfree_skb(skb);
*errp = -EINVAL;
printk(KERN_ERR "ip_queue: error creating packet message\n");
return NULL;
@@ -313,7 +314,7 @@ ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len)
{
struct nf_queue_entry *entry;
- if (vmsg->value > NF_MAX_VERDICT)
+ if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN)
return -EINVAL;
entry = ipq_find_dequeue_entry(vmsg->id);
@@ -358,12 +359,9 @@ ipq_receive_peer(struct ipq_peer_msg *pmsg,
break;
case IPQM_VERDICT:
- if (pmsg->msg.verdict.value > NF_MAX_VERDICT)
- status = -EINVAL;
- else
- status = ipq_set_verdict(&pmsg->msg.verdict,
- len - sizeof(*pmsg));
- break;
+ status = ipq_set_verdict(&pmsg->msg.verdict,
+ len - sizeof(*pmsg));
+ break;
default:
status = -EINVAL;
}
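
Why the extra NF_STOLEN test is needed: NF_STOLEN sits numerically inside the 0..NF_MAX_VERDICT range, so the old upper-bound check alone let user space hand back a verdict the queue handler cannot honour. A stand-alone sketch of the post-patch check, with the verdict values reproduced from <linux/netfilter.h> of this era so it compiles on its own:

#include <stdio.h>

/* Verdict values as defined in <linux/netfilter.h> at the time of this
 * patch; copied here only so the check can run stand-alone. */
#define NF_DROP        0
#define NF_ACCEPT      1
#define NF_STOLEN      2
#define NF_QUEUE       3
#define NF_REPEAT      4
#define NF_STOP        5
#define NF_MAX_VERDICT NF_STOP

/* Mirrors the post-patch test in ipq_set_verdict(): a plain upper-bound
 * check is not enough, because NF_STOLEN is inside the valid range. */
static int verdict_is_acceptable(unsigned int value)
{
	return value <= NF_MAX_VERDICT && value != NF_STOLEN;
}

int main(void)
{
	unsigned int v;

	for (v = NF_DROP; v <= NF_MAX_VERDICT + 1; v++)
		printf("verdict %u -> %s\n", v,
		       verdict_is_acceptable(v) ? "ok" : "rejected");
	return 0;
}
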
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index b14ec7d03b6..4bfad5da94f 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -254,6 +254,8 @@ static const struct snmp_mib snmp4_net_list[] = {
SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP),
SNMP_MIB_ITEM("IPReversePathFilter", LINUX_MIB_IPRPFILTER),
SNMP_MIB_ITEM("TCPTimeWaitOverflow", LINUX_MIB_TCPTIMEWAITOVERFLOW),
+ SNMP_MIB_ITEM("TCPReqQFullDoCookies", LINUX_MIB_TCPREQQFULLDOCOOKIES),
+ SNMP_MIB_ITEM("TCPReqQFullDrop", LINUX_MIB_TCPREQQFULLDROP),
SNMP_MIB_SENTINEL
};
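
The two counters added above are exported with the rest of the TcpExt MIB, so they can be read from /proc/net/netstat. A small reader sketch (user space, assumes a kernel containing this change):

#include <stdio.h>
#include <string.h>

/* Prints the TcpExt rows of /proc/net/netstat; the new
 * TCPReqQFullDoCookies / TCPReqQFullDrop columns appear in the header
 * row, with their values in the row that follows. */
int main(void)
{
	char line[4096];
	FILE *fp = fopen("/proc/net/netstat", "r");

	if (!fp) {
		perror("/proc/net/netstat");
		return 1;
	}
	while (fgets(line, sizeof(line), fp)) {
		if (strncmp(line, "TcpExt:", 7) == 0)
			fputs(line, stdout);
	}
	fclose(fp);
	return 0;
}
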
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index ea0d2183df4..d73aab3fbfc 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1124,7 +1124,7 @@ static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack,
return 0;
/* ...Then it's D-SACK, and must reside below snd_una completely */
- if (!after(end_seq, tp->snd_una))
+ if (after(end_seq, tp->snd_una))
return 0;
if (!before(start_seq, tp->undo_marker))
@@ -1389,9 +1389,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
BUG_ON(!pcount);
- /* Tweak before seqno plays */
- if (!tcp_is_fack(tp) && tcp_is_sack(tp) && tp->lost_skb_hint &&
- !before(TCP_SKB_CB(tp->lost_skb_hint)->seq, TCP_SKB_CB(skb)->seq))
+ if (skb == tp->lost_skb_hint)
tp->lost_cnt_hint += pcount;
TCP_SKB_CB(prev)->end_seq += shifted;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 1c12b8ec849..7963e03f106 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -808,20 +808,38 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req)
kfree(inet_rsk(req)->opt);
}
-static void syn_flood_warning(const struct sk_buff *skb)
+/*
+ * Return 1 if a syncookie should be sent
+ */
+int tcp_syn_flood_action(struct sock *sk,
+ const struct sk_buff *skb,
+ const char *proto)
{
- const char *msg;
+ const char *msg = "Dropping request";
+ int want_cookie = 0;
+ struct listen_sock *lopt;
+
+
#ifdef CONFIG_SYN_COOKIES
- if (sysctl_tcp_syncookies)
+ if (sysctl_tcp_syncookies) {
msg = "Sending cookies";
- else
+ want_cookie = 1;
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
+ } else
#endif
- msg = "Dropping request";
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
- pr_info("TCP: Possible SYN flooding on port %d. %s.\n",
- ntohs(tcp_hdr(skb)->dest), msg);
+ lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
+ if (!lopt->synflood_warned) {
+ lopt->synflood_warned = 1;
+ pr_info("%s: Possible SYN flooding on port %d. %s. "
+ " Check SNMP counters.\n",
+ proto, ntohs(tcp_hdr(skb)->dest), msg);
+ }
+ return want_cookie;
}
+EXPORT_SYMBOL(tcp_syn_flood_action);
/*
* Save and compile IPv4 options into the request_sock if needed.
@@ -909,18 +927,21 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
}
sk_nocaps_add(sk, NETIF_F_GSO_MASK);
}
- if (tcp_alloc_md5sig_pool(sk) == NULL) {
+
+ md5sig = tp->md5sig_info;
+ if (md5sig->entries4 == 0 &&
+ tcp_alloc_md5sig_pool(sk) == NULL) {
kfree(newkey);
return -ENOMEM;
}
- md5sig = tp->md5sig_info;
if (md5sig->alloced4 == md5sig->entries4) {
keys = kmalloc((sizeof(*keys) *
(md5sig->entries4 + 1)), GFP_ATOMIC);
if (!keys) {
kfree(newkey);
- tcp_free_md5sig_pool();
+ if (md5sig->entries4 == 0)
+ tcp_free_md5sig_pool();
return -ENOMEM;
}
@@ -964,6 +985,7 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
kfree(tp->md5sig_info->keys4);
tp->md5sig_info->keys4 = NULL;
tp->md5sig_info->alloced4 = 0;
+ tcp_free_md5sig_pool();
} else if (tp->md5sig_info->entries4 != i) {
/* Need to do some manipulation */
memmove(&tp->md5sig_info->keys4[i],
@@ -971,7 +993,6 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
(tp->md5sig_info->entries4 - i) *
sizeof(struct tcp4_md5sig_key));
}
- tcp_free_md5sig_pool();
return 0;
}
}
@@ -1235,11 +1256,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
__be32 saddr = ip_hdr(skb)->saddr;
__be32 daddr = ip_hdr(skb)->daddr;
__u32 isn = TCP_SKB_CB(skb)->when;
-#ifdef CONFIG_SYN_COOKIES
int want_cookie = 0;
-#else
-#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
-#endif
/* Never answer to SYNs send to broadcast or multicast */
if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
@@ -1250,14 +1267,9 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
* evidently real one.
*/
if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
- if (net_ratelimit())
- syn_flood_warning(skb);
-#ifdef CONFIG_SYN_COOKIES
- if (sysctl_tcp_syncookies) {
- want_cookie = 1;
- } else
-#endif
- goto drop;
+ want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
+ if (!want_cookie)
+ goto drop;
}
/* Accept backlog is full. If we have already queued enough
@@ -1303,9 +1315,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
while (l-- > 0)
*c++ ^= *hash_location++;
-#ifdef CONFIG_SYN_COOKIES
want_cookie = 0; /* not our kind of cookie */
-#endif
tmp_ext.cookie_out_never = 0; /* false */
tmp_ext.cookie_plus = tmp_opt.cookie_plus;
} else if (!tp->rx_opt.cookie_in_always) {
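
A companion user-space sketch (not part of the patch): the cookie-or-drop decision made by tcp_syn_flood_action() above follows the net.ipv4.tcp_syncookies sysctl, which can be inspected like this:

#include <stdio.h>

/* Reports the current net.ipv4.tcp_syncookies setting and what a full
 * listen queue will do with new SYNs under this patch. */
int main(void)
{
	int val = -1;
	FILE *fp = fopen("/proc/sys/net/ipv4/tcp_syncookies", "r");

	if (!fp || fscanf(fp, "%d", &val) != 1) {
		fprintf(stderr, "cannot read tcp_syncookies\n");
		if (fp)
			fclose(fp);
		return 1;
	}
	fclose(fp);

	printf("tcp_syncookies = %d -> a full listen queue will %s\n",
	       val, val ? "send SYN cookies" : "drop new requests");
	return 0;
}
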
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index d2fe4e06b47..0ce3d06dce6 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -328,6 +328,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
+ tw->tw_transparent = inet_sk(sk)->transparent;
tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
tcptw->tw_rcv_nxt = tp->rcv_nxt;
tcptw->tw_snd_nxt = tp->snd_nxt;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index f012ebd87b4..12368c58606 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -374,8 +374,8 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
"%s(): cannot allocate memory for statistics; dev=%s.\n",
__func__, dev->name));
neigh_parms_release(&nd_tbl, ndev->nd_parms);
- ndev->dead = 1;
- in6_dev_finish_destroy(ndev);
+ dev_put(dev);
+ kfree(ndev);
return NULL;
}
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 3b5669a2582..d27c797f9f0 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -875,6 +875,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
skb_reset_transport_header(skb);
__skb_push(skb, skb_gro_offset(skb));
+ ops = rcu_dereference(inet6_protos[proto]);
if (!ops || !ops->gro_receive)
goto out_unlock;
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 9ef1831746e..b46e9f88ce3 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -599,7 +599,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
return 0;
}
-int datagram_send_ctl(struct net *net,
+int datagram_send_ctl(struct net *net, struct sock *sk,
struct msghdr *msg, struct flowi6 *fl6,
struct ipv6_txoptions *opt,
int *hlimit, int *tclass, int *dontfrag)
@@ -658,7 +658,8 @@ int datagram_send_ctl(struct net *net,
if (addr_type != IPV6_ADDR_ANY) {
int strict = __ipv6_addr_src_scope(addr_type) <= IPV6_ADDR_SCOPE_LINKLOCAL;
- if (!ipv6_chk_addr(net, &src_info->ipi6_addr,
+ if (!inet_sk(sk)->transparent &&
+ !ipv6_chk_addr(net, &src_info->ipi6_addr,
strict ? dev : NULL, 0))
err = -EINVAL;
else
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index f3caf1b8d57..54303945019 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -322,8 +322,8 @@ static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned lo
}
static struct ip6_flowlabel *
-fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval,
- int optlen, int *err_p)
+fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
+ char __user *optval, int optlen, int *err_p)
{
struct ip6_flowlabel *fl = NULL;
int olen;
@@ -360,7 +360,7 @@ fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval,
msg.msg_control = (void*)(fl->opt+1);
memset(&flowi6, 0, sizeof(flowi6));
- err = datagram_send_ctl(net, &msg, &flowi6, fl->opt, &junk,
+ err = datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, &junk,
&junk, &junk);
if (err)
goto done;
@@ -528,7 +528,7 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
if (freq.flr_label & ~IPV6_FLOWLABEL_MASK)
return -EINVAL;
- fl = fl_create(net, &freq, optval, optlen, &err);
+ fl = fl_create(net, sk, &freq, optval, optlen, &err);
if (fl == NULL)
return err;
sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 705c8288628..def0538e241 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -696,8 +696,10 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
int err;
err = ip6mr_fib_lookup(net, &fl6, &mrt);
- if (err < 0)
+ if (err < 0) {
+ kfree_skb(skb);
return err;
+ }
read_lock(&mrt_lock);
dev->stats.tx_bytes += skb->len;
@@ -2052,8 +2054,10 @@ int ip6_mr_input(struct sk_buff *skb)
int err;
err = ip6mr_fib_lookup(net, &fl6, &mrt);
- if (err < 0)
+ if (err < 0) {
+ kfree_skb(skb);
return err;
+ }
read_lock(&mrt_lock);
cache = ip6mr_cache_find(mrt,
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 147ede38ab4..2fbda5fc4cc 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -475,7 +475,7 @@ sticky_done:
msg.msg_controllen = optlen;
msg.msg_control = (void*)(opt+1);
- retv = datagram_send_ctl(net, &msg, &fl6, opt, &junk, &junk,
+ retv = datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, &junk,
&junk);
if (retv)
goto done;
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index 24939486328..e63c3972a73 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -218,6 +218,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
return skb;
nlmsg_failure:
+ kfree_skb(skb);
*errp = -EINVAL;
printk(KERN_ERR "ip6_queue: error creating packet message\n");
return NULL;
@@ -313,7 +314,7 @@ ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len)
{
struct nf_queue_entry *entry;
- if (vmsg->value > NF_MAX_VERDICT)
+ if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN)
return -EINVAL;
entry = ipq_find_dequeue_entry(vmsg->id);
@@ -358,12 +359,9 @@ ipq_receive_peer(struct ipq_peer_msg *pmsg,
break;
case IPQM_VERDICT:
- if (pmsg->msg.verdict.value > NF_MAX_VERDICT)
- status = -EINVAL;
- else
- status = ipq_set_verdict(&pmsg->msg.verdict,
- len - sizeof(*pmsg));
- break;
+ status = ipq_set_verdict(&pmsg->msg.verdict,
+ len - sizeof(*pmsg));
+ break;
default:
status = -EINVAL;
}
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 6a79f3081bd..343852e5c70 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -817,8 +817,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
memset(opt, 0, sizeof(struct ipv6_txoptions));
opt->tot_len = sizeof(struct ipv6_txoptions);
- err = datagram_send_ctl(sock_net(sk), msg, &fl6, opt, &hlimit,
- &tclass, &dontfrag);
+ err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
+ &hlimit, &tclass, &dontfrag);
if (err < 0) {
fl6_sock_release(flowlabel);
return err;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 9e69eb0ec6d..fb545edef6e 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -104,6 +104,9 @@ static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
struct inet_peer *peer;
u32 *p = NULL;
+ if (!(rt->dst.flags & DST_HOST))
+ return NULL;
+
if (!rt->rt6i_peer)
rt6_bind_peer(rt, 1);
@@ -241,7 +244,9 @@ static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops,
{
struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, flags);
- memset(&rt->rt6i_table, 0, sizeof(*rt) - sizeof(struct dst_entry));
+ if (rt != NULL)
+ memset(&rt->rt6i_table, 0,
+ sizeof(*rt) - sizeof(struct dst_entry));
return rt;
}
@@ -252,6 +257,9 @@ static void ip6_dst_destroy(struct dst_entry *dst)
struct inet6_dev *idev = rt->rt6i_idev;
struct inet_peer *peer = rt->rt6i_peer;
+ if (!(rt->dst.flags & DST_HOST))
+ dst_destroy_metrics_generic(dst);
+
if (idev != NULL) {
rt->rt6i_idev = NULL;
in6_dev_put(idev);
@@ -723,9 +731,7 @@ static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort,
ipv6_addr_copy(&rt->rt6i_gateway, daddr);
}
- rt->rt6i_dst.plen = 128;
rt->rt6i_flags |= RTF_CACHE;
- rt->dst.flags |= DST_HOST;
#ifdef CONFIG_IPV6_SUBTREES
if (rt->rt6i_src.plen && saddr) {
@@ -775,9 +781,7 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort,
struct rt6_info *rt = ip6_rt_copy(ort, daddr);
if (rt) {
- rt->rt6i_dst.plen = 128;
rt->rt6i_flags |= RTF_CACHE;
- rt->dst.flags |= DST_HOST;
dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_raw(&ort->dst)));
}
return rt;
@@ -1078,12 +1082,15 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
neigh = NULL;
}
- rt->rt6i_idev = idev;
+ rt->dst.flags |= DST_HOST;
+ rt->dst.output = ip6_output;
dst_set_neighbour(&rt->dst, neigh);
atomic_set(&rt->dst.__refcnt, 1);
- ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
- rt->dst.output = ip6_output;
+
+ ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
+ rt->rt6i_dst.plen = 128;
+ rt->rt6i_idev = idev;
spin_lock_bh(&icmp6_dst_lock);
rt->dst.next = icmp6_dst_gc_list;
@@ -1261,6 +1268,14 @@ int ip6_route_add(struct fib6_config *cfg)
if (rt->rt6i_dst.plen == 128)
rt->dst.flags |= DST_HOST;
+ if (!(rt->dst.flags & DST_HOST) && cfg->fc_mx) {
+ u32 *metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
+ if (!metrics) {
+ err = -ENOMEM;
+ goto out;
+ }
+ dst_init_metrics(&rt->dst, metrics, 0);
+ }
#ifdef CONFIG_IPV6_SUBTREES
ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
rt->rt6i_src.plen = cfg->fc_src_len;
@@ -1607,9 +1622,6 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
if (on_link)
nrt->rt6i_flags &= ~RTF_GATEWAY;
- nrt->rt6i_dst.plen = 128;
- nrt->dst.flags |= DST_HOST;
-
ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key);
dst_set_neighbour(&nrt->dst, neigh_clone(neigh));
@@ -1754,9 +1766,10 @@ static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
if (rt) {
rt->dst.input = ort->dst.input;
rt->dst.output = ort->dst.output;
+ rt->dst.flags |= DST_HOST;
ipv6_addr_copy(&rt->rt6i_dst.addr, dest);
- rt->rt6i_dst.plen = ort->rt6i_dst.plen;
+ rt->rt6i_dst.plen = 128;
dst_copy_metrics(&rt->dst, &ort->dst);
rt->dst.error = ort->dst.error;
rt->rt6i_idev = ort->rt6i_idev;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index d1fb63f4aeb..7b8fc579435 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -531,20 +531,6 @@ static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
return tcp_v6_send_synack(sk, req, rvp);
}
-static inline void syn_flood_warning(struct sk_buff *skb)
-{
-#ifdef CONFIG_SYN_COOKIES
- if (sysctl_tcp_syncookies)
- printk(KERN_INFO
- "TCPv6: Possible SYN flooding on port %d. "
- "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest));
- else
-#endif
- printk(KERN_INFO
- "TCPv6: Possible SYN flooding on port %d. "
- "Dropping request.\n", ntohs(tcp_hdr(skb)->dest));
-}
-
static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
kfree_skb(inet6_rsk(req)->pktopts);
@@ -605,7 +591,8 @@ static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
}
sk_nocaps_add(sk, NETIF_F_GSO_MASK);
}
- if (tcp_alloc_md5sig_pool(sk) == NULL) {
+ if (tp->md5sig_info->entries6 == 0 &&
+ tcp_alloc_md5sig_pool(sk) == NULL) {
kfree(newkey);
return -ENOMEM;
}
@@ -614,8 +601,9 @@ static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
(tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
if (!keys) {
- tcp_free_md5sig_pool();
kfree(newkey);
+ if (tp->md5sig_info->entries6 == 0)
+ tcp_free_md5sig_pool();
return -ENOMEM;
}
@@ -661,6 +649,7 @@ static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
kfree(tp->md5sig_info->keys6);
tp->md5sig_info->keys6 = NULL;
tp->md5sig_info->alloced6 = 0;
+ tcp_free_md5sig_pool();
} else {
/* shrink the database */
if (tp->md5sig_info->entries6 != i)
@@ -669,7 +658,6 @@ static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
(tp->md5sig_info->entries6 - i)
* sizeof (tp->md5sig_info->keys6[0]));
}
- tcp_free_md5sig_pool();
return 0;
}
}
@@ -1179,11 +1167,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
struct tcp_sock *tp = tcp_sk(sk);
__u32 isn = TCP_SKB_CB(skb)->when;
struct dst_entry *dst = NULL;
-#ifdef CONFIG_SYN_COOKIES
int want_cookie = 0;
-#else
-#define want_cookie 0
-#endif
if (skb->protocol == htons(ETH_P_IP))
return tcp_v4_conn_request(sk, skb);
@@ -1192,14 +1176,9 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
goto drop;
if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
- if (net_ratelimit())
- syn_flood_warning(skb);
-#ifdef CONFIG_SYN_COOKIES
- if (sysctl_tcp_syncookies)
- want_cookie = 1;
- else
-#endif
- goto drop;
+ want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
+ if (!want_cookie)
+ goto drop;
}
if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
@@ -1249,9 +1228,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
while (l-- > 0)
*c++ ^= *hash_location++;
-#ifdef CONFIG_SYN_COOKIES
want_cookie = 0; /* not our kind of cookie */
-#endif
tmp_ext.cookie_out_never = 0; /* false */
tmp_ext.cookie_plus = tmp_opt.cookie_plus;
} else if (!tp->rx_opt.cookie_in_always) {
@@ -1408,6 +1385,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif
+ newnp->ipv6_ac_list = NULL;
+ newnp->ipv6_fl_list = NULL;
newnp->pktoptions = NULL;
newnp->opt = NULL;
newnp->mcast_oif = inet6_iif(skb);
@@ -1472,6 +1451,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
First: no IPv4 options.
*/
newinet->inet_opt = NULL;
+ newnp->ipv6_ac_list = NULL;
newnp->ipv6_fl_list = NULL;
/* Clone RX bits */
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 29213b51c49..bb95e8e1c6f 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1090,8 +1090,8 @@ do_udp_sendmsg:
memset(opt, 0, sizeof(struct ipv6_txoptions));
opt->tot_len = sizeof(*opt);
- err = datagram_send_ctl(sock_net(sk), msg, &fl6, opt, &hlimit,
- &tclass, &dontfrag);
+ err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
+ &hlimit, &tclass, &dontfrag);
if (err < 0) {
fl6_sock_release(flowlabel);
return err;
diff --git a/net/irda/irsysctl.c b/net/irda/irsysctl.c
index d0b70dadf73..2615ffc8e78 100644
--- a/net/irda/irsysctl.c
+++ b/net/irda/irsysctl.c
@@ -40,9 +40,9 @@ extern int sysctl_slot_timeout;
extern int sysctl_fast_poll_increase;
extern char sysctl_devname[];
extern int sysctl_max_baud_rate;
-extern int sysctl_min_tx_turn_time;
-extern int sysctl_max_tx_data_size;
-extern int sysctl_max_tx_window;
+extern unsigned int sysctl_min_tx_turn_time;
+extern unsigned int sysctl_max_tx_data_size;
+extern unsigned int sysctl_max_tx_window;
extern int sysctl_max_noreply_time;
extern int sysctl_warn_noreply_time;
extern int sysctl_lap_keepalive_time;
diff --git a/net/irda/qos.c b/net/irda/qos.c
index 1b51bcf4239..4369f7f41bc 100644
--- a/net/irda/qos.c
+++ b/net/irda/qos.c
@@ -60,7 +60,7 @@ int sysctl_max_noreply_time = 12;
* Default is 10us which means using the unmodified value given by the
* peer except if it's 0 (0 is likely a bug in the other stack).
*/
-unsigned sysctl_min_tx_turn_time = 10;
+unsigned int sysctl_min_tx_turn_time = 10;
/*
* Maximum data size to be used in transmission in payload of LAP frame.
* There is a bit of confusion in the IrDA spec :
@@ -75,13 +75,13 @@ unsigned sysctl_min_tx_turn_time = 10;
* bytes frames or all negotiated frame sizes, but you can use the sysctl
* to play with this value anyway.
* Jean II */
-unsigned sysctl_max_tx_data_size = 2042;
+unsigned int sysctl_max_tx_data_size = 2042;
/*
* Maximum transmit window, i.e. number of LAP frames between turn-around.
* This allow to override what the peer told us. Some peers are buggy and
* don't always support what they tell us.
* Jean II */
-unsigned sysctl_max_tx_window = 7;
+unsigned int sysctl_max_tx_window = 7;
static int irlap_param_baud_rate(void *instance, irda_param_t *param, int get);
static int irlap_param_link_disconnect(void *instance, irda_param_t *parm,
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index ad4ac2601a5..34b2ddeacb6 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1045,8 +1045,10 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
headroom = NET_SKB_PAD + sizeof(struct iphdr) +
uhlen + hdr_len;
old_headroom = skb_headroom(skb);
- if (skb_cow_head(skb, headroom))
+ if (skb_cow_head(skb, headroom)) {
+ dev_kfree_skb(skb);
goto abort;
+ }
new_headroom = skb_headroom(skb);
skb_orphan(skb);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 3db78b696c5..21070e9bc8d 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -665,7 +665,7 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
BUG_ON(!sdata->bss);
atomic_dec(&sdata->bss->num_sta_ps);
- __sta_info_clear_tim_bit(sdata->bss, sta);
+ sta_info_clear_tim_bit(sta);
}
local->num_sta--;
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 2b771dc708a..e3be48bf4dc 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2283,6 +2283,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
struct ip_vs_service *svc;
struct ip_vs_dest_user *udest_compat;
struct ip_vs_dest_user_kern udest;
+ struct netns_ipvs *ipvs = net_ipvs(net);
if (!capable(CAP_NET_ADMIN))
return -EPERM;
@@ -2303,6 +2304,24 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
/* increase the module use count */
ip_vs_use_count_inc();
+ /* Handle daemons since they have another lock */
+ if (cmd == IP_VS_SO_SET_STARTDAEMON ||
+ cmd == IP_VS_SO_SET_STOPDAEMON) {
+ struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg;
+
+ if (mutex_lock_interruptible(&ipvs->sync_mutex)) {
+ ret = -ERESTARTSYS;
+ goto out_dec;
+ }
+ if (cmd == IP_VS_SO_SET_STARTDAEMON)
+ ret = start_sync_thread(net, dm->state, dm->mcast_ifn,
+ dm->syncid);
+ else
+ ret = stop_sync_thread(net, dm->state);
+ mutex_unlock(&ipvs->sync_mutex);
+ goto out_dec;
+ }
+
if (mutex_lock_interruptible(&__ip_vs_mutex)) {
ret = -ERESTARTSYS;
goto out_dec;
@@ -2316,15 +2335,6 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
/* Set timeout values for (tcp tcpfin udp) */
ret = ip_vs_set_timeout(net, (struct ip_vs_timeout_user *)arg);
goto out_unlock;
- } else if (cmd == IP_VS_SO_SET_STARTDAEMON) {
- struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg;
- ret = start_sync_thread(net, dm->state, dm->mcast_ifn,
- dm->syncid);
- goto out_unlock;
- } else if (cmd == IP_VS_SO_SET_STOPDAEMON) {
- struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg;
- ret = stop_sync_thread(net, dm->state);
- goto out_unlock;
}
usvc_compat = (struct ip_vs_service_user *)arg;
@@ -2584,6 +2594,33 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
if (copy_from_user(arg, user, copylen) != 0)
return -EFAULT;
+ /*
+ * Handle daemons first since they have their own locking
+ */
+ if (cmd == IP_VS_SO_GET_DAEMON) {
+ struct ip_vs_daemon_user d[2];
+
+ memset(&d, 0, sizeof(d));
+ if (mutex_lock_interruptible(&ipvs->sync_mutex))
+ return -ERESTARTSYS;
+
+ if (ipvs->sync_state & IP_VS_STATE_MASTER) {
+ d[0].state = IP_VS_STATE_MASTER;
+ strlcpy(d[0].mcast_ifn, ipvs->master_mcast_ifn,
+ sizeof(d[0].mcast_ifn));
+ d[0].syncid = ipvs->master_syncid;
+ }
+ if (ipvs->sync_state & IP_VS_STATE_BACKUP) {
+ d[1].state = IP_VS_STATE_BACKUP;
+ strlcpy(d[1].mcast_ifn, ipvs->backup_mcast_ifn,
+ sizeof(d[1].mcast_ifn));
+ d[1].syncid = ipvs->backup_syncid;
+ }
+ if (copy_to_user(user, &d, sizeof(d)) != 0)
+ ret = -EFAULT;
+ mutex_unlock(&ipvs->sync_mutex);
+ return ret;
+ }
if (mutex_lock_interruptible(&__ip_vs_mutex))
return -ERESTARTSYS;
@@ -2681,28 +2718,6 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
}
break;
- case IP_VS_SO_GET_DAEMON:
- {
- struct ip_vs_daemon_user d[2];
-
- memset(&d, 0, sizeof(d));
- if (ipvs->sync_state & IP_VS_STATE_MASTER) {
- d[0].state = IP_VS_STATE_MASTER;
- strlcpy(d[0].mcast_ifn, ipvs->master_mcast_ifn,
- sizeof(d[0].mcast_ifn));
- d[0].syncid = ipvs->master_syncid;
- }
- if (ipvs->sync_state & IP_VS_STATE_BACKUP) {
- d[1].state = IP_VS_STATE_BACKUP;
- strlcpy(d[1].mcast_ifn, ipvs->backup_mcast_ifn,
- sizeof(d[1].mcast_ifn));
- d[1].syncid = ipvs->backup_syncid;
- }
- if (copy_to_user(user, &d, sizeof(d)) != 0)
- ret = -EFAULT;
- }
- break;
-
default:
ret = -EINVAL;
}
@@ -3205,7 +3220,7 @@ static int ip_vs_genl_dump_daemons(struct sk_buff *skb,
struct net *net = skb_sknet(skb);
struct netns_ipvs *ipvs = net_ipvs(net);
- mutex_lock(&__ip_vs_mutex);
+ mutex_lock(&ipvs->sync_mutex);
if ((ipvs->sync_state & IP_VS_STATE_MASTER) && !cb->args[0]) {
if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_MASTER,
ipvs->master_mcast_ifn,
@@ -3225,7 +3240,7 @@ static int ip_vs_genl_dump_daemons(struct sk_buff *skb,
}
nla_put_failure:
- mutex_unlock(&__ip_vs_mutex);
+ mutex_unlock(&ipvs->sync_mutex);
return skb->len;
}
@@ -3271,13 +3286,9 @@ static int ip_vs_genl_set_config(struct net *net, struct nlattr **attrs)
return ip_vs_set_timeout(net, &t);
}
-static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
+static int ip_vs_genl_set_daemon(struct sk_buff *skb, struct genl_info *info)
{
- struct ip_vs_service *svc = NULL;
- struct ip_vs_service_user_kern usvc;
- struct ip_vs_dest_user_kern udest;
int ret = 0, cmd;
- int need_full_svc = 0, need_full_dest = 0;
struct net *net;
struct netns_ipvs *ipvs;
@@ -3285,19 +3296,10 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
ipvs = net_ipvs(net);
cmd = info->genlhdr->cmd;
- mutex_lock(&__ip_vs_mutex);
-
- if (cmd == IPVS_CMD_FLUSH) {
- ret = ip_vs_flush(net);
- goto out;
- } else if (cmd == IPVS_CMD_SET_CONFIG) {
- ret = ip_vs_genl_set_config(net, info->attrs);
- goto out;
- } else if (cmd == IPVS_CMD_NEW_DAEMON ||
- cmd == IPVS_CMD_DEL_DAEMON) {
-
+ if (cmd == IPVS_CMD_NEW_DAEMON || cmd == IPVS_CMD_DEL_DAEMON) {
struct nlattr *daemon_attrs[IPVS_DAEMON_ATTR_MAX + 1];
+ mutex_lock(&ipvs->sync_mutex);
if (!info->attrs[IPVS_CMD_ATTR_DAEMON] ||
nla_parse_nested(daemon_attrs, IPVS_DAEMON_ATTR_MAX,
info->attrs[IPVS_CMD_ATTR_DAEMON],
@@ -3310,6 +3312,33 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
ret = ip_vs_genl_new_daemon(net, daemon_attrs);
else
ret = ip_vs_genl_del_daemon(net, daemon_attrs);
+out:
+ mutex_unlock(&ipvs->sync_mutex);
+ }
+ return ret;
+}
+
+static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
+{
+ struct ip_vs_service *svc = NULL;
+ struct ip_vs_service_user_kern usvc;
+ struct ip_vs_dest_user_kern udest;
+ int ret = 0, cmd;
+ int need_full_svc = 0, need_full_dest = 0;
+ struct net *net;
+ struct netns_ipvs *ipvs;
+
+ net = skb_sknet(skb);
+ ipvs = net_ipvs(net);
+ cmd = info->genlhdr->cmd;
+
+ mutex_lock(&__ip_vs_mutex);
+
+ if (cmd == IPVS_CMD_FLUSH) {
+ ret = ip_vs_flush(net);
+ goto out;
+ } else if (cmd == IPVS_CMD_SET_CONFIG) {
+ ret = ip_vs_genl_set_config(net, info->attrs);
goto out;
} else if (cmd == IPVS_CMD_ZERO &&
!info->attrs[IPVS_CMD_ATTR_SERVICE]) {
@@ -3536,13 +3565,13 @@ static struct genl_ops ip_vs_genl_ops[] __read_mostly = {
.cmd = IPVS_CMD_NEW_DAEMON,
.flags = GENL_ADMIN_PERM,
.policy = ip_vs_cmd_policy,
- .doit = ip_vs_genl_set_cmd,
+ .doit = ip_vs_genl_set_daemon,
},
{
.cmd = IPVS_CMD_DEL_DAEMON,
.flags = GENL_ADMIN_PERM,
.policy = ip_vs_cmd_policy,
- .doit = ip_vs_genl_set_cmd,
+ .doit = ip_vs_genl_set_daemon,
},
{
.cmd = IPVS_CMD_GET_DAEMON,
@@ -3679,7 +3708,7 @@ int __net_init ip_vs_control_net_init(struct net *net)
int idx;
struct netns_ipvs *ipvs = net_ipvs(net);
- ipvs->rs_lock = __RW_LOCK_UNLOCKED(ipvs->rs_lock);
+ rwlock_init(&ipvs->rs_lock);
/* Initialize rs_table */
for (idx = 0; idx < IP_VS_RTAB_SIZE; idx++)
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 7ee7215b8ba..3cdd479f9b5 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -61,6 +61,7 @@
#define SYNC_PROTO_VER 1 /* Protocol version in header */
+static struct lock_class_key __ipvs_sync_key;
/*
* IPVS sync connection entry
* Version 0, i.e. original version.
@@ -1545,6 +1546,7 @@ int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid)
IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n",
sizeof(struct ip_vs_sync_conn_v0));
+
if (state == IP_VS_STATE_MASTER) {
if (ipvs->master_thread)
return -EEXIST;
@@ -1667,6 +1669,7 @@ int __net_init ip_vs_sync_net_init(struct net *net)
{
struct netns_ipvs *ipvs = net_ipvs(net);
+ __mutex_init(&ipvs->sync_mutex, "ipvs->sync_mutex", &__ipvs_sync_key);
INIT_LIST_HEAD(&ipvs->sync_queue);
spin_lock_init(&ipvs->sync_lock);
spin_lock_init(&ipvs->sync_buff_lock);
@@ -1680,7 +1683,9 @@ int __net_init ip_vs_sync_net_init(struct net *net)
void ip_vs_sync_net_cleanup(struct net *net)
{
int retc;
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ mutex_lock(&ipvs->sync_mutex);
retc = stop_sync_thread(net, IP_VS_STATE_MASTER);
if (retc && retc != -ESRCH)
pr_err("Failed to stop Master Daemon\n");
@@ -1688,4 +1693,5 @@ void ip_vs_sync_net_cleanup(struct net *net)
retc = stop_sync_thread(net, IP_VS_STATE_BACKUP);
if (retc && retc != -ESRCH)
pr_err("Failed to stop Backup Daemon\n");
+ mutex_unlock(&ipvs->sync_mutex);
}
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c
index 2fd4565144d..31d56b23b9e 100644
--- a/net/netfilter/nf_conntrack_pptp.c
+++ b/net/netfilter/nf_conntrack_pptp.c
@@ -364,6 +364,7 @@ pptp_inbound_pkt(struct sk_buff *skb,
break;
case PPTP_WAN_ERROR_NOTIFY:
+ case PPTP_SET_LINK_INFO:
case PPTP_ECHO_REQUEST:
case PPTP_ECHO_REPLY:
/* I don't have to explain these ;) */
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index cf616e55ca4..d69facdd9a7 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -241,8 +241,8 @@ static int gre_packet(struct nf_conn *ct,
nf_ct_refresh_acct(ct, ctinfo, skb,
ct->proto.gre.stream_timeout);
/* Also, more likely to be important, and not a probe. */
- set_bit(IPS_ASSURED_BIT, &ct->status);
- nf_conntrack_event_cache(IPCT_ASSURED, ct);
+ if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
+ nf_conntrack_event_cache(IPCT_ASSURED, ct);
} else
nf_ct_refresh_acct(ct, ctinfo, skb,
ct->proto.gre.timeout);
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 37bf94394be..8235b86b4e8 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -409,7 +409,7 @@ static void tcp_options(const struct sk_buff *skb,
if (opsize < 2) /* "silly options" */
return;
if (opsize > length)
- break; /* don't parse partial options */
+ return; /* don't parse partial options */
if (opcode == TCPOPT_SACK_PERM
&& opsize == TCPOLEN_SACK_PERM)
@@ -447,7 +447,7 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
BUG_ON(ptr == NULL);
/* Fast path for timestamp-only option */
- if (length == TCPOLEN_TSTAMP_ALIGNED*4
+ if (length == TCPOLEN_TSTAMP_ALIGNED
&& *(__be32 *)ptr == htonl((TCPOPT_NOP << 24)
| (TCPOPT_NOP << 16)
| (TCPOPT_TIMESTAMP << 8)
@@ -469,7 +469,7 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
if (opsize < 2) /* "silly options" */
return;
if (opsize > length)
- break; /* don't parse partial options */
+ return; /* don't parse partial options */
if (opcode == TCPOPT_SACK
&& opsize >= (TCPOLEN_SACK_BASE
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 00bd475eab4..a80b0cb03f1 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -646,8 +646,8 @@ verdicthdr_get(const struct nlattr * const nfqa[])
return NULL;
vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
- verdict = ntohl(vhdr->verdict);
- if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT)
+ verdict = ntohl(vhdr->verdict) & NF_VERDICT_MASK;
+ if (verdict > NF_MAX_VERDICT || verdict == NF_STOLEN)
return NULL;
return vhdr;
}
diff --git a/net/netfilter/xt_rateest.c b/net/netfilter/xt_rateest.c
index 76a083184d8..ed0db15ab00 100644
--- a/net/netfilter/xt_rateest.c
+++ b/net/netfilter/xt_rateest.c
@@ -78,7 +78,7 @@ static int xt_rateest_mt_checkentry(const struct xt_mtchk_param *par)
{
struct xt_rateest_match_info *info = par->matchinfo;
struct xt_rateest *est1, *est2;
- int ret = false;
+ int ret = -EINVAL;
if (hweight32(info->flags & (XT_RATEEST_MATCH_ABS |
XT_RATEEST_MATCH_REL)) != 1)
@@ -101,13 +101,12 @@ static int xt_rateest_mt_checkentry(const struct xt_mtchk_param *par)
if (!est1)
goto err1;
+ est2 = NULL;
if (info->flags & XT_RATEEST_MATCH_REL) {
est2 = xt_rateest_lookup(info->name2);
if (!est2)
goto err2;
- } else
- est2 = NULL;
-
+ }
info->est1 = est1;
info->est2 = est2;
@@ -116,7 +115,7 @@ static int xt_rateest_mt_checkentry(const struct xt_mtchk_param *par)
err2:
xt_rateest_put(est1);
err1:
- return -EINVAL;
+ return ret;
}
static void xt_rateest_mt_destroy(const struct xt_mtdtor_param *par)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index c698cec0a44..fabb4fafa28 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -961,7 +961,10 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
return 0;
drop_n_acct:
- po->stats.tp_drops = atomic_inc_return(&sk->sk_drops);
+ spin_lock(&sk->sk_receive_queue.lock);
+ po->stats.tp_drops++;
+ atomic_inc(&sk->sk_drops);
+ spin_unlock(&sk->sk_receive_queue.lock);
drop_n_restore:
if (skb_head != skb->data && skb_shared(skb)) {
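
An illustrative user-space sketch (not part of the patch) of why tp_drops++ is moved under the receive-queue lock: an unserialized read-modify-write on a shared counter can lose updates when several CPUs drop packets at once. Compile with -pthread.

#include <pthread.h>
#include <stdio.h>

#define ITERS 1000000

/* volatile only keeps the per-iteration increments visible in the demo */
static volatile unsigned long racy_drops;
static unsigned long locked_drops;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void *worker(void *arg)
{
	long i;

	(void)arg;
	for (i = 0; i < ITERS; i++) {
		racy_drops++;               /* like the old unlocked counter */

		pthread_mutex_lock(&lock);  /* like taking sk_receive_queue.lock */
		locked_drops++;
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, worker, NULL);
	pthread_create(&t2, NULL, worker, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);

	printf("racy:   %lu (may be less than %d)\n", racy_drops, 2 * ITERS);
	printf("locked: %lu (always %d)\n", locked_drops, 2 * ITERS);
	return 0;
}
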
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
index 8b77edbab27..4e1de171866 100644
--- a/net/rds/iw_rdma.c
+++ b/net/rds/iw_rdma.c
@@ -84,7 +84,8 @@ static int rds_iw_map_fastreg(struct rds_iw_mr_pool *pool,
static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
struct list_head *unmap_list,
- struct list_head *kill_list);
+ struct list_head *kill_list,
+ int *unpinned);
static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwdev, struct rdma_cm_id **cm_id)
@@ -499,7 +500,7 @@ static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
LIST_HEAD(unmap_list);
LIST_HEAD(kill_list);
unsigned long flags;
- unsigned int nfreed = 0, ncleaned = 0, free_goal;
+ unsigned int nfreed = 0, ncleaned = 0, unpinned = 0, free_goal;
int ret = 0;
rds_iw_stats_inc(s_iw_rdma_mr_pool_flush);
@@ -524,7 +525,8 @@ static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
* will be destroyed by the unmap function.
*/
if (!list_empty(&unmap_list)) {
- ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list, &kill_list);
+ ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list,
+ &kill_list, &unpinned);
/* If we've been asked to destroy all MRs, move those
* that were simply cleaned to the kill list */
if (free_all)
@@ -548,6 +550,7 @@ static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
spin_unlock_irqrestore(&pool->list_lock, flags);
}
+ atomic_sub(unpinned, &pool->free_pinned);
atomic_sub(ncleaned, &pool->dirty_count);
atomic_sub(nfreed, &pool->item_count);
@@ -828,7 +831,8 @@ static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool,
static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
struct list_head *unmap_list,
- struct list_head *kill_list)
+ struct list_head *kill_list,
+ int *unpinned)
{
struct rds_iw_mapping *mapping, *next;
unsigned int ncleaned = 0;
@@ -855,6 +859,7 @@ static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
spin_lock_irqsave(&pool->list_lock, flags);
list_for_each_entry_safe(mapping, next, unmap_list, m_list) {
+ *unpinned += mapping->m_sg.len;
list_move(&mapping->m_list, &laundered);
ncleaned++;
}
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index be4505ee67a..b01427924f8 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -425,7 +425,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
struct rsvp_filter *f, **fp;
struct rsvp_session *s, **sp;
struct tc_rsvp_pinfo *pinfo = NULL;
- struct nlattr *opt = tca[TCA_OPTIONS-1];
+ struct nlattr *opt = tca[TCA_OPTIONS];
struct nlattr *tb[TCA_RSVP_MAX + 1];
struct tcf_exts e;
unsigned int h1, h2;
@@ -439,7 +439,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
if (err < 0)
return err;
- err = tcf_exts_validate(tp, tb, tca[TCA_RATE-1], &e, &rsvp_ext_map);
+ err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &rsvp_ext_map);
if (err < 0)
return err;
@@ -449,8 +449,8 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
if (f->handle != handle && handle)
goto errout2;
- if (tb[TCA_RSVP_CLASSID-1]) {
- f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID-1]);
+ if (tb[TCA_RSVP_CLASSID]) {
+ f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
tcf_bind_filter(tp, &f->res, base);
}
@@ -462,7 +462,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
err = -EINVAL;
if (handle)
goto errout2;
- if (tb[TCA_RSVP_DST-1] == NULL)
+ if (tb[TCA_RSVP_DST] == NULL)
goto errout2;
err = -ENOBUFS;
@@ -471,19 +471,19 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
goto errout2;
h2 = 16;
- if (tb[TCA_RSVP_SRC-1]) {
- memcpy(f->src, nla_data(tb[TCA_RSVP_SRC-1]), sizeof(f->src));
+ if (tb[TCA_RSVP_SRC]) {
+ memcpy(f->src, nla_data(tb[TCA_RSVP_SRC]), sizeof(f->src));
h2 = hash_src(f->src);
}
- if (tb[TCA_RSVP_PINFO-1]) {
- pinfo = nla_data(tb[TCA_RSVP_PINFO-1]);
+ if (tb[TCA_RSVP_PINFO]) {
+ pinfo = nla_data(tb[TCA_RSVP_PINFO]);
f->spi = pinfo->spi;
f->tunnelhdr = pinfo->tunnelhdr;
}
- if (tb[TCA_RSVP_CLASSID-1])
- f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID-1]);
+ if (tb[TCA_RSVP_CLASSID])
+ f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
- dst = nla_data(tb[TCA_RSVP_DST-1]);
+ dst = nla_data(tb[TCA_RSVP_DST]);
h1 = hash_dst(dst, pinfo ? pinfo->protocol : 0, pinfo ? pinfo->tunnelid : 0);
err = -ENOMEM;
@@ -642,8 +642,7 @@ nla_put_failure:
return -1;
}
-static struct tcf_proto_ops RSVP_OPS = {
- .next = NULL,
+static struct tcf_proto_ops RSVP_OPS __read_mostly = {
.kind = RSVP_ID,
.classify = rsvp_classify,
.init = rsvp_init,
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 167c880cf8d..76388b083f2 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1689,6 +1689,11 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
case SCTP_CMD_PURGE_ASCONF_QUEUE:
sctp_asconf_queue_teardown(asoc);
break;
+
+ case SCTP_CMD_SET_ASOC:
+ asoc = cmd->obj.asoc;
+ break;
+
default:
pr_warn("Impossible command: %u, %p\n",
cmd->verb, cmd->obj.ptr);
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 49b847b00f9..a0f31e6c1c6 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -2047,6 +2047,12 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
+ /* Restore association pointer to provide SCTP command interpeter
+ * with a valid context in case it needs to manipulate
+ * the queues */
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC,
+ SCTP_ASOC((struct sctp_association *)asoc));
+
return retval;
nomem:
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index e83e7fee3bc..ea40d540a99 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -4113,9 +4113,12 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
if (len % sizeof(u32))
return -EINVAL;
+ if (settings->n_akm_suites > NL80211_MAX_NR_AKM_SUITES)
+ return -EINVAL;
+
memcpy(settings->akm_suites, data, len);
- for (i = 0; i < settings->n_ciphers_pairwise; i++)
+ for (i = 0; i < settings->n_akm_suites; i++)
if (!nl80211_valid_akm_suite(settings->akm_suites[i]))
return -EINVAL;
}
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 02751dbc5a9..68a471ba193 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -852,6 +852,7 @@ static void handle_channel(struct wiphy *wiphy,
return;
}
+ chan->beacon_found = false;
chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags);
chan->max_antenna_gain = min(chan->orig_mag,
(int) MBI_TO_DBI(power_rule->max_antenna_gain));
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index b7b6ff8be55..dec0fa28372 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -118,6 +118,8 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
i++, j++)
request->channels[i] =
&wdev->wiphy->bands[band]->channels[j];
+ request->rates[band] =
+ (1 << wdev->wiphy->bands[band]->n_bitrates) - 1;
}
}
request->n_channels = n_channels;
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index d30615419b4..5f03e4ea65b 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -91,7 +91,7 @@ int x25_parse_address_block(struct sk_buff *skb,
int needed;
int rc;
- if (skb->len < 1) {
+ if (!pskb_may_pull(skb, 1)) {
/* packet has no address block */
rc = 0;
goto empty;
@@ -100,7 +100,7 @@ int x25_parse_address_block(struct sk_buff *skb,
len = *skb->data;
needed = 1 + (len >> 4) + (len & 0x0f);
- if (skb->len < needed) {
+ if (!pskb_may_pull(skb, needed)) {
/* packet is too short to hold the addresses it claims
to hold */
rc = -1;
@@ -295,7 +295,8 @@ static struct sock *x25_find_listener(struct x25_address *addr,
* Found a listening socket, now check the incoming
* call user data vs this sockets call user data
*/
- if(skb->len > 0 && x25_sk(s)->cudmatchlength > 0) {
+ if (x25_sk(s)->cudmatchlength > 0 &&
+ skb->len >= x25_sk(s)->cudmatchlength) {
if((memcmp(x25_sk(s)->calluserdata.cuddata,
skb->data,
x25_sk(s)->cudmatchlength)) == 0) {
@@ -951,14 +952,27 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
*
* Facilities length is mandatory in call request packets
*/
- if (skb->len < 1)
+ if (!pskb_may_pull(skb, 1))
goto out_clear_request;
len = skb->data[0] + 1;
- if (skb->len < len)
+ if (!pskb_may_pull(skb, len))
goto out_clear_request;
skb_pull(skb,len);
/*
+ * Ensure that the amount of call user data is valid.
+ */
+ if (skb->len > X25_MAX_CUD_LEN)
+ goto out_clear_request;
+
+ /*
+ * Get all the call user data so it can be used in
+ * x25_find_listener and skb_copy_from_linear_data up ahead.
+ */
+ if (!pskb_may_pull(skb, skb->len))
+ goto out_clear_request;
+
+ /*
* Find a listener for the particular address/cud pair.
*/
sk = x25_find_listener(&source_addr,skb);
@@ -1166,6 +1180,9 @@ static int x25_sendmsg(struct kiocb *iocb, struct socket *sock,
* byte of the user data is the logical value of the Q Bit.
*/
if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) {
+ if (!pskb_may_pull(skb, 1))
+ goto out_kfree_skb;
+
qbit = skb->data[0];
skb_pull(skb, 1);
}
@@ -1244,7 +1261,9 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
struct x25_sock *x25 = x25_sk(sk);
struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)msg->msg_name;
size_t copied;
- int qbit;
+ int qbit, header_len = x25->neighbour->extended ?
+ X25_EXT_MIN_LEN : X25_STD_MIN_LEN;
+
struct sk_buff *skb;
unsigned char *asmptr;
int rc = -ENOTCONN;
@@ -1265,6 +1284,9 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
skb = skb_dequeue(&x25->interrupt_in_queue);
+ if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
+ goto out_free_dgram;
+
skb_pull(skb, X25_STD_MIN_LEN);
/*
@@ -1285,10 +1307,12 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
if (!skb)
goto out;
+ if (!pskb_may_pull(skb, header_len))
+ goto out_free_dgram;
+
qbit = (skb->data[0] & X25_Q_BIT) == X25_Q_BIT;
- skb_pull(skb, x25->neighbour->extended ?
- X25_EXT_MIN_LEN : X25_STD_MIN_LEN);
+ skb_pull(skb, header_len);
if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) {
asmptr = skb_push(skb, 1);
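
The X.25 changes above repeatedly apply one defensive rule: prove the bytes are really present (pskb_may_pull()) before dereferencing skb->data. A stand-alone sketch of the same rule over a plain buffer, mirroring the length computation quoted from x25_parse_address_block() above; the sample payloads are made up:

#include <stdio.h>
#include <stddef.h>

/* Check that the claimed number of bytes is actually available before
 * reading them; pskb_may_pull() plays this role for skbs. The "needed"
 * computation copies the one quoted above: a length byte whose two
 * nibbles determine how many further bytes must follow. */
static int parse_address_block(const unsigned char *buf, size_t buflen)
{
	size_t needed;

	if (buflen < 1)
		return -1;              /* no room for the length byte */

	needed = 1 + (buf[0] >> 4) + (buf[0] & 0x0f);
	if (buflen < needed)
		return -1;              /* header claims more than we have */

	return (int)needed;             /* safe to read 'needed' bytes */
}

int main(void)
{
	const unsigned char ok[]        = { 0x21, 0x12, 0x34, 0x56 }; /* needs 4 */
	const unsigned char truncated[] = { 0x44, 0x12 };             /* needs 9 */

	printf("ok:        %d\n", parse_address_block(ok, sizeof(ok)));
	printf("truncated: %d\n", parse_address_block(truncated, sizeof(truncated)));
	return 0;
}
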
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
index e547ca1578c..fa2b41888bd 100644
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -32,6 +32,9 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
unsigned short frametype;
unsigned int lci;
+ if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
+ return 0;
+
frametype = skb->data[2];
lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
@@ -115,6 +118,9 @@ int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev,
goto drop;
}
+ if (!pskb_may_pull(skb, 1))
+ return 0;
+
switch (skb->data[0]) {
case X25_IFACE_DATA:
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
index f77e4e75f91..36384a1fa9f 100644
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -44,7 +44,7 @@
int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask)
{
- unsigned char *p = skb->data;
+ unsigned char *p;
unsigned int len;
*vc_fac_mask = 0;
@@ -60,14 +60,16 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
memset(dte_facs->called_ae, '\0', sizeof(dte_facs->called_ae));
memset(dte_facs->calling_ae, '\0', sizeof(dte_facs->calling_ae));
- if (skb->len < 1)
+ if (!pskb_may_pull(skb, 1))
return 0;
- len = *p++;
+ len = skb->data[0];
- if (len >= skb->len)
+ if (!pskb_may_pull(skb, 1 + len))
return -1;
+ p = skb->data + 1;
+
while (len > 0) {
switch (*p & X25_FAC_CLASS_MASK) {
case X25_FAC_CLASS_A:
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c
index 0b073b51b18..a49cd4ec551 100644
--- a/net/x25/x25_in.c
+++ b/net/x25/x25_in.c
@@ -107,6 +107,8 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
/*
* Parse the data in the frame.
*/
+ if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
+ goto out_clear;
skb_pull(skb, X25_STD_MIN_LEN);
len = x25_parse_address_block(skb, &source_addr,
@@ -127,9 +129,11 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
* Copy any Call User Data.
*/
if (skb->len > 0) {
- skb_copy_from_linear_data(skb,
- x25->calluserdata.cuddata,
- skb->len);
+ if (skb->len > X25_MAX_CUD_LEN)
+ goto out_clear;
+
+ skb_copy_bits(skb, 0, x25->calluserdata.cuddata,
+ skb->len);
x25->calluserdata.cudlength = skb->len;
}
if (!sock_flag(sk, SOCK_DEAD))
@@ -137,6 +141,9 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
break;
}
case X25_CLEAR_REQUEST:
+ if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
+ goto out_clear;
+
x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
x25_disconnect(sk, ECONNREFUSED, skb->data[3], skb->data[4]);
break;
@@ -164,6 +171,9 @@ static int x25_state2_machine(struct sock *sk, struct sk_buff *skb, int frametyp
switch (frametype) {
case X25_CLEAR_REQUEST:
+ if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
+ goto out_clear;
+
x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
break;
@@ -177,6 +187,11 @@ static int x25_state2_machine(struct sock *sk, struct sk_buff *skb, int frametyp
}
return 0;
+
+out_clear:
+ x25_write_internal(sk, X25_CLEAR_REQUEST);
+ x25_start_t23timer(sk);
+ return 0;
}
/*
@@ -206,6 +221,9 @@ static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametyp
break;
case X25_CLEAR_REQUEST:
+ if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
+ goto out_clear;
+
x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
break;
@@ -304,6 +322,12 @@ static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametyp
}
return queued;
+
+out_clear:
+ x25_write_internal(sk, X25_CLEAR_REQUEST);
+ x25->state = X25_STATE_2;
+ x25_start_t23timer(sk);
+ return 0;
}
/*
@@ -313,13 +337,13 @@ static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametyp
*/
static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametype)
{
+ struct x25_sock *x25 = x25_sk(sk);
+
switch (frametype) {
case X25_RESET_REQUEST:
x25_write_internal(sk, X25_RESET_CONFIRMATION);
case X25_RESET_CONFIRMATION: {
- struct x25_sock *x25 = x25_sk(sk);
-
x25_stop_timer(sk);
x25->condition = 0x00;
x25->va = 0;
@@ -331,6 +355,9 @@ static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametyp
break;
}
case X25_CLEAR_REQUEST:
+ if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
+ goto out_clear;
+
x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
break;
@@ -340,6 +367,12 @@ static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametyp
}
return 0;
+
+out_clear:
+ x25_write_internal(sk, X25_CLEAR_REQUEST);
+ x25->state = X25_STATE_2;
+ x25_start_t23timer(sk);
+ return 0;
}
/* Higher level upcall for a LAPB frame */
diff --git a/net/x25/x25_link.c b/net/x25/x25_link.c
index 037958ff8ee..4acacf3c661 100644
--- a/net/x25/x25_link.c
+++ b/net/x25/x25_link.c
@@ -90,6 +90,9 @@ void x25_link_control(struct sk_buff *skb, struct x25_neigh *nb,
break;
case X25_DIAGNOSTIC:
+ if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 4))
+ break;
+
printk(KERN_WARNING "x25: diagnostic #%d - %02X %02X %02X\n",
skb->data[3], skb->data[4],
skb->data[5], skb->data[6]);
diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c
index 24a342ebc7f..5170d52bfd9 100644
--- a/net/x25/x25_subr.c
+++ b/net/x25/x25_subr.c
@@ -269,7 +269,11 @@ int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q,
int *d, int *m)
{
struct x25_sock *x25 = x25_sk(sk);
- unsigned char *frame = skb->data;
+ unsigned char *frame;
+
+ if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
+ return X25_ILLEGAL;
+ frame = skb->data;
*ns = *nr = *q = *d = *m = 0;
@@ -294,6 +298,10 @@ int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q,
if (frame[2] == X25_RR ||
frame[2] == X25_RNR ||
frame[2] == X25_REJ) {
+ if (!pskb_may_pull(skb, X25_EXT_MIN_LEN))
+ return X25_ILLEGAL;
+ frame = skb->data;
+
*nr = (frame[3] >> 1) & 0x7F;
return frame[2];
}
@@ -308,6 +316,10 @@ int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q,
if (x25->neighbour->extended) {
if ((frame[2] & 0x01) == X25_DATA) {
+ if (!pskb_may_pull(skb, X25_EXT_MIN_LEN))
+ return X25_ILLEGAL;
+ frame = skb->data;
+
*q = (frame[0] & X25_Q_BIT) == X25_Q_BIT;
*d = (frame[0] & X25_D_BIT) == X25_D_BIT;
*m = (frame[3] & X25_EXT_M_BIT) == X25_EXT_M_BIT;
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index a026b0ef244..54a0dc2e2f8 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -212,6 +212,11 @@ resume:
/* only the first xfrm gets the encap type */
encap_type = 0;
+ if (async && x->repl->check(x, skb, seq)) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
+ goto drop_unlock;
+ }
+
x->repl->advance(x, seq);
x->curlft.bytes += skb->len;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 94fdcc7f103..552df27dcf5 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1349,14 +1349,16 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
BUG();
}
xdst = dst_alloc(dst_ops, NULL, 0, 0, 0);
- memset(&xdst->u.rt6.rt6i_table, 0, sizeof(*xdst) - sizeof(struct dst_entry));
- xfrm_policy_put_afinfo(afinfo);
- if (likely(xdst))
+ if (likely(xdst)) {
+ memset(&xdst->u.rt6.rt6i_table, 0,
+ sizeof(*xdst) - sizeof(struct dst_entry));
xdst->flo.ops = &xfrm_bundle_fc_ops;
- else
+ } else
xdst = ERR_PTR(-ENOBUFS);
+ xfrm_policy_put_afinfo(afinfo);
+
return xdst;
}
diff --git a/security/Kconfig b/security/Kconfig
index e0f08b52e4a..51bd5a0b69a 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -38,7 +38,9 @@ config TRUSTED_KEYS
config ENCRYPTED_KEYS
tristate "ENCRYPTED KEYS"
- depends on KEYS && TRUSTED_KEYS
+ depends on KEYS
+ select CRYPTO
+ select CRYPTO_HMAC
select CRYPTO_AES
select CRYPTO_CBC
select CRYPTO_SHA256
@@ -186,7 +188,7 @@ source security/smack/Kconfig
source security/tomoyo/Kconfig
source security/apparmor/Kconfig
-source security/integrity/ima/Kconfig
+source security/integrity/Kconfig
choice
prompt "Default security module"
diff --git a/security/Makefile b/security/Makefile
index 8bb0fe9e1ca..a5e502f8a05 100644
--- a/security/Makefile
+++ b/security/Makefile
@@ -24,5 +24,5 @@ obj-$(CONFIG_SECURITY_APPARMOR) += apparmor/built-in.o
obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o
# Object integrity file lists
-subdir-$(CONFIG_IMA) += integrity/ima
-obj-$(CONFIG_IMA) += integrity/ima/built-in.o
+subdir-$(CONFIG_INTEGRITY) += integrity
+obj-$(CONFIG_INTEGRITY) += integrity/built-in.o
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index 0848292982a..69ddb47787b 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -200,7 +200,7 @@ void __init aa_destroy_aafs(void)
*
* Returns: error on failure
*/
-int __init aa_create_aafs(void)
+static int __init aa_create_aafs(void)
{
int error;
diff --git a/security/apparmor/ipc.c b/security/apparmor/ipc.c
index 649fad88869..7ee05c6f3c6 100644
--- a/security/apparmor/ipc.c
+++ b/security/apparmor/ipc.c
@@ -19,6 +19,7 @@
#include "include/capability.h"
#include "include/context.h"
#include "include/policy.h"
+#include "include/ipc.h"
/* call back to audit ptrace fields */
static void audit_cb(struct audit_buffer *ab, void *va)
diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c
index b82e383beb7..9516948041a 100644
--- a/security/apparmor/lib.c
+++ b/security/apparmor/lib.c
@@ -18,6 +18,7 @@
#include <linux/vmalloc.h>
#include "include/audit.h"
+#include "include/apparmor.h"
/**
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index d6d9a57b565..741dd13e089 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -381,11 +381,11 @@ static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile)
profile->file.trans.size = size;
for (i = 0; i < size; i++) {
char *str;
- int c, j, size = unpack_strdup(e, &str, NULL);
+ int c, j, size2 = unpack_strdup(e, &str, NULL);
/* unpack_strdup verifies that the last character is
* null termination byte.
*/
- if (!size)
+ if (!size2)
goto fail;
profile->file.trans.table[i] = str;
/* verify that name doesn't start with space */
@@ -393,7 +393,7 @@ static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile)
goto fail;
/* count internal # of internal \0 */
- for (c = j = 0; j < size - 2; j++) {
+ for (c = j = 0; j < size2 - 2; j++) {
if (!str[j])
c++;
}
@@ -440,11 +440,11 @@ static bool unpack_rlimits(struct aa_ext *e, struct aa_profile *profile)
if (size > RLIM_NLIMITS)
goto fail;
for (i = 0; i < size; i++) {
- u64 tmp = 0;
+ u64 tmp2 = 0;
int a = aa_map_resource(i);
- if (!unpack_u64(e, &tmp, NULL))
+ if (!unpack_u64(e, &tmp2, NULL))
goto fail;
- profile->rlimits.limits[a].rlim_max = tmp;
+ profile->rlimits.limits[a].rlim_max = tmp2;
}
if (!unpack_nameX(e, AA_ARRAYEND, NULL))
goto fail;
diff --git a/security/apparmor/procattr.c b/security/apparmor/procattr.c
index 04a2cf8d1b6..1b41c542d37 100644
--- a/security/apparmor/procattr.c
+++ b/security/apparmor/procattr.c
@@ -16,6 +16,7 @@
#include "include/context.h"
#include "include/policy.h"
#include "include/domain.h"
+#include "include/procattr.h"
/**
diff --git a/security/commoncap.c b/security/commoncap.c
index a93b3b73307..ee4f8486e5f 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -332,7 +332,8 @@ int cap_inode_killpriv(struct dentry *dentry)
*/
static inline int bprm_caps_from_vfs_caps(struct cpu_vfs_cap_data *caps,
struct linux_binprm *bprm,
- bool *effective)
+ bool *effective,
+ bool *has_cap)
{
struct cred *new = bprm->cred;
unsigned i;
@@ -341,6 +342,9 @@ static inline int bprm_caps_from_vfs_caps(struct cpu_vfs_cap_data *caps,
if (caps->magic_etc & VFS_CAP_FLAGS_EFFECTIVE)
*effective = true;
+ if (caps->magic_etc & VFS_CAP_REVISION_MASK)
+ *has_cap = true;
+
CAP_FOR_EACH_U32(i) {
__u32 permitted = caps->permitted.cap[i];
__u32 inheritable = caps->inheritable.cap[i];
@@ -424,7 +428,7 @@ int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data
* its xattrs and, if present, apply them to the proposed credentials being
* constructed by execve().
*/
-static int get_file_caps(struct linux_binprm *bprm, bool *effective)
+static int get_file_caps(struct linux_binprm *bprm, bool *effective, bool *has_cap)
{
struct dentry *dentry;
int rc = 0;
@@ -450,7 +454,7 @@ static int get_file_caps(struct linux_binprm *bprm, bool *effective)
goto out;
}
- rc = bprm_caps_from_vfs_caps(&vcaps, bprm, effective);
+ rc = bprm_caps_from_vfs_caps(&vcaps, bprm, effective, has_cap);
if (rc == -EINVAL)
printk(KERN_NOTICE "%s: cap_from_disk returned %d for %s\n",
__func__, rc, bprm->filename);
@@ -475,11 +479,11 @@ int cap_bprm_set_creds(struct linux_binprm *bprm)
{
const struct cred *old = current_cred();
struct cred *new = bprm->cred;
- bool effective;
+ bool effective, has_cap = false;
int ret;
effective = false;
- ret = get_file_caps(bprm, &effective);
+ ret = get_file_caps(bprm, &effective, &has_cap);
if (ret < 0)
return ret;
@@ -489,7 +493,7 @@ int cap_bprm_set_creds(struct linux_binprm *bprm)
* for a setuid root binary run by a non-root user. Do set it
* for a root user just to cause least surprise to an admin.
*/
- if (effective && new->uid != 0 && new->euid == 0) {
+ if (has_cap && new->uid != 0 && new->euid == 0) {
warn_setuid_and_fcaps_mixed(bprm->filename);
goto skip;
}
diff --git a/security/integrity/Kconfig b/security/integrity/Kconfig
new file mode 100644
index 00000000000..4bf00acf793
--- /dev/null
+++ b/security/integrity/Kconfig
@@ -0,0 +1,7 @@
+#
+config INTEGRITY
+ def_bool y
+ depends on IMA || EVM
+
+source security/integrity/ima/Kconfig
+source security/integrity/evm/Kconfig
diff --git a/security/integrity/Makefile b/security/integrity/Makefile
new file mode 100644
index 00000000000..0ae44aea651
--- /dev/null
+++ b/security/integrity/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for caching inode integrity data (iint)
+#
+
+obj-$(CONFIG_INTEGRITY) += integrity.o
+
+integrity-y := iint.o
+
+subdir-$(CONFIG_IMA) += ima
+obj-$(CONFIG_IMA) += ima/built-in.o
+subdir-$(CONFIG_EVM) += evm
+obj-$(CONFIG_EVM) += evm/built-in.o
diff --git a/security/integrity/evm/Kconfig b/security/integrity/evm/Kconfig
new file mode 100644
index 00000000000..afbb59dd262
--- /dev/null
+++ b/security/integrity/evm/Kconfig
@@ -0,0 +1,13 @@
+config EVM
+ boolean "EVM support"
+ depends on SECURITY && KEYS && (TRUSTED_KEYS=y || TRUSTED_KEYS=n)
+ select CRYPTO_HMAC
+ select CRYPTO_MD5
+ select CRYPTO_SHA1
+ select ENCRYPTED_KEYS
+ default n
+ help
+ EVM protects a file's security extended attributes against
+ integrity attacks.
+
+ If you are unsure how to answer this question, answer N.
diff --git a/security/integrity/evm/Makefile b/security/integrity/evm/Makefile
new file mode 100644
index 00000000000..7393c415a06
--- /dev/null
+++ b/security/integrity/evm/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for building the Extended Verification Module (EVM)
+#
+obj-$(CONFIG_EVM) += evm.o
+
+evm-y := evm_main.o evm_crypto.o evm_secfs.o
+evm-$(CONFIG_FS_POSIX_ACL) += evm_posix_acl.o
diff --git a/security/integrity/evm/evm.h b/security/integrity/evm/evm.h
new file mode 100644
index 00000000000..d320f519743
--- /dev/null
+++ b/security/integrity/evm/evm.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2005-2010 IBM Corporation
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * File: evm.h
+ *
+ */
+#include <linux/xattr.h>
+#include <linux/security.h>
+#include "../integrity.h"
+
+extern int evm_initialized;
+extern char *evm_hmac;
+
+extern struct crypto_shash *hmac_tfm;
+
+/* List of EVM protected security xattrs */
+extern char *evm_config_xattrnames[];
+
+extern int evm_init_key(void);
+extern int evm_update_evmxattr(struct dentry *dentry,
+ const char *req_xattr_name,
+ const char *req_xattr_value,
+ size_t req_xattr_value_len);
+extern int evm_calc_hmac(struct dentry *dentry, const char *req_xattr_name,
+ const char *req_xattr_value,
+ size_t req_xattr_value_len, char *digest);
+extern int evm_init_hmac(struct inode *inode, const struct xattr *xattr,
+ char *hmac_val);
+extern int evm_init_secfs(void);
+extern void evm_cleanup_secfs(void);
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
new file mode 100644
index 00000000000..5dd5b140242
--- /dev/null
+++ b/security/integrity/evm/evm_crypto.c
@@ -0,0 +1,216 @@
+/*
+ * Copyright (C) 2005-2010 IBM Corporation
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * File: evm_crypto.c
+ * Using root's kernel master key (kmk), calculate the HMAC
+ */
+
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/xattr.h>
+#include <keys/encrypted-type.h>
+#include <crypto/hash.h>
+#include "evm.h"
+
+#define EVMKEY "evm-key"
+#define MAX_KEY_SIZE 128
+static unsigned char evmkey[MAX_KEY_SIZE];
+static int evmkey_len = MAX_KEY_SIZE;
+
+struct crypto_shash *hmac_tfm;
+
+static struct shash_desc *init_desc(void)
+{
+ int rc;
+ struct shash_desc *desc;
+
+ if (hmac_tfm == NULL) {
+ hmac_tfm = crypto_alloc_shash(evm_hmac, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(hmac_tfm)) {
+ pr_err("Can not allocate %s (reason: %ld)\n",
+ evm_hmac, PTR_ERR(hmac_tfm));
+ rc = PTR_ERR(hmac_tfm);
+ hmac_tfm = NULL;
+ return ERR_PTR(rc);
+ }
+ }
+
+ desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac_tfm),
+ GFP_KERNEL);
+ if (!desc)
+ return ERR_PTR(-ENOMEM);
+
+ desc->tfm = hmac_tfm;
+ desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ rc = crypto_shash_setkey(hmac_tfm, evmkey, evmkey_len);
+ if (rc)
+ goto out;
+ rc = crypto_shash_init(desc);
+out:
+ if (rc) {
+ kfree(desc);
+ return ERR_PTR(rc);
+ }
+ return desc;
+}
+
+/* Protect against 'cutting & pasting' security.evm xattr, include inode
+ * specific info.
+ *
+ * (Additional directory/file metadata needs to be added for more complete
+ * protection.)
+ */
+static void hmac_add_misc(struct shash_desc *desc, struct inode *inode,
+ char *digest)
+{
+ struct h_misc {
+ unsigned long ino;
+ __u32 generation;
+ uid_t uid;
+ gid_t gid;
+ umode_t mode;
+ } hmac_misc;
+
+ memset(&hmac_misc, 0, sizeof hmac_misc);
+ hmac_misc.ino = inode->i_ino;
+ hmac_misc.generation = inode->i_generation;
+ hmac_misc.uid = inode->i_uid;
+ hmac_misc.gid = inode->i_gid;
+ hmac_misc.mode = inode->i_mode;
+ crypto_shash_update(desc, (const u8 *)&hmac_misc, sizeof hmac_misc);
+ crypto_shash_final(desc, digest);
+}
+
+/*
+ * Calculate the HMAC value across the set of protected security xattrs.
+ *
+ * Instead of retrieving the requested xattr, for performance, calculate
+ * the hmac using the requested xattr value. Don't alloc/free memory for
+ * each xattr, but attempt to re-use the previously allocated memory.
+ */
+int evm_calc_hmac(struct dentry *dentry, const char *req_xattr_name,
+ const char *req_xattr_value, size_t req_xattr_value_len,
+ char *digest)
+{
+ struct inode *inode = dentry->d_inode;
+ struct shash_desc *desc;
+ char **xattrname;
+ size_t xattr_size = 0;
+ char *xattr_value = NULL;
+ int error;
+ int size;
+
+ if (!inode->i_op || !inode->i_op->getxattr)
+ return -EOPNOTSUPP;
+ desc = init_desc();
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ error = -ENODATA;
+ for (xattrname = evm_config_xattrnames; *xattrname != NULL; xattrname++) {
+ if ((req_xattr_name && req_xattr_value)
+ && !strcmp(*xattrname, req_xattr_name)) {
+ error = 0;
+ crypto_shash_update(desc, (const u8 *)req_xattr_value,
+ req_xattr_value_len);
+ continue;
+ }
+ size = vfs_getxattr_alloc(dentry, *xattrname,
+ &xattr_value, xattr_size, GFP_NOFS);
+ if (size == -ENOMEM) {
+ error = -ENOMEM;
+ goto out;
+ }
+ if (size < 0)
+ continue;
+
+ error = 0;
+ xattr_size = size;
+ crypto_shash_update(desc, (const u8 *)xattr_value, xattr_size);
+ }
+ hmac_add_misc(desc, inode, digest);
+
+out:
+ kfree(xattr_value);
+ kfree(desc);
+ return error;
+}
+
+/*
+ * Calculate the hmac and update security.evm xattr
+ *
+ * Expects to be called with i_mutex locked.
+ */
+int evm_update_evmxattr(struct dentry *dentry, const char *xattr_name,
+ const char *xattr_value, size_t xattr_value_len)
+{
+ struct inode *inode = dentry->d_inode;
+ struct evm_ima_xattr_data xattr_data;
+ int rc = 0;
+
+ rc = evm_calc_hmac(dentry, xattr_name, xattr_value,
+ xattr_value_len, xattr_data.digest);
+ if (rc == 0) {
+ xattr_data.type = EVM_XATTR_HMAC;
+ rc = __vfs_setxattr_noperm(dentry, XATTR_NAME_EVM,
+ &xattr_data,
+ sizeof(xattr_data), 0);
+ }
+ else if (rc == -ENODATA)
+ rc = inode->i_op->removexattr(dentry, XATTR_NAME_EVM);
+ return rc;
+}
+
+int evm_init_hmac(struct inode *inode, const struct xattr *lsm_xattr,
+ char *hmac_val)
+{
+ struct shash_desc *desc;
+
+ desc = init_desc();
+ if (IS_ERR(desc)) {
+ printk(KERN_INFO "init_desc failed\n");
+ return PTR_ERR(desc);
+ }
+
+ crypto_shash_update(desc, lsm_xattr->value, lsm_xattr->value_len);
+ hmac_add_misc(desc, inode, hmac_val);
+ kfree(desc);
+ return 0;
+}
+
+/*
+ * Get the key from the TPM for the SHA1-HMAC
+ */
+int evm_init_key(void)
+{
+ struct key *evm_key;
+ struct encrypted_key_payload *ekp;
+ int rc = 0;
+
+ evm_key = request_key(&key_type_encrypted, EVMKEY, NULL);
+ if (IS_ERR(evm_key))
+ return -ENOENT;
+
+ down_read(&evm_key->sem);
+ ekp = evm_key->payload.data;
+ if (ekp->decrypted_datalen > MAX_KEY_SIZE) {
+ rc = -EINVAL;
+ goto out;
+ }
+ memcpy(evmkey, ekp->decrypted_data, ekp->decrypted_datalen);
+out:
+ /* burn the original key contents */
+ memset(ekp->decrypted_data, 0, ekp->decrypted_datalen);
+ up_read(&evm_key->sem);
+ key_put(evm_key);
+ return rc;
+}
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
new file mode 100644
index 00000000000..92d3d99a9f7
--- /dev/null
+++ b/security/integrity/evm/evm_main.c
@@ -0,0 +1,384 @@
+/*
+ * Copyright (C) 2005-2010 IBM Corporation
+ *
+ * Author:
+ * Mimi Zohar <zohar@us.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * File: evm_main.c
+ * implements evm_inode_setxattr, evm_inode_post_setxattr,
+ * evm_inode_removexattr, and evm_verifyxattr
+ */
+
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/xattr.h>
+#include <linux/integrity.h>
+#include <linux/evm.h>
+#include <crypto/hash.h>
+#include "evm.h"
+
+int evm_initialized;
+
+char *evm_hmac = "hmac(sha1)";
+
+char *evm_config_xattrnames[] = {
+#ifdef CONFIG_SECURITY_SELINUX
+ XATTR_NAME_SELINUX,
+#endif
+#ifdef CONFIG_SECURITY_SMACK
+ XATTR_NAME_SMACK,
+#endif
+ XATTR_NAME_CAPS,
+ NULL
+};
+
+static int evm_fixmode;
+static int __init evm_set_fixmode(char *str)
+{
+ if (strncmp(str, "fix", 3) == 0)
+ evm_fixmode = 1;
+ return 0;
+}
+__setup("evm=", evm_set_fixmode);
+
+/*
+ * evm_verify_hmac - calculate and compare the HMAC with the EVM xattr
+ *
+ * Compute the HMAC on the dentry's protected set of extended attributes
+ * and compare it against the stored security.evm xattr.
+ *
+ * For performance:
+ * - use the previously retrieved xattr value and length to calculate the
+ *   HMAC.
+ * - cache the verification result in the iint, when available.
+ *
+ * Returns integrity status
+ */
+static enum integrity_status evm_verify_hmac(struct dentry *dentry,
+ const char *xattr_name,
+ char *xattr_value,
+ size_t xattr_value_len,
+ struct integrity_iint_cache *iint)
+{
+ struct evm_ima_xattr_data xattr_data;
+ enum integrity_status evm_status = INTEGRITY_PASS;
+ int rc;
+
+ if (iint && iint->evm_status == INTEGRITY_PASS)
+ return iint->evm_status;
+
+ /* if status is not PASS, check again - a previous attempt may have failed with -ENOMEM */
+
+ rc = evm_calc_hmac(dentry, xattr_name, xattr_value,
+ xattr_value_len, xattr_data.digest);
+ if (rc < 0) {
+ evm_status = (rc == -ENODATA)
+ ? INTEGRITY_NOXATTRS : INTEGRITY_FAIL;
+ goto out;
+ }
+
+ xattr_data.type = EVM_XATTR_HMAC;
+ rc = vfs_xattr_cmp(dentry, XATTR_NAME_EVM, (u8 *)&xattr_data,
+ sizeof xattr_data, GFP_NOFS);
+ if (rc < 0)
+ evm_status = (rc == -ENODATA)
+ ? INTEGRITY_NOLABEL : INTEGRITY_FAIL;
+out:
+ if (iint)
+ iint->evm_status = evm_status;
+ return evm_status;
+}
+
+static int evm_protected_xattr(const char *req_xattr_name)
+{
+ char **xattrname;
+ int namelen;
+ int found = 0;
+
+ namelen = strlen(req_xattr_name);
+ for (xattrname = evm_config_xattrnames; *xattrname != NULL; xattrname++) {
+ if ((strlen(*xattrname) == namelen)
+ && (strncmp(req_xattr_name, *xattrname, namelen) == 0)) {
+ found = 1;
+ break;
+ }
+ if (strncmp(req_xattr_name,
+ *xattrname + XATTR_SECURITY_PREFIX_LEN,
+ strlen(req_xattr_name)) == 0) {
+ found = 1;
+ break;
+ }
+ }
+ return found;
+}
+
+/**
+ * evm_verifyxattr - verify the integrity of the requested xattr
+ * @dentry: object of the verify xattr
+ * @xattr_name: requested xattr
+ * @xattr_value: requested xattr value
+ * @xattr_value_len: requested xattr value length
+ *
+ * Calculate the HMAC for the given dentry and verify it against the stored
+ * security.evm xattr. For performance, use the xattr value and length
+ * previously retrieved to calculate the HMAC.
+ *
+ * Returns the xattr integrity status.
+ *
+ * This function requires the caller to lock the inode's i_mutex before it
+ * is executed.
+ */
+enum integrity_status evm_verifyxattr(struct dentry *dentry,
+ const char *xattr_name,
+ void *xattr_value, size_t xattr_value_len,
+ struct integrity_iint_cache *iint)
+{
+ if (!evm_initialized || !evm_protected_xattr(xattr_name))
+ return INTEGRITY_UNKNOWN;
+
+ if (!iint) {
+ iint = integrity_iint_find(dentry->d_inode);
+ if (!iint)
+ return INTEGRITY_UNKNOWN;
+ }
+ return evm_verify_hmac(dentry, xattr_name, xattr_value,
+ xattr_value_len, iint);
+}
+EXPORT_SYMBOL_GPL(evm_verifyxattr);
+
+/*
+ * evm_verify_current_integrity - verify the dentry's metadata integrity
+ * @dentry: pointer to the affected dentry
+ *
+ * Verify and return the dentry's metadata integrity. The exceptions are
+ * when EVM has not been initialized or is running in 'fix' mode.
+ */
+static enum integrity_status evm_verify_current_integrity(struct dentry *dentry)
+{
+ struct inode *inode = dentry->d_inode;
+
+ if (!evm_initialized || !S_ISREG(inode->i_mode) || evm_fixmode)
+ return 0;
+ return evm_verify_hmac(dentry, NULL, NULL, 0, NULL);
+}
+
+/*
+ * evm_protect_xattr - protect the EVM extended attribute
+ *
+ * Prevent security.evm from being modified or removed without the
+ * necessary permissions or when the existing value is invalid.
+ *
+ * The posix xattr acls are 'system' prefixed, which normally would not
+ * affect security.evm. An interesting side effect of writing posix xattr
+ * acls is that they modify the i_mode, which is included in security.evm.
+ * For posix xattr acls only, permit security.evm, even if it currently
+ * doesn't exist, to be updated.
+ */
+static int evm_protect_xattr(struct dentry *dentry, const char *xattr_name,
+ const void *xattr_value, size_t xattr_value_len)
+{
+ enum integrity_status evm_status;
+
+ if (strcmp(xattr_name, XATTR_NAME_EVM) == 0) {
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ } else if (!evm_protected_xattr(xattr_name)) {
+ if (!posix_xattr_acl(xattr_name))
+ return 0;
+ evm_status = evm_verify_current_integrity(dentry);
+ if ((evm_status == INTEGRITY_PASS) ||
+ (evm_status == INTEGRITY_NOXATTRS))
+ return 0;
+ return -EPERM;
+ }
+ evm_status = evm_verify_current_integrity(dentry);
+ return evm_status == INTEGRITY_PASS ? 0 : -EPERM;
+}
+
+/**
+ * evm_inode_setxattr - protect the EVM extended attribute
+ * @dentry: pointer to the affected dentry
+ * @xattr_name: pointer to the affected extended attribute name
+ * @xattr_value: pointer to the new extended attribute value
+ * @xattr_value_len: pointer to the new extended attribute value length
+ *
+ * Updating 'security.evm' requires CAP_SYS_ADMIN privileges and that
+ * the current value is valid.
+ */
+int evm_inode_setxattr(struct dentry *dentry, const char *xattr_name,
+ const void *xattr_value, size_t xattr_value_len)
+{
+ return evm_protect_xattr(dentry, xattr_name, xattr_value,
+ xattr_value_len);
+}
+
+/**
+ * evm_inode_removexattr - protect the EVM extended attribute
+ * @dentry: pointer to the affected dentry
+ * @xattr_name: pointer to the affected extended attribute name
+ *
+ * Removing 'security.evm' requires CAP_SYS_ADMIN privileges and that
+ * the current value is valid.
+ */
+int evm_inode_removexattr(struct dentry *dentry, const char *xattr_name)
+{
+ return evm_protect_xattr(dentry, xattr_name, NULL, 0);
+}
+
+/**
+ * evm_inode_post_setxattr - update 'security.evm' to reflect the changes
+ * @dentry: pointer to the affected dentry
+ * @xattr_name: pointer to the affected extended attribute name
+ * @xattr_value: pointer to the new extended attribute value
+ * @xattr_value_len: pointer to the new extended attribute value length
+ *
+ * Update the HMAC stored in 'security.evm' to reflect the change.
+ *
+ * No need to take the i_mutex lock here, as this function is called from
+ * __vfs_setxattr_noperm(). The caller of which has taken the inode's
+ * i_mutex lock.
+ */
+void evm_inode_post_setxattr(struct dentry *dentry, const char *xattr_name,
+ const void *xattr_value, size_t xattr_value_len)
+{
+ if (!evm_initialized || (!evm_protected_xattr(xattr_name)
+ && !posix_xattr_acl(xattr_name)))
+ return;
+
+ evm_update_evmxattr(dentry, xattr_name, xattr_value, xattr_value_len);
+ return;
+}
+
+/**
+ * evm_inode_post_removexattr - update 'security.evm' after removing the xattr
+ * @dentry: pointer to the affected dentry
+ * @xattr_name: pointer to the affected extended attribute name
+ *
+ * Update the HMAC stored in 'security.evm' to reflect removal of the xattr.
+ */
+void evm_inode_post_removexattr(struct dentry *dentry, const char *xattr_name)
+{
+ struct inode *inode = dentry->d_inode;
+
+ if (!evm_initialized || !evm_protected_xattr(xattr_name))
+ return;
+
+ mutex_lock(&inode->i_mutex);
+ evm_update_evmxattr(dentry, xattr_name, NULL, 0);
+ mutex_unlock(&inode->i_mutex);
+ return;
+}
+
+/**
+ * evm_inode_setattr - prevent updating an invalid EVM extended attribute
+ * @dentry: pointer to the affected dentry
+ */
+int evm_inode_setattr(struct dentry *dentry, struct iattr *attr)
+{
+ unsigned int ia_valid = attr->ia_valid;
+ enum integrity_status evm_status;
+
+ if (!(ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID)))
+ return 0;
+ evm_status = evm_verify_current_integrity(dentry);
+ if ((evm_status == INTEGRITY_PASS) ||
+ (evm_status == INTEGRITY_NOXATTRS))
+ return 0;
+ return -EPERM;
+}
+
+/**
+ * evm_inode_post_setattr - update 'security.evm' after modifying metadata
+ * @dentry: pointer to the affected dentry
+ * @ia_valid: for the UID and GID status
+ *
+ * For now, update the HMAC stored in 'security.evm' to reflect UID/GID
+ * changes.
+ *
+ * This function is called from notify_change(), which expects the caller
+ * to lock the inode's i_mutex.
+ */
+void evm_inode_post_setattr(struct dentry *dentry, int ia_valid)
+{
+ if (!evm_initialized)
+ return;
+
+ if (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID))
+ evm_update_evmxattr(dentry, NULL, NULL, 0);
+ return;
+}
+
+/*
+ * evm_inode_init_security - initializes security.evm
+ */
+int evm_inode_init_security(struct inode *inode,
+ const struct xattr *lsm_xattr,
+ struct xattr *evm_xattr)
+{
+ struct evm_ima_xattr_data *xattr_data;
+ int rc;
+
+ if (!evm_initialized || !evm_protected_xattr(lsm_xattr->name))
+ return 0;
+
+ xattr_data = kzalloc(sizeof(*xattr_data), GFP_NOFS);
+ if (!xattr_data)
+ return -ENOMEM;
+
+ xattr_data->type = EVM_XATTR_HMAC;
+ rc = evm_init_hmac(inode, lsm_xattr, xattr_data->digest);
+ if (rc < 0)
+ goto out;
+
+ evm_xattr->value = xattr_data;
+ evm_xattr->value_len = sizeof(*xattr_data);
+ evm_xattr->name = kstrdup(XATTR_EVM_SUFFIX, GFP_NOFS);
+ return 0;
+out:
+ kfree(xattr_data);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(evm_inode_init_security);
+
+static int __init init_evm(void)
+{
+ int error;
+
+ error = evm_init_secfs();
+ if (error < 0) {
+ printk(KERN_INFO "EVM: Error registering secfs\n");
+ goto err;
+ }
+err:
+ return error;
+}
+
+static void __exit cleanup_evm(void)
+{
+ evm_cleanup_secfs();
+ if (hmac_tfm)
+ crypto_free_shash(hmac_tfm);
+}
+
+/*
+ * evm_display_config - list the EVM protected security extended attributes
+ */
+static int __init evm_display_config(void)
+{
+ char **xattrname;
+
+ for (xattrname = evm_config_xattrnames; *xattrname != NULL; xattrname++)
+ printk(KERN_INFO "EVM: %s\n", *xattrname);
+ return 0;
+}
+
+pure_initcall(evm_display_config);
+late_initcall(init_evm);
+
+MODULE_DESCRIPTION("Extended Verification Module");
+MODULE_LICENSE("GPL");
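
A hedged usage sketch, not part of the patch: evm_verifyxattr() above expects the caller to hold the inode's i_mutex and to pass the xattr value it has just read; the iint argument may be NULL, in which case the iint is looked up internally. A hypothetical caller (names are illustrative, assuming <linux/evm.h>, <linux/xattr.h>, <linux/fs.h> and <linux/integrity.h> are included) could look like this:

	static int example_appraise_selinux_xattr(struct dentry *dentry,
						  void *xattr_value,
						  size_t xattr_value_len)
	{
		enum integrity_status status;

		mutex_lock(&dentry->d_inode->i_mutex);	/* required by evm_verifyxattr() */
		status = evm_verifyxattr(dentry, XATTR_NAME_SELINUX,
					 xattr_value, xattr_value_len, NULL);
		mutex_unlock(&dentry->d_inode->i_mutex);

		/* tolerate inodes EVM knows nothing about */
		if (status == INTEGRITY_PASS || status == INTEGRITY_UNKNOWN)
			return 0;
		return -EPERM;
	}
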
diff --git a/security/integrity/evm/evm_posix_acl.c b/security/integrity/evm/evm_posix_acl.c
new file mode 100644
index 00000000000..b1753e98bf9
--- /dev/null
+++ b/security/integrity/evm/evm_posix_acl.c
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2011 IBM Corporation
+ *
+ * Author:
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ */
+
+#include <linux/module.h>
+#include <linux/xattr.h>
+
+int posix_xattr_acl(char *xattr)
+{
+ int xattr_len = strlen(xattr);
+
+ if ((strlen(XATTR_NAME_POSIX_ACL_ACCESS) == xattr_len)
+ && (strncmp(XATTR_NAME_POSIX_ACL_ACCESS, xattr, xattr_len) == 0))
+ return 1;
+ if ((strlen(XATTR_NAME_POSIX_ACL_DEFAULT) == xattr_len)
+ && (strncmp(XATTR_NAME_POSIX_ACL_DEFAULT, xattr, xattr_len) == 0))
+ return 1;
+ return 0;
+}
diff --git a/security/integrity/evm/evm_secfs.c b/security/integrity/evm/evm_secfs.c
new file mode 100644
index 00000000000..ac762995057
--- /dev/null
+++ b/security/integrity/evm/evm_secfs.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2010 IBM Corporation
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * File: evm_secfs.c
+ * - Used to signal when key is on keyring
+ * - Get the key and enable EVM
+ */
+
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include "evm.h"
+
+static struct dentry *evm_init_tpm;
+
+/**
+ * evm_read_key - read() for <securityfs>/evm
+ *
+ * @filp: file pointer, not actually used
+ * @buf: where to put the result
+ * @count: maximum to send along
+ * @ppos: where to start
+ *
+ * Returns number of bytes read or error code, as appropriate
+ */
+static ssize_t evm_read_key(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char temp[80];
+ ssize_t rc;
+
+ if (*ppos != 0)
+ return 0;
+
+ sprintf(temp, "%d", evm_initialized);
+ rc = simple_read_from_buffer(buf, count, ppos, temp, strlen(temp));
+
+ return rc;
+}
+
+/**
+ * evm_write_key - write() for <securityfs>/evm
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Used to signal that the key is on the kernel key ring.
+ * - get the integrity hmac key from the kernel key ring
+ * - create list of hmac protected extended attributes
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t evm_write_key(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char temp[80];
+ int i, error;
+
+ if (!capable(CAP_SYS_ADMIN) || evm_initialized)
+ return -EPERM;
+
+ if (count >= sizeof(temp) || count == 0)
+ return -EINVAL;
+
+ if (copy_from_user(temp, buf, count) != 0)
+ return -EFAULT;
+
+ temp[count] = '\0';
+
+ if ((sscanf(temp, "%d", &i) != 1) || (i != 1))
+ return -EINVAL;
+
+ error = evm_init_key();
+ if (!error) {
+ evm_initialized = 1;
+ pr_info("EVM: initialized\n");
+ } else
+ pr_err("EVM: initialization failed\n");
+ return count;
+}
+
+static const struct file_operations evm_key_ops = {
+ .read = evm_read_key,
+ .write = evm_write_key,
+};
+
+int __init evm_init_secfs(void)
+{
+ int error = 0;
+
+ evm_init_tpm = securityfs_create_file("evm", S_IRUSR | S_IRGRP,
+ NULL, NULL, &evm_key_ops);
+ if (!evm_init_tpm || IS_ERR(evm_init_tpm))
+ error = -EFAULT;
+ return error;
+}
+
+void __exit evm_cleanup_secfs(void)
+{
+ if (evm_init_tpm)
+ securityfs_remove(evm_init_tpm);
+}
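
A hedged userspace sketch of how the securityfs interface above is driven (assumptions: securityfs is mounted at /sys/kernel/security, the program runs with CAP_SYS_ADMIN, and the 'evm-key' encrypted key has already been loaded onto root's keyring). Writing "1" triggers evm_init_key() and sets evm_initialized:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/sys/kernel/security/evm", O_WRONLY);

		if (fd < 0) {
			perror("open evm");
			return 1;
		}
		if (write(fd, "1", 1) != 1) {	/* evm_write_key() expects "1" */
			perror("write evm");
			close(fd);
			return 1;
		}
		close(fd);
		return 0;
	}
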
diff --git a/security/integrity/iint.c b/security/integrity/iint.c
new file mode 100644
index 00000000000..399641c3e84
--- /dev/null
+++ b/security/integrity/iint.c
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2008 IBM Corporation
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * File: integrity_iint.c
+ * - implements the integrity hooks: integrity_inode_alloc,
+ * integrity_inode_free
+ * - cache integrity information associated with an inode
+ * using an rbtree.
+ */
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/rbtree.h>
+#include "integrity.h"
+
+static struct rb_root integrity_iint_tree = RB_ROOT;
+static DEFINE_SPINLOCK(integrity_iint_lock);
+static struct kmem_cache *iint_cache __read_mostly;
+
+int iint_initialized;
+
+/*
+ * __integrity_iint_find - return the iint associated with an inode
+ */
+static struct integrity_iint_cache *__integrity_iint_find(struct inode *inode)
+{
+ struct integrity_iint_cache *iint;
+ struct rb_node *n = integrity_iint_tree.rb_node;
+
+ assert_spin_locked(&integrity_iint_lock);
+
+ while (n) {
+ iint = rb_entry(n, struct integrity_iint_cache, rb_node);
+
+ if (inode < iint->inode)
+ n = n->rb_left;
+ else if (inode > iint->inode)
+ n = n->rb_right;
+ else
+ break;
+ }
+ if (!n)
+ return NULL;
+
+ return iint;
+}
+
+/*
+ * integrity_iint_find - return the iint associated with an inode
+ */
+struct integrity_iint_cache *integrity_iint_find(struct inode *inode)
+{
+ struct integrity_iint_cache *iint;
+
+ if (!IS_IMA(inode))
+ return NULL;
+
+ spin_lock(&integrity_iint_lock);
+ iint = __integrity_iint_find(inode);
+ spin_unlock(&integrity_iint_lock);
+
+ return iint;
+}
+
+static void iint_free(struct integrity_iint_cache *iint)
+{
+ iint->version = 0;
+ iint->flags = 0UL;
+ iint->evm_status = INTEGRITY_UNKNOWN;
+ kmem_cache_free(iint_cache, iint);
+}
+
+/**
+ * integrity_inode_alloc - allocate an iint associated with an inode
+ * @inode: pointer to the inode
+ */
+int integrity_inode_alloc(struct inode *inode)
+{
+ struct rb_node **p;
+ struct rb_node *new_node, *parent = NULL;
+ struct integrity_iint_cache *new_iint, *test_iint;
+ int rc;
+
+ new_iint = kmem_cache_alloc(iint_cache, GFP_NOFS);
+ if (!new_iint)
+ return -ENOMEM;
+
+ new_iint->inode = inode;
+ new_node = &new_iint->rb_node;
+
+ mutex_lock(&inode->i_mutex); /* i_flags */
+ spin_lock(&integrity_iint_lock);
+
+ p = &integrity_iint_tree.rb_node;
+ while (*p) {
+ parent = *p;
+ test_iint = rb_entry(parent, struct integrity_iint_cache,
+ rb_node);
+ rc = -EEXIST;
+ if (inode < test_iint->inode)
+ p = &(*p)->rb_left;
+ else if (inode > test_iint->inode)
+ p = &(*p)->rb_right;
+ else
+ goto out_err;
+ }
+
+ inode->i_flags |= S_IMA;
+ rb_link_node(new_node, parent, p);
+ rb_insert_color(new_node, &integrity_iint_tree);
+
+ spin_unlock(&integrity_iint_lock);
+ mutex_unlock(&inode->i_mutex); /* i_flags */
+
+ return 0;
+out_err:
+ spin_unlock(&integrity_iint_lock);
+ mutex_unlock(&inode->i_mutex); /* i_flags */
+ iint_free(new_iint);
+
+ return rc;
+}
+
+/**
+ * integrity_inode_free - called on security_inode_free
+ * @inode: pointer to the inode
+ *
+ * Free the integrity information (iint) associated with an inode.
+ */
+void integrity_inode_free(struct inode *inode)
+{
+ struct integrity_iint_cache *iint;
+
+ if (!IS_IMA(inode))
+ return;
+
+ spin_lock(&integrity_iint_lock);
+ iint = __integrity_iint_find(inode);
+ rb_erase(&iint->rb_node, &integrity_iint_tree);
+ spin_unlock(&integrity_iint_lock);
+
+ iint_free(iint);
+}
+
+static void init_once(void *foo)
+{
+ struct integrity_iint_cache *iint = foo;
+
+ memset(iint, 0, sizeof *iint);
+ iint->version = 0;
+ iint->flags = 0UL;
+ mutex_init(&iint->mutex);
+ iint->evm_status = INTEGRITY_UNKNOWN;
+}
+
+static int __init integrity_iintcache_init(void)
+{
+ iint_cache =
+ kmem_cache_create("iint_cache", sizeof(struct integrity_iint_cache),
+ 0, SLAB_PANIC, init_once);
+ iint_initialized = 1;
+ return 0;
+}
+security_initcall(integrity_iintcache_init);
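
A hedged caller sketch mirroring the ima_main.c hunk further down: look up the iint for an inode, allocating one on demand and retrying on the benign -EEXIST race. It assumes the declarations from integrity.h plus <linux/err.h>; the helper name is illustrative only:

	static struct integrity_iint_cache *example_get_iint(struct inode *inode)
	{
		struct integrity_iint_cache *iint;
		int rc;

	retry:
		iint = integrity_iint_find(inode);
		if (!iint) {
			rc = integrity_inode_alloc(inode);
			if (!rc || rc == -EEXIST)
				goto retry;	/* another task may have raced us */
			return ERR_PTR(rc);
		}
		return iint;
	}
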
diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
index b6ecfd4d8d7..19c053b8230 100644
--- a/security/integrity/ima/Kconfig
+++ b/security/integrity/ima/Kconfig
@@ -3,6 +3,7 @@
config IMA
bool "Integrity Measurement Architecture(IMA)"
depends on SECURITY
+ select INTEGRITY
select SECURITYFS
select CRYPTO
select CRYPTO_HMAC
diff --git a/security/integrity/ima/Makefile b/security/integrity/ima/Makefile
index 787c4cb916c..5690c021de8 100644
--- a/security/integrity/ima/Makefile
+++ b/security/integrity/ima/Makefile
@@ -6,4 +6,4 @@
obj-$(CONFIG_IMA) += ima.o
ima-y := ima_fs.o ima_queue.o ima_init.o ima_main.o ima_crypto.o ima_api.o \
- ima_policy.o ima_iint.o ima_audit.o
+ ima_policy.o ima_audit.o
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index 08408bd7146..3ccf7acac6d 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -24,18 +24,19 @@
#include <linux/tpm.h>
#include <linux/audit.h>
+#include "../integrity.h"
+
enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_ASCII };
enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
/* digest size for IMA, fits SHA1 or MD5 */
-#define IMA_DIGEST_SIZE 20
+#define IMA_DIGEST_SIZE SHA1_DIGEST_SIZE
#define IMA_EVENT_NAME_LEN_MAX 255
#define IMA_HASH_BITS 9
#define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS)
/* set during initialization */
-extern int iint_initialized;
extern int ima_initialized;
extern int ima_used_chip;
extern char *ima_hash;
@@ -96,34 +97,21 @@ static inline unsigned long ima_hash_key(u8 *digest)
return hash_long(*digest, IMA_HASH_BITS);
}
-/* iint cache flags */
-#define IMA_MEASURED 0x01
-
-/* integrity data associated with an inode */
-struct ima_iint_cache {
- struct rb_node rb_node; /* rooted in ima_iint_tree */
- struct inode *inode; /* back pointer to inode in question */
- u64 version; /* track inode changes */
- unsigned char flags;
- u8 digest[IMA_DIGEST_SIZE];
- struct mutex mutex; /* protects: version, flags, digest */
-};
-
/* LIM API function definitions */
int ima_must_measure(struct inode *inode, int mask, int function);
-int ima_collect_measurement(struct ima_iint_cache *iint, struct file *file);
-void ima_store_measurement(struct ima_iint_cache *iint, struct file *file,
+int ima_collect_measurement(struct integrity_iint_cache *iint,
+ struct file *file);
+void ima_store_measurement(struct integrity_iint_cache *iint, struct file *file,
const unsigned char *filename);
int ima_store_template(struct ima_template_entry *entry, int violation,
struct inode *inode);
-void ima_template_show(struct seq_file *m, void *e,
- enum ima_show_type show);
+void ima_template_show(struct seq_file *m, void *e, enum ima_show_type show);
/* rbtree tree calls to lookup, insert, delete
* integrity data associated with an inode.
*/
-struct ima_iint_cache *ima_iint_insert(struct inode *inode);
-struct ima_iint_cache *ima_iint_find(struct inode *inode);
+struct integrity_iint_cache *integrity_iint_insert(struct inode *inode);
+struct integrity_iint_cache *integrity_iint_find(struct inode *inode);
/* IMA policy related functions */
enum ima_hooks { FILE_CHECK = 1, FILE_MMAP, BPRM_CHECK };
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
index da36d2c085a..0d50df04ccc 100644
--- a/security/integrity/ima/ima_api.c
+++ b/security/integrity/ima/ima_api.c
@@ -126,7 +126,8 @@ int ima_must_measure(struct inode *inode, int mask, int function)
*
* Return 0 on success, error code otherwise
*/
-int ima_collect_measurement(struct ima_iint_cache *iint, struct file *file)
+int ima_collect_measurement(struct integrity_iint_cache *iint,
+ struct file *file)
{
int result = -EEXIST;
@@ -156,8 +157,8 @@ int ima_collect_measurement(struct ima_iint_cache *iint, struct file *file)
*
* Must be called with iint->mutex held.
*/
-void ima_store_measurement(struct ima_iint_cache *iint, struct file *file,
- const unsigned char *filename)
+void ima_store_measurement(struct integrity_iint_cache *iint,
+ struct file *file, const unsigned char *filename)
{
const char *op = "add_template_measure";
const char *audit_cause = "ENOMEM";
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
index ef21b96a0b4..e1aa2b482dd 100644
--- a/security/integrity/ima/ima_fs.c
+++ b/security/integrity/ima/ima_fs.c
@@ -287,7 +287,7 @@ static atomic_t policy_opencount = ATOMIC_INIT(1);
/*
* ima_open_policy: sequentialize access to the policy file
*/
-int ima_open_policy(struct inode * inode, struct file * filp)
+static int ima_open_policy(struct inode * inode, struct file * filp)
{
/* No point in being allowed to open it if you aren't going to write */
if (!(filp->f_flags & O_WRONLY))
diff --git a/security/integrity/ima/ima_iint.c b/security/integrity/ima/ima_iint.c
deleted file mode 100644
index 4ae73040ab7..00000000000
--- a/security/integrity/ima/ima_iint.c
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Copyright (C) 2008 IBM Corporation
- *
- * Authors:
- * Mimi Zohar <zohar@us.ibm.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2 of the
- * License.
- *
- * File: ima_iint.c
- * - implements the IMA hooks: ima_inode_alloc, ima_inode_free
- * - cache integrity information associated with an inode
- * using a rbtree tree.
- */
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/rbtree.h>
-#include "ima.h"
-
-static struct rb_root ima_iint_tree = RB_ROOT;
-static DEFINE_SPINLOCK(ima_iint_lock);
-static struct kmem_cache *iint_cache __read_mostly;
-
-int iint_initialized = 0;
-
-/*
- * __ima_iint_find - return the iint associated with an inode
- */
-static struct ima_iint_cache *__ima_iint_find(struct inode *inode)
-{
- struct ima_iint_cache *iint;
- struct rb_node *n = ima_iint_tree.rb_node;
-
- assert_spin_locked(&ima_iint_lock);
-
- while (n) {
- iint = rb_entry(n, struct ima_iint_cache, rb_node);
-
- if (inode < iint->inode)
- n = n->rb_left;
- else if (inode > iint->inode)
- n = n->rb_right;
- else
- break;
- }
- if (!n)
- return NULL;
-
- return iint;
-}
-
-/*
- * ima_iint_find - return the iint associated with an inode
- */
-struct ima_iint_cache *ima_iint_find(struct inode *inode)
-{
- struct ima_iint_cache *iint;
-
- if (!IS_IMA(inode))
- return NULL;
-
- spin_lock(&ima_iint_lock);
- iint = __ima_iint_find(inode);
- spin_unlock(&ima_iint_lock);
-
- return iint;
-}
-
-static void iint_free(struct ima_iint_cache *iint)
-{
- iint->version = 0;
- iint->flags = 0UL;
- kmem_cache_free(iint_cache, iint);
-}
-
-/**
- * ima_inode_alloc - allocate an iint associated with an inode
- * @inode: pointer to the inode
- */
-int ima_inode_alloc(struct inode *inode)
-{
- struct rb_node **p;
- struct rb_node *new_node, *parent = NULL;
- struct ima_iint_cache *new_iint, *test_iint;
- int rc;
-
- new_iint = kmem_cache_alloc(iint_cache, GFP_NOFS);
- if (!new_iint)
- return -ENOMEM;
-
- new_iint->inode = inode;
- new_node = &new_iint->rb_node;
-
- mutex_lock(&inode->i_mutex); /* i_flags */
- spin_lock(&ima_iint_lock);
-
- p = &ima_iint_tree.rb_node;
- while (*p) {
- parent = *p;
- test_iint = rb_entry(parent, struct ima_iint_cache, rb_node);
-
- rc = -EEXIST;
- if (inode < test_iint->inode)
- p = &(*p)->rb_left;
- else if (inode > test_iint->inode)
- p = &(*p)->rb_right;
- else
- goto out_err;
- }
-
- inode->i_flags |= S_IMA;
- rb_link_node(new_node, parent, p);
- rb_insert_color(new_node, &ima_iint_tree);
-
- spin_unlock(&ima_iint_lock);
- mutex_unlock(&inode->i_mutex); /* i_flags */
-
- return 0;
-out_err:
- spin_unlock(&ima_iint_lock);
- mutex_unlock(&inode->i_mutex); /* i_flags */
- iint_free(new_iint);
-
- return rc;
-}
-
-/**
- * ima_inode_free - called on security_inode_free
- * @inode: pointer to the inode
- *
- * Free the integrity information(iint) associated with an inode.
- */
-void ima_inode_free(struct inode *inode)
-{
- struct ima_iint_cache *iint;
-
- if (!IS_IMA(inode))
- return;
-
- spin_lock(&ima_iint_lock);
- iint = __ima_iint_find(inode);
- rb_erase(&iint->rb_node, &ima_iint_tree);
- spin_unlock(&ima_iint_lock);
-
- iint_free(iint);
-}
-
-static void init_once(void *foo)
-{
- struct ima_iint_cache *iint = foo;
-
- memset(iint, 0, sizeof *iint);
- iint->version = 0;
- iint->flags = 0UL;
- mutex_init(&iint->mutex);
-}
-
-static int __init ima_iintcache_init(void)
-{
- iint_cache =
- kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0,
- SLAB_PANIC, init_once);
- iint_initialized = 1;
- return 0;
-}
-security_initcall(ima_iintcache_init);
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 26b46ff7466..1eff5cb001e 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -22,6 +22,7 @@
#include <linux/mount.h>
#include <linux/mman.h>
#include <linux/slab.h>
+#include <linux/ima.h>
#include "ima.h"
@@ -82,7 +83,7 @@ out:
"open_writers");
}
-static void ima_check_last_writer(struct ima_iint_cache *iint,
+static void ima_check_last_writer(struct integrity_iint_cache *iint,
struct inode *inode,
struct file *file)
{
@@ -105,12 +106,12 @@ static void ima_check_last_writer(struct ima_iint_cache *iint,
void ima_file_free(struct file *file)
{
struct inode *inode = file->f_dentry->d_inode;
- struct ima_iint_cache *iint;
+ struct integrity_iint_cache *iint;
if (!iint_initialized || !S_ISREG(inode->i_mode))
return;
- iint = ima_iint_find(inode);
+ iint = integrity_iint_find(inode);
if (!iint)
return;
@@ -121,7 +122,7 @@ static int process_measurement(struct file *file, const unsigned char *filename,
int mask, int function)
{
struct inode *inode = file->f_dentry->d_inode;
- struct ima_iint_cache *iint;
+ struct integrity_iint_cache *iint;
int rc = 0;
if (!ima_initialized || !S_ISREG(inode->i_mode))
@@ -131,9 +132,9 @@ static int process_measurement(struct file *file, const unsigned char *filename,
if (rc != 0)
return rc;
retry:
- iint = ima_iint_find(inode);
+ iint = integrity_iint_find(inode);
if (!iint) {
- rc = ima_inode_alloc(inode);
+ rc = integrity_inode_alloc(inode);
if (!rc || rc == -EEXIST)
goto retry;
return rc;
diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
new file mode 100644
index 00000000000..3143a3c3986
--- /dev/null
+++ b/security/integrity/integrity.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2009-2010 IBM Corporation
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/integrity.h>
+#include <crypto/sha.h>
+
+/* iint cache flags */
+#define IMA_MEASURED 0x01
+
+enum evm_ima_xattr_type {
+ IMA_XATTR_DIGEST = 0x01,
+ EVM_XATTR_HMAC,
+ EVM_IMA_XATTR_DIGSIG,
+};
+
+struct evm_ima_xattr_data {
+ u8 type;
+ u8 digest[SHA1_DIGEST_SIZE];
+} __attribute__((packed));
+
+/* integrity data associated with an inode */
+struct integrity_iint_cache {
+ struct rb_node rb_node; /* rooted in integrity_iint_tree */
+ struct inode *inode; /* back pointer to inode in question */
+ u64 version; /* track inode changes */
+ unsigned char flags;
+ u8 digest[SHA1_DIGEST_SIZE];
+ struct mutex mutex; /* protects: version, flags, digest */
+ enum integrity_status evm_status;
+};
+
+/* rbtree tree calls to lookup, insert, delete
+ * integrity data associated with an inode.
+ */
+struct integrity_iint_cache *integrity_iint_insert(struct inode *inode);
+struct integrity_iint_cache *integrity_iint_find(struct inode *inode);
+
+/* set during initialization */
+extern int iint_initialized;
diff --git a/security/keys/Makefile b/security/keys/Makefile
index b34cc6ee690..a56f1ffdc64 100644
--- a/security/keys/Makefile
+++ b/security/keys/Makefile
@@ -14,7 +14,7 @@ obj-y := \
user_defined.o
obj-$(CONFIG_TRUSTED_KEYS) += trusted.o
-obj-$(CONFIG_ENCRYPTED_KEYS) += ecryptfs_format.o encrypted.o
+obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted-keys/
obj-$(CONFIG_KEYS_COMPAT) += compat.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_SYSCTL) += sysctl.o
diff --git a/security/keys/encrypted-keys/Makefile b/security/keys/encrypted-keys/Makefile
new file mode 100644
index 00000000000..6bc7a86d102
--- /dev/null
+++ b/security/keys/encrypted-keys/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for encrypted keys
+#
+
+obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted.o ecryptfs_format.o
+obj-$(CONFIG_TRUSTED_KEYS) += masterkey_trusted.o
diff --git a/security/keys/ecryptfs_format.c b/security/keys/encrypted-keys/ecryptfs_format.c
index 6daa3b6ff9e..6daa3b6ff9e 100644
--- a/security/keys/ecryptfs_format.c
+++ b/security/keys/encrypted-keys/ecryptfs_format.c
diff --git a/security/keys/ecryptfs_format.h b/security/keys/encrypted-keys/ecryptfs_format.h
index 40294de238b..40294de238b 100644
--- a/security/keys/ecryptfs_format.h
+++ b/security/keys/encrypted-keys/ecryptfs_format.h
diff --git a/security/keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index e7eca9ec4c6..f33804c1b4c 100644
--- a/security/keys/encrypted.c
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -299,31 +299,6 @@ out:
}
/*
- * request_trusted_key - request the trusted key
- *
- * Trusted keys are sealed to PCRs and other metadata. Although userspace
- * manages both trusted/encrypted key-types, like the encrypted key type
- * data, trusted key type data is not visible decrypted from userspace.
- */
-static struct key *request_trusted_key(const char *trusted_desc,
- u8 **master_key, size_t *master_keylen)
-{
- struct trusted_key_payload *tpayload;
- struct key *tkey;
-
- tkey = request_key(&key_type_trusted, trusted_desc, NULL);
- if (IS_ERR(tkey))
- goto error;
-
- down_read(&tkey->sem);
- tpayload = rcu_dereference(tkey->payload.data);
- *master_key = tpayload->key;
- *master_keylen = tpayload->key_len;
-error:
- return tkey;
-}
-
-/*
* request_user_key - request the user key
*
* Use a user provided key to encrypt/decrypt an encrypted-key.
@@ -469,8 +444,14 @@ static struct key *request_master_key(struct encrypted_key_payload *epayload,
goto out;
if (IS_ERR(mkey)) {
- pr_info("encrypted_key: key %s not found",
- epayload->master_desc);
+ int ret = PTR_ERR(mkey);
+
+ if (ret == -ENOTSUPP)
+ pr_info("encrypted_key: key %s not supported",
+ epayload->master_desc);
+ else
+ pr_info("encrypted_key: key %s not found",
+ epayload->master_desc);
goto out;
}
@@ -686,11 +667,19 @@ static int encrypted_key_decrypt(struct encrypted_key_payload *epayload,
return -EINVAL;
hex_encoded_data = hex_encoded_iv + (2 * ivsize) + 2;
- hex2bin(epayload->iv, hex_encoded_iv, ivsize);
- hex2bin(epayload->encrypted_data, hex_encoded_data, encrypted_datalen);
+ ret = hex2bin(epayload->iv, hex_encoded_iv, ivsize);
+ if (ret < 0)
+ return -EINVAL;
+ ret = hex2bin(epayload->encrypted_data, hex_encoded_data,
+ encrypted_datalen);
+ if (ret < 0)
+ return -EINVAL;
hmac = epayload->format + epayload->datablob_len;
- hex2bin(hmac, hex_encoded_data + (encrypted_datalen * 2), HASH_SIZE);
+ ret = hex2bin(hmac, hex_encoded_data + (encrypted_datalen * 2),
+ HASH_SIZE);
+ if (ret < 0)
+ return -EINVAL;
mkey = request_master_key(epayload, &master_key, &master_keylen);
if (IS_ERR(mkey))
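
A hedged side note on the hex2bin() checks added above: hex2bin() now reports malformed ASCII-hex input, so callers must test its return value instead of assuming the conversion succeeded. A minimal kernel-style sketch with illustrative names only:

	#include <linux/kernel.h>	/* hex2bin() */
	#include <linux/errno.h>

	static int example_parse_hex(u8 *dst, const char *ascii_hex, size_t len)
	{
		if (hex2bin(dst, ascii_hex, len) < 0)
			return -EINVAL;	/* non-hex character in the input */
		return 0;
	}
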
diff --git a/security/keys/encrypted.h b/security/keys/encrypted-keys/encrypted.h
index cef5e2f2b7d..b6ade894525 100644
--- a/security/keys/encrypted.h
+++ b/security/keys/encrypted-keys/encrypted.h
@@ -2,6 +2,17 @@
#define __ENCRYPTED_KEY_H
#define ENCRYPTED_DEBUG 0
+#ifdef CONFIG_TRUSTED_KEYS
+extern struct key *request_trusted_key(const char *trusted_desc,
+ u8 **master_key, size_t *master_keylen);
+#else
+static inline struct key *request_trusted_key(const char *trusted_desc,
+ u8 **master_key,
+ size_t *master_keylen)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+#endif
#if ENCRYPTED_DEBUG
static inline void dump_master_key(const u8 *master_key, size_t master_keylen)
diff --git a/security/keys/encrypted-keys/masterkey_trusted.c b/security/keys/encrypted-keys/masterkey_trusted.c
new file mode 100644
index 00000000000..df87272e3f5
--- /dev/null
+++ b/security/keys/encrypted-keys/masterkey_trusted.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2010 IBM Corporation
+ * Copyright (C) 2010 Politecnico di Torino, Italy
+ * TORSEC group -- http://security.polito.it
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ * Roberto Sassu <roberto.sassu@polito.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * See Documentation/security/keys-trusted-encrypted.txt
+ */
+
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <keys/trusted-type.h>
+
+/*
+ * request_trusted_key - request the trusted key
+ *
+ * Trusted keys are sealed to PCRs and other metadata. Although userspace
+ * manages both trusted/encrypted key-types, like the encrypted key type
+ * data, trusted key type data is not visible decrypted from userspace.
+ */
+struct key *request_trusted_key(const char *trusted_desc,
+ u8 **master_key, size_t *master_keylen)
+{
+ struct trusted_key_payload *tpayload;
+ struct key *tkey;
+
+ tkey = request_key(&key_type_trusted, trusted_desc, NULL);
+ if (IS_ERR(tkey))
+ goto error;
+
+ down_read(&tkey->sem);
+ tpayload = rcu_dereference(tkey->payload.data);
+ *master_key = tpayload->key;
+ *master_keylen = tpayload->key_len;
+error:
+ return tkey;
+}
diff --git a/security/keys/gc.c b/security/keys/gc.c
index 89df6b5f203..bf4d8da5a79 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -1,6 +1,6 @@
/* Key garbage collector
*
- * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2009-2011 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
@@ -10,6 +10,8 @@
*/
#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/security.h>
#include <keys/keyring-type.h>
#include "internal.h"
@@ -19,17 +21,33 @@
unsigned key_gc_delay = 5 * 60;
/*
- * Reaper
+ * Reaper for unused keys.
+ */
+static void key_garbage_collector(struct work_struct *work);
+DECLARE_WORK(key_gc_work, key_garbage_collector);
+
+/*
+ * Reaper for links from keyrings to dead keys.
*/
static void key_gc_timer_func(unsigned long);
-static void key_garbage_collector(struct work_struct *);
static DEFINE_TIMER(key_gc_timer, key_gc_timer_func, 0, 0);
-static DECLARE_WORK(key_gc_work, key_garbage_collector);
-static key_serial_t key_gc_cursor; /* the last key the gc considered */
-static bool key_gc_again;
-static unsigned long key_gc_executing;
+
static time_t key_gc_next_run = LONG_MAX;
-static time_t key_gc_new_timer;
+static struct key_type *key_gc_dead_keytype;
+
+static unsigned long key_gc_flags;
+#define KEY_GC_KEY_EXPIRED 0 /* A key expired and needs unlinking */
+#define KEY_GC_REAP_KEYTYPE 1 /* A keytype is being unregistered */
+#define KEY_GC_REAPING_KEYTYPE 2 /* Cleared when keytype reaped */
+
+
+/*
+ * Any key whose type gets unregistered will be re-typed to this if it can't be
+ * immediately unlinked.
+ */
+struct key_type key_type_dead = {
+ .name = "dead",
+};
/*
* Schedule a garbage collection run.
@@ -42,31 +60,75 @@ void key_schedule_gc(time_t gc_at)
kenter("%ld", gc_at - now);
- if (gc_at <= now) {
- schedule_work(&key_gc_work);
+ if (gc_at <= now || test_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags)) {
+ kdebug("IMMEDIATE");
+ queue_work(system_nrt_wq, &key_gc_work);
} else if (gc_at < key_gc_next_run) {
+ kdebug("DEFERRED");
+ key_gc_next_run = gc_at;
expires = jiffies + (gc_at - now) * HZ;
mod_timer(&key_gc_timer, expires);
}
}
/*
- * The garbage collector timer kicked off
+ * Some key's cleanup time was met after it expired, so we need to get the
+ * reaper to go through a cycle finding expired keys.
*/
static void key_gc_timer_func(unsigned long data)
{
kenter("");
key_gc_next_run = LONG_MAX;
- schedule_work(&key_gc_work);
+ set_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags);
+ queue_work(system_nrt_wq, &key_gc_work);
+}
+
+/*
+ * wait_on_bit() sleep function for uninterruptible waiting
+ */
+static int key_gc_wait_bit(void *flags)
+{
+ schedule();
+ return 0;
+}
+
+/*
+ * Reap keys of dead type.
+ *
+ * We use three flags to make sure we see three complete cycles of the garbage
+ * collector: the first to mark keys of that type as being dead, the second to
+ * collect dead links and the third to clean up the dead keys. We have to be
+ * careful as there may already be a cycle in progress.
+ *
+ * The caller must be holding key_types_sem.
+ */
+void key_gc_keytype(struct key_type *ktype)
+{
+ kenter("%s", ktype->name);
+
+ key_gc_dead_keytype = ktype;
+ set_bit(KEY_GC_REAPING_KEYTYPE, &key_gc_flags);
+ smp_mb();
+ set_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags);
+
+ kdebug("schedule");
+ queue_work(system_nrt_wq, &key_gc_work);
+
+ kdebug("sleep");
+ wait_on_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE, key_gc_wait_bit,
+ TASK_UNINTERRUPTIBLE);
+
+ key_gc_dead_keytype = NULL;
+ kleave("");
}
/*
* Garbage collect pointers from a keyring.
*
- * Return true if we altered the keyring.
+ * Not called with any locks held. The keyring's key struct will not be
+ * deallocated under us as only our caller may deallocate it.
*/
-static bool key_gc_keyring(struct key *keyring, time_t limit)
- __releases(key_serial_lock)
+static void key_gc_keyring(struct key *keyring, time_t limit)
{
struct keyring_list *klist;
struct key *key;
@@ -93,130 +155,234 @@ static bool key_gc_keyring(struct key *keyring, time_t limit)
unlock_dont_gc:
rcu_read_unlock();
dont_gc:
- kleave(" = false");
- return false;
+ kleave(" [no gc]");
+ return;
do_gc:
rcu_read_unlock();
- key_gc_cursor = keyring->serial;
- key_get(keyring);
- spin_unlock(&key_serial_lock);
+
keyring_gc(keyring, limit);
- key_put(keyring);
- kleave(" = true");
- return true;
+ kleave(" [gc]");
}
/*
- * Garbage collector for keys. This involves scanning the keyrings for dead,
- * expired and revoked keys that have overstayed their welcome
+ * Garbage collect an unreferenced, detached key
*/
-static void key_garbage_collector(struct work_struct *work)
+static noinline void key_gc_unused_key(struct key *key)
{
- struct rb_node *rb;
- key_serial_t cursor;
- struct key *key, *xkey;
- time_t new_timer = LONG_MAX, limit, now;
-
- now = current_kernel_time().tv_sec;
- kenter("[%x,%ld]", key_gc_cursor, key_gc_new_timer - now);
-
- if (test_and_set_bit(0, &key_gc_executing)) {
- key_schedule_gc(current_kernel_time().tv_sec + 1);
- kleave(" [busy; deferring]");
- return;
+ key_check(key);
+
+ security_key_free(key);
+
+ /* deal with the user's key tracking and quota */
+ if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
+ spin_lock(&key->user->lock);
+ key->user->qnkeys--;
+ key->user->qnbytes -= key->quotalen;
+ spin_unlock(&key->user->lock);
}
- limit = now;
+ atomic_dec(&key->user->nkeys);
+ if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
+ atomic_dec(&key->user->nikeys);
+
+ key_user_put(key->user);
+
+ /* now throw away the key memory */
+ if (key->type->destroy)
+ key->type->destroy(key);
+
+ kfree(key->description);
+
+#ifdef KEY_DEBUGGING
+ key->magic = KEY_DEBUG_MAGIC_X;
+#endif
+ kmem_cache_free(key_jar, key);
+}
+
+/*
+ * Garbage collector for unused keys.
+ *
+ * This is done in process context so that we don't have to disable interrupts
+ * all over the place. key_put() schedules this rather than trying to do the
+ * cleanup itself, which means key_put() doesn't have to sleep.
+ */
+static void key_garbage_collector(struct work_struct *work)
+{
+ static u8 gc_state; /* Internal persistent state */
+#define KEY_GC_REAP_AGAIN 0x01 /* - Need another cycle */
+#define KEY_GC_REAPING_LINKS 0x02 /* - We need to reap links */
+#define KEY_GC_SET_TIMER 0x04 /* - We need to restart the timer */
+#define KEY_GC_REAPING_DEAD_1 0x10 /* - We need to mark dead keys */
+#define KEY_GC_REAPING_DEAD_2 0x20 /* - We need to reap dead key links */
+#define KEY_GC_REAPING_DEAD_3 0x40 /* - We need to reap dead keys */
+#define KEY_GC_FOUND_DEAD_KEY 0x80 /* - We found at least one dead key */
+
+ struct rb_node *cursor;
+ struct key *key;
+ time_t new_timer, limit;
+
+ kenter("[%lx,%x]", key_gc_flags, gc_state);
+
+ limit = current_kernel_time().tv_sec;
if (limit > key_gc_delay)
limit -= key_gc_delay;
else
limit = key_gc_delay;
+ /* Work out what we're going to be doing in this pass */
+ gc_state &= KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2;
+ gc_state <<= 1;
+ if (test_and_clear_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags))
+ gc_state |= KEY_GC_REAPING_LINKS | KEY_GC_SET_TIMER;
+
+ if (test_and_clear_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags))
+ gc_state |= KEY_GC_REAPING_DEAD_1;
+ kdebug("new pass %x", gc_state);
+
+ new_timer = LONG_MAX;
+
+ /* As only this function is permitted to remove things from the key
+ * serial tree, if cursor is non-NULL then it will always point to a
+ * valid node in the tree - even if lock got dropped.
+ */
spin_lock(&key_serial_lock);
+ cursor = rb_first(&key_serial_tree);
- if (unlikely(RB_EMPTY_ROOT(&key_serial_tree))) {
- spin_unlock(&key_serial_lock);
- clear_bit(0, &key_gc_executing);
- return;
- }
+continue_scanning:
+ while (cursor) {
+ key = rb_entry(cursor, struct key, serial_node);
+ cursor = rb_next(cursor);
- cursor = key_gc_cursor;
- if (cursor < 0)
- cursor = 0;
- if (cursor > 0)
- new_timer = key_gc_new_timer;
- else
- key_gc_again = false;
-
- /* find the first key above the cursor */
- key = NULL;
- rb = key_serial_tree.rb_node;
- while (rb) {
- xkey = rb_entry(rb, struct key, serial_node);
- if (cursor < xkey->serial) {
- key = xkey;
- rb = rb->rb_left;
- } else if (cursor > xkey->serial) {
- rb = rb->rb_right;
- } else {
- rb = rb_next(rb);
- if (!rb)
- goto reached_the_end;
- key = rb_entry(rb, struct key, serial_node);
- break;
+ if (atomic_read(&key->usage) == 0)
+ goto found_unreferenced_key;
+
+ if (unlikely(gc_state & KEY_GC_REAPING_DEAD_1)) {
+ if (key->type == key_gc_dead_keytype) {
+ gc_state |= KEY_GC_FOUND_DEAD_KEY;
+ set_bit(KEY_FLAG_DEAD, &key->flags);
+ key->perm = 0;
+ goto skip_dead_key;
+ }
+ }
+
+ if (gc_state & KEY_GC_SET_TIMER) {
+ if (key->expiry > limit && key->expiry < new_timer) {
+ kdebug("will expire %x in %ld",
+ key_serial(key), key->expiry - limit);
+ new_timer = key->expiry;
+ }
}
- }
- if (!key)
- goto reached_the_end;
+ if (unlikely(gc_state & KEY_GC_REAPING_DEAD_2))
+ if (key->type == key_gc_dead_keytype)
+ gc_state |= KEY_GC_FOUND_DEAD_KEY;
- /* trawl through the keys looking for keyrings */
- for (;;) {
- if (key->expiry > limit && key->expiry < new_timer) {
- kdebug("will expire %x in %ld",
- key_serial(key), key->expiry - limit);
- new_timer = key->expiry;
+ if ((gc_state & KEY_GC_REAPING_LINKS) ||
+ unlikely(gc_state & KEY_GC_REAPING_DEAD_2)) {
+ if (key->type == &key_type_keyring)
+ goto found_keyring;
}
- if (key->type == &key_type_keyring &&
- key_gc_keyring(key, limit))
- /* the gc had to release our lock so that the keyring
- * could be modified, so we have to get it again */
- goto gc_released_our_lock;
+ if (unlikely(gc_state & KEY_GC_REAPING_DEAD_3))
+ if (key->type == key_gc_dead_keytype)
+ goto destroy_dead_key;
- rb = rb_next(&key->serial_node);
- if (!rb)
- goto reached_the_end;
- key = rb_entry(rb, struct key, serial_node);
+ skip_dead_key:
+ if (spin_is_contended(&key_serial_lock) || need_resched())
+ goto contended;
}
-gc_released_our_lock:
- kdebug("gc_released_our_lock");
- key_gc_new_timer = new_timer;
- key_gc_again = true;
- clear_bit(0, &key_gc_executing);
- schedule_work(&key_gc_work);
- kleave(" [continue]");
- return;
-
- /* when we reach the end of the run, we set the timer for the next one */
-reached_the_end:
- kdebug("reached_the_end");
+contended:
spin_unlock(&key_serial_lock);
- key_gc_new_timer = new_timer;
- key_gc_cursor = 0;
- clear_bit(0, &key_gc_executing);
-
- if (key_gc_again) {
- /* there may have been a key that expired whilst we were
- * scanning, so if we discarded any links we should do another
- * scan */
- new_timer = now + 1;
- key_schedule_gc(new_timer);
- } else if (new_timer < LONG_MAX) {
+
+maybe_resched:
+ if (cursor) {
+ cond_resched();
+ spin_lock(&key_serial_lock);
+ goto continue_scanning;
+ }
+
+ /* We've completed the pass. Set the timer if we need to and queue a
+ * new cycle if necessary. We keep executing cycles until we find one
+ * where we didn't reap any keys.
+ */
+ kdebug("pass complete");
+
+ if (gc_state & KEY_GC_SET_TIMER && new_timer != (time_t)LONG_MAX) {
new_timer += key_gc_delay;
key_schedule_gc(new_timer);
}
- kleave(" [end]");
+
+ if (unlikely(gc_state & KEY_GC_REAPING_DEAD_2)) {
+ /* Make sure everyone revalidates their keys if we marked a
+ * bunch as being dead and make sure all keyring ex-payloads
+ * are destroyed.
+ */
+ kdebug("dead sync");
+ synchronize_rcu();
+ }
+
+ if (unlikely(gc_state & (KEY_GC_REAPING_DEAD_1 |
+ KEY_GC_REAPING_DEAD_2))) {
+ if (!(gc_state & KEY_GC_FOUND_DEAD_KEY)) {
+ /* No remaining dead keys: short circuit the remaining
+ * keytype reap cycles.
+ */
+ kdebug("dead short");
+ gc_state &= ~(KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2);
+ gc_state |= KEY_GC_REAPING_DEAD_3;
+ } else {
+ gc_state |= KEY_GC_REAP_AGAIN;
+ }
+ }
+
+ if (unlikely(gc_state & KEY_GC_REAPING_DEAD_3)) {
+ kdebug("dead wake");
+ smp_mb();
+ clear_bit(KEY_GC_REAPING_KEYTYPE, &key_gc_flags);
+ wake_up_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE);
+ }
+
+ if (gc_state & KEY_GC_REAP_AGAIN)
+ queue_work(system_nrt_wq, &key_gc_work);
+ kleave(" [end %x]", gc_state);
+ return;
+
+ /* We found an unreferenced key - once we've removed it from the tree,
+ * we can safely drop the lock.
+ */
+found_unreferenced_key:
+ kdebug("unrefd key %d", key->serial);
+ rb_erase(&key->serial_node, &key_serial_tree);
+ spin_unlock(&key_serial_lock);
+
+ key_gc_unused_key(key);
+ gc_state |= KEY_GC_REAP_AGAIN;
+ goto maybe_resched;
+
+ /* We found a keyring and we need to check the payload for links to
+ * dead or expired keys. We don't flag another reap immediately as we
+ * have to wait for the old payload to be destroyed by RCU before we
+ * can reap the keys to which it refers.
+ */
+found_keyring:
+ spin_unlock(&key_serial_lock);
+ kdebug("scan keyring %d", key->serial);
+ key_gc_keyring(key, limit);
+ goto maybe_resched;
+
+ /* We found a dead key that is still referenced. Reset its type and
+ * destroy its payload with its semaphore held.
+ */
+destroy_dead_key:
+ spin_unlock(&key_serial_lock);
+ kdebug("destroy key %d", key->serial);
+ down_write(&key->sem);
+ key->type = &key_type_dead;
+ if (key_gc_dead_keytype->destroy)
+ key_gc_dead_keytype->destroy(key);
+ memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
+ up_write(&key->sem);
+ goto maybe_resched;
}
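
The rewritten collector above is driven by a small set of state flags (KEY_GC_REAPING_LINKS, KEY_GC_REAPING_DEAD_1/2/3 and so on) and keeps queueing new cycles until a pass reaps nothing. The following is a rough userspace sketch of that flag-driven, multi-pass pattern, assuming an invented item list and reap condition; it is not the kernel code, which scans an rb-tree under key_serial_lock and re-queues itself on a workqueue.

/* Sketch of a flag-driven, multi-pass collector in plain userspace C.
 * All names here are illustrative; the kernel version walks the key
 * serial rb-tree under key_serial_lock and re-queues itself as a work
 * item instead of looping directly.
 */
#include <stdio.h>
#include <stdlib.h>

#define GC_REAP_AGAIN   0x01    /* another pass is needed */
#define GC_REAPING_DEAD 0x02    /* we are reaping a dying "type" */

struct item {
    struct item *next;
    int refcount;
    int dead_type;              /* belongs to a type being torn down */
};

static struct item *items;

/* One pass over the list: free anything unreferenced or of a dead type.
 * Returns the flags to carry into the next pass. */
static unsigned int gc_pass(unsigned int state)
{
    struct item **pp = &items;
    unsigned int next_state = state & GC_REAPING_DEAD;

    while (*pp) {
        struct item *it = *pp;

        if (it->refcount == 0 ||
            ((state & GC_REAPING_DEAD) && it->dead_type)) {
            *pp = it->next;
            free(it);
            next_state |= GC_REAP_AGAIN;   /* keep cycling */
            continue;
        }
        pp = &it->next;
    }
    return next_state;
}

int main(void)
{
    unsigned int state = GC_REAPING_DEAD;
    int passes = 0;

    /* build a few items: one live, one unreferenced, one of a dead type */
    for (int i = 0; i < 3; i++) {
        struct item *it = calloc(1, sizeof(*it));
        it->refcount = (i == 0);
        it->dead_type = (i == 2);
        it->next = items;
        items = it;
    }

    /* keep running cycles until a pass reaps nothing */
    do {
        state = gc_pass(state);
        passes++;
    } while (state & GC_REAP_AGAIN);

    printf("collector settled after %d pass(es)\n", passes);
    return 0;
}

The point of carrying GC_REAP_AGAIN between passes is the same as in the hunk above: a cycle that frees anything schedules another cycle, and the collector only settles once a full pass finds nothing left to do.
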
diff --git a/security/keys/internal.h b/security/keys/internal.h
index f375152a250..c7a7caec483 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -31,6 +31,7 @@
no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
#endif
+extern struct key_type key_type_dead;
extern struct key_type key_type_user;
/*****************************************************************************/
@@ -75,6 +76,7 @@ extern unsigned key_quota_maxbytes;
#define KEYQUOTA_LINK_BYTES 4 /* a link in a keyring is worth 4 bytes */
+extern struct kmem_cache *key_jar;
extern struct rb_root key_serial_tree;
extern spinlock_t key_serial_lock;
extern struct mutex key_construction_mutex;
@@ -146,9 +148,11 @@ extern key_ref_t lookup_user_key(key_serial_t id, unsigned long flags,
extern long join_session_keyring(const char *name);
+extern struct work_struct key_gc_work;
extern unsigned key_gc_delay;
extern void keyring_gc(struct key *keyring, time_t limit);
extern void key_schedule_gc(time_t expiry_at);
+extern void key_gc_keytype(struct key_type *ktype);
extern int key_task_permission(const key_ref_t key_ref,
const struct cred *cred,
diff --git a/security/keys/key.c b/security/keys/key.c
index f7f9d93f08d..4414abddcb5 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -21,7 +21,7 @@
#include <linux/user_namespace.h>
#include "internal.h"
-static struct kmem_cache *key_jar;
+struct kmem_cache *key_jar;
struct rb_root key_serial_tree; /* tree of keys indexed by serial */
DEFINE_SPINLOCK(key_serial_lock);
@@ -36,17 +36,9 @@ unsigned int key_quota_maxbytes = 20000; /* general key space quota */
static LIST_HEAD(key_types_list);
static DECLARE_RWSEM(key_types_sem);
-static void key_cleanup(struct work_struct *work);
-static DECLARE_WORK(key_cleanup_task, key_cleanup);
-
/* We serialise key instantiation and link */
DEFINE_MUTEX(key_construction_mutex);
-/* Any key who's type gets unegistered will be re-typed to this */
-static struct key_type key_type_dead = {
- .name = "dead",
-};
-
#ifdef KEY_DEBUGGING
void __key_check(const struct key *key)
{
@@ -591,71 +583,6 @@ int key_reject_and_link(struct key *key,
}
EXPORT_SYMBOL(key_reject_and_link);
-/*
- * Garbage collect keys in process context so that we don't have to disable
- * interrupts all over the place.
- *
- * key_put() schedules this rather than trying to do the cleanup itself, which
- * means key_put() doesn't have to sleep.
- */
-static void key_cleanup(struct work_struct *work)
-{
- struct rb_node *_n;
- struct key *key;
-
-go_again:
- /* look for a dead key in the tree */
- spin_lock(&key_serial_lock);
-
- for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
- key = rb_entry(_n, struct key, serial_node);
-
- if (atomic_read(&key->usage) == 0)
- goto found_dead_key;
- }
-
- spin_unlock(&key_serial_lock);
- return;
-
-found_dead_key:
- /* we found a dead key - once we've removed it from the tree, we can
- * drop the lock */
- rb_erase(&key->serial_node, &key_serial_tree);
- spin_unlock(&key_serial_lock);
-
- key_check(key);
-
- security_key_free(key);
-
- /* deal with the user's key tracking and quota */
- if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
- spin_lock(&key->user->lock);
- key->user->qnkeys--;
- key->user->qnbytes -= key->quotalen;
- spin_unlock(&key->user->lock);
- }
-
- atomic_dec(&key->user->nkeys);
- if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
- atomic_dec(&key->user->nikeys);
-
- key_user_put(key->user);
-
- /* now throw away the key memory */
- if (key->type->destroy)
- key->type->destroy(key);
-
- kfree(key->description);
-
-#ifdef KEY_DEBUGGING
- key->magic = KEY_DEBUG_MAGIC_X;
-#endif
- kmem_cache_free(key_jar, key);
-
- /* there may, of course, be more than one key to destroy */
- goto go_again;
-}
-
/**
* key_put - Discard a reference to a key.
* @key: The key to discard a reference from.
@@ -670,7 +597,7 @@ void key_put(struct key *key)
key_check(key);
if (atomic_dec_and_test(&key->usage))
- schedule_work(&key_cleanup_task);
+ queue_work(system_nrt_wq, &key_gc_work);
}
}
EXPORT_SYMBOL(key_put);
@@ -1048,49 +975,11 @@ EXPORT_SYMBOL(register_key_type);
*/
void unregister_key_type(struct key_type *ktype)
{
- struct rb_node *_n;
- struct key *key;
-
down_write(&key_types_sem);
-
- /* withdraw the key type */
list_del_init(&ktype->link);
-
- /* mark all the keys of this type dead */
- spin_lock(&key_serial_lock);
-
- for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
- key = rb_entry(_n, struct key, serial_node);
-
- if (key->type == ktype) {
- key->type = &key_type_dead;
- set_bit(KEY_FLAG_DEAD, &key->flags);
- }
- }
-
- spin_unlock(&key_serial_lock);
-
- /* make sure everyone revalidates their keys */
- synchronize_rcu();
-
- /* we should now be able to destroy the payloads of all the keys of
- * this type with impunity */
- spin_lock(&key_serial_lock);
-
- for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
- key = rb_entry(_n, struct key, serial_node);
-
- if (key->type == ktype) {
- if (ktype->destroy)
- ktype->destroy(key);
- memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
- }
- }
-
- spin_unlock(&key_serial_lock);
- up_write(&key_types_sem);
-
- key_schedule_gc(0);
+ downgrade_write(&key_types_sem);
+ key_gc_keytype(ktype);
+ up_read(&key_types_sem);
}
EXPORT_SYMBOL(unregister_key_type);
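
The key.c hunks above turn key_put() into a pure scheduling operation: when the last reference drops it only queues the GC work item, and all teardown happens later in process context. Below is a rough pthread-based sketch of that split, with an invented object type and a reaper thread standing in for the workqueue (compile with -pthread); it is a sketch of the pattern, not the kernel API.

/* Sketch of deferring object teardown from the last put() to a worker,
 * the way key_put() now defers to the key GC work item.  Everything here
 * is a userspace stand-in built on pthreads, not the kernel workqueue API.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdatomic.h>

struct obj {
    struct obj *next;           /* reap-list linkage */
    atomic_int usage;
    char payload[32];
};

static struct obj *reap_list;
static pthread_mutex_t reap_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t reap_cond = PTHREAD_COND_INITIALIZER;
static int done;

/* put(): never frees or sleeps itself, only hands the object to the worker */
static void obj_put(struct obj *o)
{
    if (atomic_fetch_sub(&o->usage, 1) != 1)
        return;
    pthread_mutex_lock(&reap_lock);
    o->next = reap_list;
    reap_list = o;
    pthread_cond_signal(&reap_cond);
    pthread_mutex_unlock(&reap_lock);
}

/* Worker: does the actual destruction in its own context. */
static void *reaper(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&reap_lock);
    while (!done || reap_list) {
        while (reap_list) {
            struct obj *o = reap_list;
            reap_list = o->next;
            pthread_mutex_unlock(&reap_lock);
            printf("reaping %s\n", o->payload);
            free(o);
            pthread_mutex_lock(&reap_lock);
        }
        if (!done)
            pthread_cond_wait(&reap_cond, &reap_lock);
    }
    pthread_mutex_unlock(&reap_lock);
    return NULL;
}

int main(void)
{
    pthread_t tid;
    struct obj *o = calloc(1, sizeof(*o));

    atomic_init(&o->usage, 1);
    snprintf(o->payload, sizeof(o->payload), "object#1");

    pthread_create(&tid, NULL, reaper, NULL);
    obj_put(o);                 /* last reference: queue, don't free */

    pthread_mutex_lock(&reap_lock);
    done = 1;
    pthread_cond_signal(&reap_cond);
    pthread_mutex_unlock(&reap_lock);
    pthread_join(tid, NULL);
    return 0;
}
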
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 30e242f7bd0..37a7f3b2885 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -860,8 +860,7 @@ void __key_link(struct key *keyring, struct key *key,
kenter("%d,%d,%p", keyring->serial, key->serial, nklist);
- klist = rcu_dereference_protected(keyring->payload.subscriptions,
- rwsem_is_locked(&keyring->sem));
+ klist = rcu_dereference_locked_keyring(keyring);
atomic_inc(&key->usage);
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index a3063eb3dc2..1068cb1939b 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -270,7 +270,7 @@ static int install_session_keyring(struct key *keyring)
if (!new)
return -ENOMEM;
- ret = install_session_keyring_to_cred(new, NULL);
+ ret = install_session_keyring_to_cred(new, keyring);
if (ret < 0) {
abort_creds(new);
return ret;
@@ -589,12 +589,22 @@ try_again:
ret = install_user_keyrings();
if (ret < 0)
goto error;
- ret = install_session_keyring(
- cred->user->session_keyring);
+ if (lflags & KEY_LOOKUP_CREATE)
+ ret = join_session_keyring(NULL);
+ else
+ ret = install_session_keyring(
+ cred->user->session_keyring);
if (ret < 0)
goto error;
goto reget_creds;
+ } else if (cred->tgcred->session_keyring ==
+ cred->user->session_keyring &&
+ lflags & KEY_LOOKUP_CREATE) {
+ ret = join_session_keyring(NULL);
+ if (ret < 0)
+ goto error;
+ goto reget_creds;
}
rcu_read_lock();
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
index 0c33e2ea1f3..0964fc23694 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted.c
@@ -779,7 +779,10 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
opt->pcrinfo_len = strlen(args[0].from) / 2;
if (opt->pcrinfo_len > MAX_PCRINFO_SIZE)
return -EINVAL;
- hex2bin(opt->pcrinfo, args[0].from, opt->pcrinfo_len);
+ res = hex2bin(opt->pcrinfo, args[0].from,
+ opt->pcrinfo_len);
+ if (res < 0)
+ return -EINVAL;
break;
case Opt_keyhandle:
res = strict_strtoul(args[0].from, 16, &handle);
@@ -791,12 +794,18 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
case Opt_keyauth:
if (strlen(args[0].from) != 2 * SHA1_DIGEST_SIZE)
return -EINVAL;
- hex2bin(opt->keyauth, args[0].from, SHA1_DIGEST_SIZE);
+ res = hex2bin(opt->keyauth, args[0].from,
+ SHA1_DIGEST_SIZE);
+ if (res < 0)
+ return -EINVAL;
break;
case Opt_blobauth:
if (strlen(args[0].from) != 2 * SHA1_DIGEST_SIZE)
return -EINVAL;
- hex2bin(opt->blobauth, args[0].from, SHA1_DIGEST_SIZE);
+ res = hex2bin(opt->blobauth, args[0].from,
+ SHA1_DIGEST_SIZE);
+ if (res < 0)
+ return -EINVAL;
break;
case Opt_migratable:
if (*args[0].from == '0')
@@ -860,7 +869,9 @@ static int datablob_parse(char *datablob, struct trusted_key_payload *p,
p->blob_len = strlen(c) / 2;
if (p->blob_len > MAX_BLOB_SIZE)
return -EINVAL;
- hex2bin(p->blob, c, p->blob_len);
+ ret = hex2bin(p->blob, c, p->blob_len);
+ if (ret < 0)
+ return -EINVAL;
ret = getoptions(datablob, p, o);
if (ret < 0)
return ret;
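
The trusted.c hunks above stop ignoring the return value of hex2bin() and fail option parsing with -EINVAL on malformed hex. A minimal userspace sketch of the same validate-while-decoding pattern follows; the hex2bin() and parse_blob() helpers here are local stand-ins, not the kernel functions.

/* Userspace stand-in for the pattern in the trusted.c hunks: decode hex
 * into a binary buffer and propagate a parse failure instead of silently
 * accepting garbage.  hex2bin() below is a local helper.
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>

static int hexval(char c)
{
    if (c >= '0' && c <= '9') return c - '0';
    if (c >= 'a' && c <= 'f') return c - 'a' + 10;
    if (c >= 'A' && c <= 'F') return c - 'A' + 10;
    return -1;
}

/* Decode 2*len hex characters from src into dst; -1 on any bad digit. */
static int hex2bin(unsigned char *dst, const char *src, size_t len)
{
    for (size_t i = 0; i < len; i++) {
        int hi = hexval(src[2 * i]);
        int lo = hexval(src[2 * i + 1]);

        if (hi < 0 || lo < 0)
            return -1;
        dst[i] = (unsigned char)(hi << 4 | lo);
    }
    return 0;
}

static int parse_blob(const char *opt, unsigned char *blob, size_t max)
{
    size_t blen = strlen(opt) / 2;

    if (blen == 0 || blen > max)
        return -EINVAL;
    if (hex2bin(blob, opt, blen) < 0)   /* reject malformed hex */
        return -EINVAL;
    return (int)blen;
}

int main(void)
{
    unsigned char blob[32];

    printf("good: %d\n", parse_blob("deadbeef", blob, sizeof(blob)));
    printf("bad:  %d\n", parse_blob("no7hex!!", blob, sizeof(blob)));
    return 0;
}
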
diff --git a/security/security.c b/security/security.c
index 0e4fccfef12..0c6cc69c8f8 100644
--- a/security/security.c
+++ b/security/security.c
@@ -16,15 +16,16 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/security.h>
+#include <linux/integrity.h>
#include <linux/ima.h>
+#include <linux/evm.h>
+
+#define MAX_LSM_EVM_XATTR 2
/* Boot-time LSM user choice */
static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
CONFIG_DEFAULT_SECURITY;
-/* things that live in capability.c */
-extern void __init security_fixup_ops(struct security_operations *ops);
-
static struct security_operations *security_ops;
static struct security_operations default_security_ops = {
.name = "default",
@@ -334,20 +335,57 @@ int security_inode_alloc(struct inode *inode)
void security_inode_free(struct inode *inode)
{
- ima_inode_free(inode);
+ integrity_inode_free(inode);
security_ops->inode_free_security(inode);
}
int security_inode_init_security(struct inode *inode, struct inode *dir,
- const struct qstr *qstr, char **name,
- void **value, size_t *len)
+ const struct qstr *qstr,
+ const initxattrs initxattrs, void *fs_data)
{
+ struct xattr new_xattrs[MAX_LSM_EVM_XATTR + 1];
+ struct xattr *lsm_xattr, *evm_xattr, *xattr;
+ int ret;
+
if (unlikely(IS_PRIVATE(inode)))
- return -EOPNOTSUPP;
+ return 0;
+
+ memset(new_xattrs, 0, sizeof new_xattrs);
+ if (!initxattrs)
+ return security_ops->inode_init_security(inode, dir, qstr,
+ NULL, NULL, NULL);
+ lsm_xattr = new_xattrs;
+ ret = security_ops->inode_init_security(inode, dir, qstr,
+ &lsm_xattr->name,
+ &lsm_xattr->value,
+ &lsm_xattr->value_len);
+ if (ret)
+ goto out;
+
+ evm_xattr = lsm_xattr + 1;
+ ret = evm_inode_init_security(inode, lsm_xattr, evm_xattr);
+ if (ret)
+ goto out;
+ ret = initxattrs(inode, new_xattrs, fs_data);
+out:
+ for (xattr = new_xattrs; xattr->name != NULL; xattr++) {
+ kfree(xattr->name);
+ kfree(xattr->value);
+ }
+ return (ret == -EOPNOTSUPP) ? 0 : ret;
+}
+EXPORT_SYMBOL(security_inode_init_security);
+
+int security_old_inode_init_security(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr, char **name,
+ void **value, size_t *len)
+{
+ if (unlikely(IS_PRIVATE(inode)))
+ return 0;
return security_ops->inode_init_security(inode, dir, qstr, name, value,
len);
}
-EXPORT_SYMBOL(security_inode_init_security);
+EXPORT_SYMBOL(security_old_inode_init_security);
#ifdef CONFIG_SECURITY_PATH
int security_path_mknod(struct path *dir, struct dentry *dentry, int mode,
@@ -523,9 +561,14 @@ int security_inode_permission(struct inode *inode, int mask)
int security_inode_setattr(struct dentry *dentry, struct iattr *attr)
{
+ int ret;
+
if (unlikely(IS_PRIVATE(dentry->d_inode)))
return 0;
- return security_ops->inode_setattr(dentry, attr);
+ ret = security_ops->inode_setattr(dentry, attr);
+ if (ret)
+ return ret;
+ return evm_inode_setattr(dentry, attr);
}
EXPORT_SYMBOL_GPL(security_inode_setattr);
@@ -539,9 +582,14 @@ int security_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
int security_inode_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags)
{
+ int ret;
+
if (unlikely(IS_PRIVATE(dentry->d_inode)))
return 0;
- return security_ops->inode_setxattr(dentry, name, value, size, flags);
+ ret = security_ops->inode_setxattr(dentry, name, value, size, flags);
+ if (ret)
+ return ret;
+ return evm_inode_setxattr(dentry, name, value, size);
}
void security_inode_post_setxattr(struct dentry *dentry, const char *name,
@@ -550,6 +598,7 @@ void security_inode_post_setxattr(struct dentry *dentry, const char *name,
if (unlikely(IS_PRIVATE(dentry->d_inode)))
return;
security_ops->inode_post_setxattr(dentry, name, value, size, flags);
+ evm_inode_post_setxattr(dentry, name, value, size);
}
int security_inode_getxattr(struct dentry *dentry, const char *name)
@@ -568,9 +617,14 @@ int security_inode_listxattr(struct dentry *dentry)
int security_inode_removexattr(struct dentry *dentry, const char *name)
{
+ int ret;
+
if (unlikely(IS_PRIVATE(dentry->d_inode)))
return 0;
- return security_ops->inode_removexattr(dentry, name);
+ ret = security_ops->inode_removexattr(dentry, name);
+ if (ret)
+ return ret;
+ return evm_inode_removexattr(dentry, name);
}
int security_inode_need_killpriv(struct dentry *dentry)
@@ -1097,6 +1151,7 @@ void security_sk_clone(const struct sock *sk, struct sock *newsk)
{
security_ops->sk_clone_security(sk, newsk);
}
+EXPORT_SYMBOL(security_sk_clone);
void security_sk_classify_flow(struct sock *sk, struct flowi *fl)
{
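
In the security.c hunks above, security_inode_init_security() now gathers the LSM xattr and an EVM xattr into a small array, hands the array to a filesystem-supplied initxattrs callback, and frees every name and value afterwards. The sketch below mirrors only that control flow in userspace; the struct, the initxattrs_t type and the fs_initxattrs() callback are illustrative stand-ins, not the kernel definitions.

/* Sketch of the "array of xattrs + filesystem callback" flow introduced in
 * security_inode_init_security() above.  Types and callbacks here are
 * invented userspace stand-ins.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_XATTR 2

struct xattr {
    char *name;
    void *value;
    size_t value_len;
};

typedef int (*initxattrs_t)(const struct xattr *xattrs, void *fs_data);

/* Pretend LSM hook: produce one security xattr. */
static int lsm_init_security(struct xattr *x)
{
    x->name = strdup("security.sketch");
    x->value = strdup("label_t");
    x->value_len = x->value ? strlen(x->value) : 0;
    return (x->name && x->value) ? 0 : -1;
}

static int inode_init_security(initxattrs_t initxattrs, void *fs_data)
{
    struct xattr xattrs[MAX_XATTR + 1];    /* +1 for the NULL terminator */
    struct xattr *x;
    int ret;

    memset(xattrs, 0, sizeof(xattrs));
    ret = lsm_init_security(&xattrs[0]);
    if (ret)
        goto out;
    /* a second slot (e.g. an integrity xattr) could be filled here */
    ret = initxattrs(xattrs, fs_data);     /* let the fs write them out */
out:
    for (x = xattrs; x->name != NULL; x++) {    /* always clean up */
        free(x->name);
        free(x->value);
    }
    return ret;
}

/* Example filesystem callback: just print what it would persist. */
static int fs_initxattrs(const struct xattr *xattrs, void *fs_data)
{
    for (; xattrs->name != NULL; xattrs++)
        printf("would set %s = %.*s on %s\n", xattrs->name,
               (int)xattrs->value_len, (char *)xattrs->value,
               (const char *)fs_data);
    return 0;
}

int main(void)
{
    return inode_init_security(fs_initxattrs, "inode#42");
}
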
diff --git a/security/selinux/exports.c b/security/selinux/exports.c
index 90664385dea..e75dd94e2d2 100644
--- a/security/selinux/exports.c
+++ b/security/selinux/exports.c
@@ -12,6 +12,7 @@
* as published by the Free Software Foundation.
*/
#include <linux/module.h>
+#include <linux/selinux.h>
#include "security.h"
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 266a2292451..e545b9f6707 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -89,14 +89,14 @@
#include "xfrm.h"
#include "netlabel.h"
#include "audit.h"
+#include "avc_ss.h"
#define NUM_SEL_MNT_OPTS 5
-extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm);
extern struct security_operations *security_ops;
/* SECMARK reference count */
-atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
+static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
#ifdef CONFIG_SECURITY_SELINUX_DEVELOP
int selinux_enforcing;
@@ -279,10 +279,6 @@ static void superblock_free_security(struct super_block *sb)
kfree(sbsec);
}
-/* The security server must be initialized before
- any labeling or access decisions can be provided. */
-extern int ss_initialized;
-
/* The file system's label must be initialized prior to use. */
static const char *labeling_behaviors[6] = {
@@ -2097,9 +2093,6 @@ static int selinux_bprm_secureexec(struct linux_binprm *bprm)
return (atsecure || cap_bprm_secureexec(bprm));
}
-extern struct vfsmount *selinuxfs_mount;
-extern struct dentry *selinux_null;
-
/* Derived from fs/exec.c:flush_old_files. */
static inline void flush_unauthorized_files(const struct cred *cred,
struct files_struct *files)
@@ -5803,8 +5796,6 @@ static int selinux_disabled;
int selinux_disable(void)
{
- extern void exit_sel_fs(void);
-
if (ss_initialized) {
/* Not permitted after initial policy load. */
return -EINVAL;
diff --git a/security/selinux/include/avc_ss.h b/security/selinux/include/avc_ss.h
index 4677aa519b0..d5c328452df 100644
--- a/security/selinux/include/avc_ss.h
+++ b/security/selinux/include/avc_ss.h
@@ -18,5 +18,11 @@ struct security_class_mapping {
extern struct security_class_mapping secclass_map[];
+/*
+ * The security server must be initialized before
+ * any labeling or access decisions can be provided.
+ */
+extern int ss_initialized;
+
#endif /* _SELINUX_AVC_SS_H_ */
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 3ba4feba048..d871e8ad210 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -216,6 +216,14 @@ struct selinux_kernel_status {
extern void selinux_status_update_setenforce(int enforcing);
extern void selinux_status_update_policyload(int seqno);
+extern void selinux_complete_init(void);
+extern int selinux_disable(void);
+extern void exit_sel_fs(void);
+extern struct dentry *selinux_null;
+extern struct vfsmount *selinuxfs_mount;
+extern void selnl_notify_setenforce(int val);
+extern void selnl_notify_policyload(u32 seqno);
+extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm);
#endif /* _SELINUX_SECURITY_H_ */
diff --git a/security/selinux/netlink.c b/security/selinux/netlink.c
index 36ac257cec9..ce3f481558d 100644
--- a/security/selinux/netlink.c
+++ b/security/selinux/netlink.c
@@ -19,6 +19,8 @@
#include <linux/selinux_netlink.h>
#include <net/net_namespace.h>
+#include "security.h"
+
static struct sock *selnl;
static int selnl_msglen(int msgtype)
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
index 8b02b2137da..0920ea3bf59 100644
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -21,6 +21,7 @@
#include "flask.h"
#include "av_permissions.h"
+#include "security.h"
struct nlmsg_perm {
u16 nlmsg_type;
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 55d92cbb177..f46658722c7 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -75,8 +75,6 @@ static char policy_opened;
/* global data for policy capabilities */
static struct dentry *policycap_dir;
-extern void selnl_notify_setenforce(int val);
-
/* Check whether a task is allowed to use a security operation. */
static int task_has_security(struct task_struct *tsk,
u32 perms)
@@ -278,7 +276,6 @@ static ssize_t sel_write_disable(struct file *file, const char __user *buf,
char *page = NULL;
ssize_t length;
int new_value;
- extern int selinux_disable(void);
length = -ENOMEM;
if (count >= PAGE_SIZE)
@@ -478,7 +475,7 @@ static struct vm_operations_struct sel_mmap_policy_ops = {
.page_mkwrite = sel_mmap_policy_fault,
};
-int sel_mmap_policy(struct file *filp, struct vm_area_struct *vma)
+static int sel_mmap_policy(struct file *filp, struct vm_area_struct *vma)
{
if (vma->vm_flags & VM_SHARED) {
/* do not allow mprotect to make mapping writable */
diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c
index a53373207fb..2ec904177fe 100644
--- a/security/selinux/ss/conditional.c
+++ b/security/selinux/ss/conditional.c
@@ -555,7 +555,7 @@ static int cond_write_av_list(struct policydb *p,
return 0;
}
-int cond_write_node(struct policydb *p, struct cond_node *node,
+static int cond_write_node(struct policydb *p, struct cond_node *node,
struct policy_file *fp)
{
struct cond_expr *cur_expr;
diff --git a/security/selinux/ss/conditional.h b/security/selinux/ss/conditional.h
index 3f209c63529..4d1f8746650 100644
--- a/security/selinux/ss/conditional.h
+++ b/security/selinux/ss/conditional.h
@@ -13,6 +13,7 @@
#include "avtab.h"
#include "symtab.h"
#include "policydb.h"
+#include "../include/conditional.h"
#define COND_EXPR_MAXDEPTH 10
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 2381d0ded22..a7f61d52f05 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -1743,8 +1743,6 @@ static int policydb_bounds_sanity_check(struct policydb *p)
return 0;
}
-extern int ss_initialized;
-
u16 string_to_security_class(struct policydb *p, const char *name)
{
struct class_datum *cladatum;
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index f6917bc0aa0..185f849a26f 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -70,8 +70,6 @@
#include "ebitmap.h"
#include "audit.h"
-extern void selnl_notify_policyload(u32 seqno);
-
int selinux_policycap_netpeer;
int selinux_policycap_openperm;
@@ -1790,7 +1788,6 @@ static void security_load_policycaps(void)
POLICYDB_CAPABILITY_OPENPERM);
}
-extern void selinux_complete_init(void);
static int security_preserve_bools(struct policydb *p);
/**
diff --git a/security/smack/smack.h b/security/smack/smack.h
index 2b6c6a51612..2ad00657b80 100644
--- a/security/smack/smack.h
+++ b/security/smack/smack.h
@@ -41,9 +41,9 @@ struct superblock_smack {
};
struct socket_smack {
- char *smk_out; /* outbound label */
- char *smk_in; /* inbound label */
- char smk_packet[SMK_LABELLEN]; /* TCP peer label */
+ char *smk_out; /* outbound label */
+ char *smk_in; /* inbound label */
+ char *smk_packet; /* TCP peer label */
};
/*
@@ -116,13 +116,19 @@ struct smk_netlbladdr {
* If there is a cipso value associated with the label it
* gets stored here, too. This will most likely be rare as
* the cipso direct mapping is used internally.
+ *
+ * Keep the access rules for this subject label here so that
+ * the entire set of rules does not need to be examined every
+ * time.
*/
struct smack_known {
struct list_head list;
char smk_known[SMK_LABELLEN];
u32 smk_secid;
struct smack_cipso *smk_cipso;
- spinlock_t smk_cipsolock; /* for changing cipso map */
+ spinlock_t smk_cipsolock; /* for changing cipso map */
+ struct list_head smk_rules; /* access rules */
+ struct mutex smk_rules_lock; /* lock for the rules */
};
/*
@@ -150,7 +156,6 @@ struct smack_known {
/*
* smackfs magic number
- * smackfs macic number
*/
#define SMACK_MAGIC 0x43415d53 /* "SMAC" */
@@ -176,9 +181,9 @@ struct smack_known {
#define MAY_NOT 0
/*
- * Number of access types used by Smack (rwxa)
+ * Number of access types used by Smack (rwxat)
*/
-#define SMK_NUM_ACCESS_TYPE 4
+#define SMK_NUM_ACCESS_TYPE 5
/*
* Smack audit data; is empty if CONFIG_AUDIT not set
@@ -201,10 +206,12 @@ int smk_access_entry(char *, char *, struct list_head *);
int smk_access(char *, char *, int, struct smk_audit_info *);
int smk_curacc(char *, u32, struct smk_audit_info *);
int smack_to_cipso(const char *, struct smack_cipso *);
-void smack_from_cipso(u32, char *, char *);
+char *smack_from_cipso(u32, char *);
char *smack_from_secid(const u32);
+void smk_parse_smack(const char *string, int len, char *smack);
char *smk_import(const char *, int);
struct smack_known *smk_import_entry(const char *, int);
+struct smack_known *smk_find_entry(const char *);
u32 smack_to_secid(const char *);
/*
@@ -223,7 +230,6 @@ extern struct smack_known smack_known_star;
extern struct smack_known smack_known_web;
extern struct list_head smack_known_list;
-extern struct list_head smack_rule_list;
extern struct list_head smk_netlbladdr_list;
extern struct security_operations smack_ops;
diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
index 9637e107f7e..cc7cb6edba0 100644
--- a/security/smack/smack_access.c
+++ b/security/smack/smack_access.c
@@ -77,14 +77,19 @@ int log_policy = SMACK_AUDIT_DENIED;
* entry is found returns -ENOENT.
*
* NOTE:
- * Even though Smack labels are usually shared on smack_list
- * labels that come in off the network can't be imported
- * and added to the list for locking reasons.
*
- * Therefore, it is necessary to check the contents of the labels,
- * not just the pointer values. Of course, in most cases the labels
- * will be on the list, so checking the pointers may be a worthwhile
- * optimization.
+ * Earlier versions of this function allowed for labels that
+ * were not on the label list. This was done to allow for
+ * labels to come over the network that had never been seen
+ * before on this host. Unless the receiving socket has the
+ * star label this will always result in a failure check. The
+ * star labeled socket case is now handled in the networking
+ * hooks so there is no case where the label is not on the
+ * label list. Checking to see if the address of two labels
+ * is the same is now a reliable test.
+ *
+ * Do the object check first because that is more
+ * likely to differ.
*/
int smk_access_entry(char *subject_label, char *object_label,
struct list_head *rule_list)
@@ -93,13 +98,10 @@ int smk_access_entry(char *subject_label, char *object_label,
struct smack_rule *srp;
list_for_each_entry_rcu(srp, rule_list, list) {
- if (srp->smk_subject == subject_label ||
- strcmp(srp->smk_subject, subject_label) == 0) {
- if (srp->smk_object == object_label ||
- strcmp(srp->smk_object, object_label) == 0) {
- may = srp->smk_access;
- break;
- }
+ if (srp->smk_object == object_label &&
+ srp->smk_subject == subject_label) {
+ may = srp->smk_access;
+ break;
}
}
@@ -117,18 +119,12 @@ int smk_access_entry(char *subject_label, char *object_label,
* access rule list and returns 0 if the access is permitted,
* non zero otherwise.
*
- * Even though Smack labels are usually shared on smack_list
- * labels that come in off the network can't be imported
- * and added to the list for locking reasons.
- *
- * Therefore, it is necessary to check the contents of the labels,
- * not just the pointer values. Of course, in most cases the labels
- * will be on the list, so checking the pointers may be a worthwhile
- * optimization.
+ * Smack labels are shared on smack_list
*/
int smk_access(char *subject_label, char *object_label, int request,
struct smk_audit_info *a)
{
+ struct smack_known *skp;
int may = MAY_NOT;
int rc = 0;
@@ -137,8 +133,7 @@ int smk_access(char *subject_label, char *object_label, int request,
*
* A star subject can't access any object.
*/
- if (subject_label == smack_known_star.smk_known ||
- strcmp(subject_label, smack_known_star.smk_known) == 0) {
+ if (subject_label == smack_known_star.smk_known) {
rc = -EACCES;
goto out_audit;
}
@@ -148,33 +143,27 @@ int smk_access(char *subject_label, char *object_label, int request,
* An internet subject can access any object.
*/
if (object_label == smack_known_web.smk_known ||
- subject_label == smack_known_web.smk_known ||
- strcmp(object_label, smack_known_web.smk_known) == 0 ||
- strcmp(subject_label, smack_known_web.smk_known) == 0)
+ subject_label == smack_known_web.smk_known)
goto out_audit;
/*
* A star object can be accessed by any subject.
*/
- if (object_label == smack_known_star.smk_known ||
- strcmp(object_label, smack_known_star.smk_known) == 0)
+ if (object_label == smack_known_star.smk_known)
goto out_audit;
/*
* An object can be accessed in any way by a subject
* with the same label.
*/
- if (subject_label == object_label ||
- strcmp(subject_label, object_label) == 0)
+ if (subject_label == object_label)
goto out_audit;
/*
* A hat subject can read any object.
* A floor object can be read by any subject.
*/
if ((request & MAY_ANYREAD) == request) {
- if (object_label == smack_known_floor.smk_known ||
- strcmp(object_label, smack_known_floor.smk_known) == 0)
+ if (object_label == smack_known_floor.smk_known)
goto out_audit;
- if (subject_label == smack_known_hat.smk_known ||
- strcmp(subject_label, smack_known_hat.smk_known) == 0)
+ if (subject_label == smack_known_hat.smk_known)
goto out_audit;
}
/*
@@ -184,8 +173,9 @@ int smk_access(char *subject_label, char *object_label, int request,
* good. A negative response from smk_access_entry()
* indicates there is no entry for this pair.
*/
+ skp = smk_find_entry(subject_label);
rcu_read_lock();
- may = smk_access_entry(subject_label, object_label, &smack_rule_list);
+ may = smk_access_entry(subject_label, object_label, &skp->smk_rules);
rcu_read_unlock();
if (may > 0 && (request & may) == request)
@@ -344,17 +334,32 @@ void smack_log(char *subject_label, char *object_label, int request,
static DEFINE_MUTEX(smack_known_lock);
/**
- * smk_import_entry - import a label, return the list entry
+ * smk_find_entry - find a label on the list, return the list entry
* @string: a text string that might be a Smack label
- * @len: the maximum size, or zero if it is NULL terminated.
*
* Returns a pointer to the entry in the label list that
- * matches the passed string, adding it if necessary.
+ * matches the passed string.
*/
-struct smack_known *smk_import_entry(const char *string, int len)
+struct smack_known *smk_find_entry(const char *string)
{
struct smack_known *skp;
- char smack[SMK_LABELLEN];
+
+ list_for_each_entry_rcu(skp, &smack_known_list, list) {
+ if (strncmp(skp->smk_known, string, SMK_MAXLEN) == 0)
+ return skp;
+ }
+
+ return NULL;
+}
+
+/**
+ * smk_parse_smack - parse smack label from a text string
+ * @string: a text string that might contain a Smack label
+ * @len: the maximum size, or zero if it is NULL terminated.
+ * @smack: parsed smack label, or NULL if parse error
+ */
+void smk_parse_smack(const char *string, int len, char *smack)
+{
int found;
int i;
@@ -372,27 +377,38 @@ struct smack_known *smk_import_entry(const char *string, int len)
} else
smack[i] = string[i];
}
+}
+
+/**
+ * smk_import_entry - import a label, return the list entry
+ * @string: a text string that might be a Smack label
+ * @len: the maximum size, or zero if it is NULL terminated.
+ *
+ * Returns a pointer to the entry in the label list that
+ * matches the passed string, adding it if necessary.
+ */
+struct smack_known *smk_import_entry(const char *string, int len)
+{
+ struct smack_known *skp;
+ char smack[SMK_LABELLEN];
+ smk_parse_smack(string, len, smack);
if (smack[0] == '\0')
return NULL;
mutex_lock(&smack_known_lock);
- found = 0;
- list_for_each_entry_rcu(skp, &smack_known_list, list) {
- if (strncmp(skp->smk_known, smack, SMK_MAXLEN) == 0) {
- found = 1;
- break;
- }
- }
+ skp = smk_find_entry(smack);
- if (found == 0) {
+ if (skp == NULL) {
skp = kzalloc(sizeof(struct smack_known), GFP_KERNEL);
if (skp != NULL) {
strncpy(skp->smk_known, smack, SMK_MAXLEN);
skp->smk_secid = smack_next_secid++;
skp->smk_cipso = NULL;
+ INIT_LIST_HEAD(&skp->smk_rules);
spin_lock_init(&skp->smk_cipsolock);
+ mutex_init(&skp->smk_rules_lock);
/*
* Make sure that the entry is actually
* filled before putting it on the list.
@@ -480,19 +496,12 @@ u32 smack_to_secid(const char *smack)
* smack_from_cipso - find the Smack label associated with a CIPSO option
* @level: Bell & LaPadula level from the network
* @cp: Bell & LaPadula categories from the network
- * @result: where to put the Smack value
*
* This is a simple lookup in the label table.
*
- * This is an odd duck as far as smack handling goes in that
- * it sends back a copy of the smack label rather than a pointer
- * to the master list. This is done because it is possible for
- * a foreign host to send a smack label that is new to this
- * machine and hence not on the list. That would not be an
- * issue except that adding an entry to the master list can't
- * be done at that point.
+ * Return the matching label from the label list or NULL.
*/
-void smack_from_cipso(u32 level, char *cp, char *result)
+char *smack_from_cipso(u32 level, char *cp)
{
struct smack_known *kp;
char *final = NULL;
@@ -509,12 +518,13 @@ void smack_from_cipso(u32 level, char *cp, char *result)
final = kp->smk_known;
spin_unlock_bh(&kp->smk_cipsolock);
+
+ if (final != NULL)
+ break;
}
rcu_read_unlock();
- if (final == NULL)
- final = smack_known_huh.smk_known;
- strncpy(result, final, SMK_MAXLEN);
- return;
+
+ return final;
}
/**
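
The smack_access.c changes above depend on every label in use being interned on smack_known_list, which is what lets smk_access_entry() and smk_access() compare label pointers instead of calling strcmp(). A minimal userspace sketch of that interning idea follows; the list type and helpers are invented, and only the find-or-insert plus compare-by-pointer behaviour is the point.

/* Userspace sketch of label interning: every label string is stored once
 * on a global list, so equality checks reduce to pointer comparison.
 * The list handling below is illustrative, not Smack's implementation.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct known_label {
    struct known_label *next;
    char label[24];
};

static struct known_label *known_list;

/* Return the canonical entry for a label, or NULL if never imported. */
static struct known_label *find_entry(const char *label)
{
    for (struct known_label *kp = known_list; kp; kp = kp->next)
        if (strcmp(kp->label, label) == 0)
            return kp;
    return NULL;
}

/* Find-or-insert: anything handed out by this function is canonical. */
static const char *import_label(const char *label)
{
    struct known_label *kp = find_entry(label);

    if (!kp) {
        kp = calloc(1, sizeof(*kp));
        if (!kp)
            return NULL;
        snprintf(kp->label, sizeof(kp->label), "%s", label);
        kp->next = known_list;
        known_list = kp;
    }
    return kp->label;
}

int main(void)
{
    const char *a = import_label("System");
    const char *b = import_label("System");  /* same entry handed back */
    const char *c = import_label("User");

    /* pointer equality is now a reliable label comparison */
    printf("a == b: %d, a == c: %d\n", a == b, a == c);
    return 0;
}
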
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index b9c5e149903..7db62b48eb4 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -5,12 +5,13 @@
*
* Authors:
* Casey Schaufler <casey@schaufler-ca.com>
- * Jarkko Sakkinen <ext-jarkko.2.sakkinen@nokia.com>
+ * Jarkko Sakkinen <jarkko.sakkinen@intel.com>
*
* Copyright (C) 2007 Casey Schaufler <casey@schaufler-ca.com>
* Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
* Paul Moore <paul@paul-moore.com>
* Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2011 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
@@ -34,6 +35,7 @@
#include <linux/audit.h>
#include <linux/magic.h>
#include <linux/dcache.h>
+#include <linux/personality.h>
#include "smack.h"
#define task_security(task) (task_cred_xxx((task), security))
@@ -441,11 +443,17 @@ static int smack_sb_umount(struct vfsmount *mnt, int flags)
* BPRM hooks
*/
+/**
+ * smack_bprm_set_creds - set creds for exec
+ * @bprm: the exec information
+ *
+ * Returns 0 if it gets a blob, -ENOMEM otherwise
+ */
static int smack_bprm_set_creds(struct linux_binprm *bprm)
{
- struct task_smack *tsp = bprm->cred->security;
+ struct inode *inode = bprm->file->f_path.dentry->d_inode;
+ struct task_smack *bsp = bprm->cred->security;
struct inode_smack *isp;
- struct dentry *dp;
int rc;
rc = cap_bprm_set_creds(bprm);
@@ -455,20 +463,48 @@ static int smack_bprm_set_creds(struct linux_binprm *bprm)
if (bprm->cred_prepared)
return 0;
- if (bprm->file == NULL || bprm->file->f_dentry == NULL)
+ isp = inode->i_security;
+ if (isp->smk_task == NULL || isp->smk_task == bsp->smk_task)
return 0;
- dp = bprm->file->f_dentry;
+ if (bprm->unsafe)
+ return -EPERM;
- if (dp->d_inode == NULL)
- return 0;
+ bsp->smk_task = isp->smk_task;
+ bprm->per_clear |= PER_CLEAR_ON_SETID;
- isp = dp->d_inode->i_security;
+ return 0;
+}
- if (isp->smk_task != NULL)
- tsp->smk_task = isp->smk_task;
+/**
+ * smack_bprm_committing_creds - Prepare to install the new credentials
+ * from bprm.
+ *
+ * @bprm: binprm for exec
+ */
+static void smack_bprm_committing_creds(struct linux_binprm *bprm)
+{
+ struct task_smack *bsp = bprm->cred->security;
- return 0;
+ if (bsp->smk_task != bsp->smk_forked)
+ current->pdeath_signal = 0;
+}
+
+/**
+ * smack_bprm_secureexec - Return the decision to use secureexec.
+ * @bprm: binprm for exec
+ *
+ * Returns 0 on success.
+ */
+static int smack_bprm_secureexec(struct linux_binprm *bprm)
+{
+ struct task_smack *tsp = current_security();
+ int ret = cap_bprm_secureexec(bprm);
+
+ if (!ret && (tsp->smk_task != tsp->smk_forked))
+ ret = 1;
+
+ return ret;
}
/*
@@ -516,6 +552,8 @@ static int smack_inode_init_security(struct inode *inode, struct inode *dir,
const struct qstr *qstr, char **name,
void **value, size_t *len)
{
+ struct smack_known *skp;
+ char *csp = smk_of_current();
char *isp = smk_of_inode(inode);
char *dsp = smk_of_inode(dir);
int may;
@@ -527,8 +565,9 @@ static int smack_inode_init_security(struct inode *inode, struct inode *dir,
}
if (value) {
+ skp = smk_find_entry(csp);
rcu_read_lock();
- may = smk_access_entry(smk_of_current(), dsp, &smack_rule_list);
+ may = smk_access_entry(csp, dsp, &skp->smk_rules);
rcu_read_unlock();
/*
@@ -841,7 +880,7 @@ static void smack_inode_post_setxattr(struct dentry *dentry, const char *name,
return;
}
-/*
+/**
* smack_inode_getxattr - Smack check on getxattr
* @dentry: the object
* @name: unused
@@ -858,7 +897,7 @@ static int smack_inode_getxattr(struct dentry *dentry, const char *name)
return smk_curacc(smk_of_inode(dentry->d_inode), MAY_READ, &ad);
}
-/*
+/**
* smack_inode_removexattr - Smack check on removexattr
* @dentry: the object
* @name: name of the attribute
@@ -1088,36 +1127,31 @@ static int smack_file_lock(struct file *file, unsigned int cmd)
* @cmd: what action to check
* @arg: unused
*
+ * Generally these operations are harmless.
+ * File locking operations present an obvious mechanism
+ * for passing information, so they require write access.
+ *
* Returns 0 if current has access, error code otherwise
*/
static int smack_file_fcntl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct smk_audit_info ad;
- int rc;
+ int rc = 0;
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
- smk_ad_setfield_u_fs_path(&ad, file->f_path);
switch (cmd) {
- case F_DUPFD:
- case F_GETFD:
- case F_GETFL:
case F_GETLK:
- case F_GETOWN:
- case F_GETSIG:
- rc = smk_curacc(file->f_security, MAY_READ, &ad);
- break;
- case F_SETFD:
- case F_SETFL:
case F_SETLK:
case F_SETLKW:
case F_SETOWN:
case F_SETSIG:
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
+ smk_ad_setfield_u_fs_path(&ad, file->f_path);
rc = smk_curacc(file->f_security, MAY_WRITE, &ad);
break;
default:
- rc = smk_curacc(file->f_security, MAY_READWRITE, &ad);
+ break;
}
return rc;
@@ -1138,6 +1172,7 @@ static int smack_file_mmap(struct file *file,
unsigned long flags, unsigned long addr,
unsigned long addr_only)
{
+ struct smack_known *skp;
struct smack_rule *srp;
struct task_smack *tsp;
char *sp;
@@ -1170,6 +1205,7 @@ static int smack_file_mmap(struct file *file,
tsp = current_security();
sp = smk_of_current();
+ skp = smk_find_entry(sp);
rc = 0;
rcu_read_lock();
@@ -1177,15 +1213,8 @@ static int smack_file_mmap(struct file *file,
* For each Smack rule associated with the subject
* label verify that the SMACK64MMAP also has access
* to that rule's object label.
- *
- * Because neither of the labels comes
- * from the networking code it is sufficient
- * to compare pointers.
*/
- list_for_each_entry_rcu(srp, &smack_rule_list, list) {
- if (srp->smk_subject != sp)
- continue;
-
+ list_for_each_entry_rcu(srp, &skp->smk_rules, list) {
osmack = srp->smk_object;
/*
* Matching labels always allows access.
@@ -1214,7 +1243,8 @@ static int smack_file_mmap(struct file *file,
* If there isn't one a SMACK64MMAP subject
* can't have as much access as current.
*/
- mmay = smk_access_entry(msmack, osmack, &smack_rule_list);
+ skp = smk_find_entry(msmack);
+ mmay = smk_access_entry(msmack, osmack, &skp->smk_rules);
if (mmay == -ENOENT) {
rc = -EACCES;
break;
@@ -1315,6 +1345,24 @@ static int smack_file_receive(struct file *file)
return smk_curacc(file->f_security, may, &ad);
}
+/**
+ * smack_dentry_open - Smack dentry open processing
+ * @file: the object
+ * @cred: unused
+ *
+ * Set the security blob in the file structure.
+ *
+ * Returns 0
+ */
+static int smack_dentry_open(struct file *file, const struct cred *cred)
+{
+ struct inode_smack *isp = file->f_path.dentry->d_inode->i_security;
+
+ file->f_security = isp->smk_inode;
+
+ return 0;
+}
+
/*
* Task hooks
*/
@@ -1455,15 +1503,17 @@ static int smack_kernel_create_files_as(struct cred *new,
/**
* smk_curacc_on_task - helper to log task related access
* @p: the task object
- * @access : the access requested
+ * @access: the access requested
+ * @caller: name of the calling function for audit
*
* Return 0 if access is permitted
*/
-static int smk_curacc_on_task(struct task_struct *p, int access)
+static int smk_curacc_on_task(struct task_struct *p, int access,
+ const char *caller)
{
struct smk_audit_info ad;
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
+ smk_ad_init(&ad, caller, LSM_AUDIT_DATA_TASK);
smk_ad_setfield_u_tsk(&ad, p);
return smk_curacc(smk_of_task(task_security(p)), access, &ad);
}
@@ -1477,7 +1527,7 @@ static int smk_curacc_on_task(struct task_struct *p, int access)
*/
static int smack_task_setpgid(struct task_struct *p, pid_t pgid)
{
- return smk_curacc_on_task(p, MAY_WRITE);
+ return smk_curacc_on_task(p, MAY_WRITE, __func__);
}
/**
@@ -1488,7 +1538,7 @@ static int smack_task_setpgid(struct task_struct *p, pid_t pgid)
*/
static int smack_task_getpgid(struct task_struct *p)
{
- return smk_curacc_on_task(p, MAY_READ);
+ return smk_curacc_on_task(p, MAY_READ, __func__);
}
/**
@@ -1499,7 +1549,7 @@ static int smack_task_getpgid(struct task_struct *p)
*/
static int smack_task_getsid(struct task_struct *p)
{
- return smk_curacc_on_task(p, MAY_READ);
+ return smk_curacc_on_task(p, MAY_READ, __func__);
}
/**
@@ -1527,7 +1577,7 @@ static int smack_task_setnice(struct task_struct *p, int nice)
rc = cap_task_setnice(p, nice);
if (rc == 0)
- rc = smk_curacc_on_task(p, MAY_WRITE);
+ rc = smk_curacc_on_task(p, MAY_WRITE, __func__);
return rc;
}
@@ -1544,7 +1594,7 @@ static int smack_task_setioprio(struct task_struct *p, int ioprio)
rc = cap_task_setioprio(p, ioprio);
if (rc == 0)
- rc = smk_curacc_on_task(p, MAY_WRITE);
+ rc = smk_curacc_on_task(p, MAY_WRITE, __func__);
return rc;
}
@@ -1556,7 +1606,7 @@ static int smack_task_setioprio(struct task_struct *p, int ioprio)
*/
static int smack_task_getioprio(struct task_struct *p)
{
- return smk_curacc_on_task(p, MAY_READ);
+ return smk_curacc_on_task(p, MAY_READ, __func__);
}
/**
@@ -1573,7 +1623,7 @@ static int smack_task_setscheduler(struct task_struct *p)
rc = cap_task_setscheduler(p);
if (rc == 0)
- rc = smk_curacc_on_task(p, MAY_WRITE);
+ rc = smk_curacc_on_task(p, MAY_WRITE, __func__);
return rc;
}
@@ -1585,7 +1635,7 @@ static int smack_task_setscheduler(struct task_struct *p)
*/
static int smack_task_getscheduler(struct task_struct *p)
{
- return smk_curacc_on_task(p, MAY_READ);
+ return smk_curacc_on_task(p, MAY_READ, __func__);
}
/**
@@ -1596,7 +1646,7 @@ static int smack_task_getscheduler(struct task_struct *p)
*/
static int smack_task_movememory(struct task_struct *p)
{
- return smk_curacc_on_task(p, MAY_WRITE);
+ return smk_curacc_on_task(p, MAY_WRITE, __func__);
}
/**
@@ -1711,7 +1761,7 @@ static int smack_sk_alloc_security(struct sock *sk, int family, gfp_t gfp_flags)
ssp->smk_in = csp;
ssp->smk_out = csp;
- ssp->smk_packet[0] = '\0';
+ ssp->smk_packet = NULL;
sk->sk_security = ssp;
@@ -2753,6 +2803,7 @@ static int smack_unix_stream_connect(struct sock *sock,
{
struct socket_smack *ssp = sock->sk_security;
struct socket_smack *osp = other->sk_security;
+ struct socket_smack *nsp = newsk->sk_security;
struct smk_audit_info ad;
int rc = 0;
@@ -2762,6 +2813,14 @@ static int smack_unix_stream_connect(struct sock *sock,
if (!capable(CAP_MAC_OVERRIDE))
rc = smk_access(ssp->smk_out, osp->smk_in, MAY_WRITE, &ad);
+ /*
+ * Cross reference the peer labels for SO_PEERSEC.
+ */
+ if (rc == 0) {
+ nsp->smk_packet = ssp->smk_out;
+ ssp->smk_packet = osp->smk_out;
+ }
+
return rc;
}
@@ -2813,16 +2872,17 @@ static int smack_socket_sendmsg(struct socket *sock, struct msghdr *msg,
return smack_netlabel_send(sock->sk, sip);
}
-
/**
* smack_from_secattr - Convert a netlabel attr.mls.lvl/attr.mls.cat pair to smack
* @sap: netlabel secattr
- * @sip: where to put the result
+ * @ssp: socket security information
*
- * Copies a smack label into sip
+ * Returns a pointer to a Smack label found on the label list.
*/
-static void smack_from_secattr(struct netlbl_lsm_secattr *sap, char *sip)
+static char *smack_from_secattr(struct netlbl_lsm_secattr *sap,
+ struct socket_smack *ssp)
{
+ struct smack_known *skp;
char smack[SMK_LABELLEN];
char *sp;
int pcat;
@@ -2852,15 +2912,43 @@ static void smack_from_secattr(struct netlbl_lsm_secattr *sap, char *sip)
* we are already done. WeeHee.
*/
if (sap->attr.mls.lvl == smack_cipso_direct) {
- memcpy(sip, smack, SMK_MAXLEN);
- return;
+ /*
+ * The label sent is usually on the label list.
+ *
+ * If it is not we may still want to allow the
+ * delivery.
+ *
+ * If the recipient is accepting all packets
+ * because it is using the star ("*") label
+ * for SMACK64IPIN provide the web ("@") label
+ * so that a directed response will succeed.
+ * This is not very correct from a MAC point
+ * of view, but gets around the problem that
+ * locking prevents adding the newly discovered
+ * label to the list.
+ * The case where the recipient is not using
+ * the star label should obviously fail.
+ * The easy way to do this is to provide the
+ * star label as the subject label.
+ */
+ skp = smk_find_entry(smack);
+ if (skp != NULL)
+ return skp->smk_known;
+ if (ssp != NULL &&
+ ssp->smk_in == smack_known_star.smk_known)
+ return smack_known_web.smk_known;
+ return smack_known_star.smk_known;
}
/*
* Look it up in the supplied table if it is not
* a direct mapping.
*/
- smack_from_cipso(sap->attr.mls.lvl, smack, sip);
- return;
+ sp = smack_from_cipso(sap->attr.mls.lvl, smack);
+ if (sp != NULL)
+ return sp;
+ if (ssp != NULL && ssp->smk_in == smack_known_star.smk_known)
+ return smack_known_web.smk_known;
+ return smack_known_star.smk_known;
}
if ((sap->flags & NETLBL_SECATTR_SECID) != 0) {
/*
@@ -2875,16 +2963,14 @@ static void smack_from_secattr(struct netlbl_lsm_secattr *sap, char *sip)
* secid is from a fallback.
*/
BUG_ON(sp == NULL);
- strncpy(sip, sp, SMK_MAXLEN);
- return;
+ return sp;
}
/*
* Without guidance regarding the smack value
* for the packet fall back on the network
* ambient value.
*/
- strncpy(sip, smack_net_ambient, SMK_MAXLEN);
- return;
+ return smack_net_ambient;
}
/**
@@ -2898,7 +2984,6 @@ static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
struct netlbl_lsm_secattr secattr;
struct socket_smack *ssp = sk->sk_security;
- char smack[SMK_LABELLEN];
char *csp;
int rc;
struct smk_audit_info ad;
@@ -2911,10 +2996,9 @@ static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
netlbl_secattr_init(&secattr);
rc = netlbl_skbuff_getattr(skb, sk->sk_family, &secattr);
- if (rc == 0) {
- smack_from_secattr(&secattr, smack);
- csp = smack;
- } else
+ if (rc == 0)
+ csp = smack_from_secattr(&secattr, ssp);
+ else
csp = smack_net_ambient;
netlbl_secattr_destroy(&secattr);
@@ -2951,15 +3035,19 @@ static int smack_socket_getpeersec_stream(struct socket *sock,
int __user *optlen, unsigned len)
{
struct socket_smack *ssp;
- int slen;
+ char *rcp = "";
+ int slen = 1;
int rc = 0;
ssp = sock->sk->sk_security;
- slen = strlen(ssp->smk_packet) + 1;
+ if (ssp->smk_packet != NULL) {
+ rcp = ssp->smk_packet;
+ slen = strlen(rcp) + 1;
+ }
if (slen > len)
rc = -ERANGE;
- else if (copy_to_user(optval, ssp->smk_packet, slen) != 0)
+ else if (copy_to_user(optval, rcp, slen) != 0)
rc = -EFAULT;
if (put_user(slen, optlen) != 0)
@@ -2982,8 +3070,8 @@ static int smack_socket_getpeersec_dgram(struct socket *sock,
{
struct netlbl_lsm_secattr secattr;
- struct socket_smack *sp;
- char smack[SMK_LABELLEN];
+ struct socket_smack *ssp = NULL;
+ char *sp;
int family = PF_UNSPEC;
u32 s = 0; /* 0 is the invalid secid */
int rc;
@@ -2998,17 +3086,19 @@ static int smack_socket_getpeersec_dgram(struct socket *sock,
family = sock->sk->sk_family;
if (family == PF_UNIX) {
- sp = sock->sk->sk_security;
- s = smack_to_secid(sp->smk_out);
+ ssp = sock->sk->sk_security;
+ s = smack_to_secid(ssp->smk_out);
} else if (family == PF_INET || family == PF_INET6) {
/*
* Translate what netlabel gave us.
*/
+ if (sock != NULL && sock->sk != NULL)
+ ssp = sock->sk->sk_security;
netlbl_secattr_init(&secattr);
rc = netlbl_skbuff_getattr(skb, family, &secattr);
if (rc == 0) {
- smack_from_secattr(&secattr, smack);
- s = smack_to_secid(smack);
+ sp = smack_from_secattr(&secattr, ssp);
+ s = smack_to_secid(sp);
}
netlbl_secattr_destroy(&secattr);
}
@@ -3056,7 +3146,7 @@ static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb,
struct netlbl_lsm_secattr secattr;
struct sockaddr_in addr;
struct iphdr *hdr;
- char smack[SMK_LABELLEN];
+ char *sp;
int rc;
struct smk_audit_info ad;
@@ -3067,9 +3157,9 @@ static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb,
netlbl_secattr_init(&secattr);
rc = netlbl_skbuff_getattr(skb, family, &secattr);
if (rc == 0)
- smack_from_secattr(&secattr, smack);
+ sp = smack_from_secattr(&secattr, ssp);
else
- strncpy(smack, smack_known_huh.smk_known, SMK_MAXLEN);
+ sp = smack_known_huh.smk_known;
netlbl_secattr_destroy(&secattr);
#ifdef CONFIG_AUDIT
@@ -3082,7 +3172,7 @@ static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb,
* Receiving a packet requires that the other end be able to write
* here. Read access is not required.
*/
- rc = smk_access(smack, ssp->smk_in, MAY_WRITE, &ad);
+ rc = smk_access(sp, ssp->smk_in, MAY_WRITE, &ad);
if (rc != 0)
return rc;
@@ -3090,7 +3180,7 @@ static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb,
* Save the peer's label in the request_sock so we can later setup
* smk_packet in the child socket so that SO_PEERCRED can report it.
*/
- req->peer_secid = smack_to_secid(smack);
+ req->peer_secid = smack_to_secid(sp);
/*
* We need to decide if we want to label the incoming connection here
@@ -3103,7 +3193,7 @@ static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb,
if (smack_host_label(&addr) == NULL) {
rcu_read_unlock();
netlbl_secattr_init(&secattr);
- smack_to_secattr(smack, &secattr);
+ smack_to_secattr(sp, &secattr);
rc = netlbl_req_setattr(req, &secattr);
netlbl_secattr_destroy(&secattr);
} else {
@@ -3125,13 +3215,11 @@ static void smack_inet_csk_clone(struct sock *sk,
const struct request_sock *req)
{
struct socket_smack *ssp = sk->sk_security;
- char *smack;
- if (req->peer_secid != 0) {
- smack = smack_from_secid(req->peer_secid);
- strncpy(ssp->smk_packet, smack, SMK_MAXLEN);
- } else
- ssp->smk_packet[0] = '\0';
+ if (req->peer_secid != 0)
+ ssp->smk_packet = smack_from_secid(req->peer_secid);
+ else
+ ssp->smk_packet = NULL;
}
/*
@@ -3409,6 +3497,8 @@ struct security_operations smack_ops = {
.sb_umount = smack_sb_umount,
.bprm_set_creds = smack_bprm_set_creds,
+ .bprm_committing_creds = smack_bprm_committing_creds,
+ .bprm_secureexec = smack_bprm_secureexec,
.inode_alloc_security = smack_inode_alloc_security,
.inode_free_security = smack_inode_free_security,
@@ -3440,6 +3530,8 @@ struct security_operations smack_ops = {
.file_send_sigiotask = smack_file_send_sigiotask,
.file_receive = smack_file_receive,
+ .dentry_open = smack_dentry_open,
+
.cred_alloc_blank = smack_cred_alloc_blank,
.cred_free = smack_cred_free,
.cred_prepare = smack_cred_prepare,
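
Several smack_lsm.c hunks above switch from walking the single global smack_rule_list to walking &skp->smk_rules, the rule list hanging off the subject label found with smk_find_entry(), so an access check only scans that subject's own rules. A small userspace sketch of that per-subject layout follows, with invented structures and rule contents.

/* Sketch of keeping access rules on the subject label's own list rather
 * than one global list, so a lookup only walks that subject's rules.
 * Structures and rule contents below are invented for illustration.
 */
#include <stdio.h>
#include <string.h>

#define MAY_READ  0x1
#define MAY_WRITE 0x2

struct rule {
    const char *object;
    int access;
};

struct subject {
    const char *label;
    const struct rule *rules;   /* this subject's rules only */
    int nr_rules;
};

/* Walk only the rules attached to the subject that was looked up. */
static int access_entry(const struct subject *skp, const char *object)
{
    for (int i = 0; i < skp->nr_rules; i++)
        if (strcmp(skp->rules[i].object, object) == 0)
            return skp->rules[i].access;
    return -1;                  /* no rule for this pair */
}

int main(void)
{
    static const struct rule web_rules[] = {
        { "Logs",  MAY_WRITE },
        { "Certs", MAY_READ  },
    };
    static const struct subject web = { "WebApp", web_rules, 2 };

    int may = access_entry(&web, "Logs");

    printf("WebApp -> Logs: %s\n",
           (may > 0 && (may & MAY_WRITE)) ? "write ok" : "denied");
    return 0;
}
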
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index f93460156dc..6aceef518a4 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -44,6 +44,7 @@ enum smk_inos {
SMK_ONLYCAP = 9, /* the only "capable" label */
SMK_LOGGING = 10, /* logging */
SMK_LOAD_SELF = 11, /* task specific rules */
+ SMK_ACCESSES = 12, /* access policy */
};
/*
@@ -85,6 +86,16 @@ char *smack_onlycap;
*/
LIST_HEAD(smk_netlbladdr_list);
+
+/*
+ * Rule lists are maintained for each label.
+ * This master list is just for reading /smack/load.
+ */
+struct smack_master_list {
+ struct list_head list;
+ struct smack_rule *smk_rule;
+};
+
LIST_HEAD(smack_rule_list);
static int smk_cipso_doi_value = SMACK_CIPSO_DOI_DEFAULT;
@@ -92,7 +103,7 @@ static int smk_cipso_doi_value = SMACK_CIPSO_DOI_DEFAULT;
const char *smack_cipso_option = SMACK_CIPSO_OPTION;
-#define SEQ_READ_FINISHED 1
+#define SEQ_READ_FINISHED ((loff_t)-1)
/*
* Values for parsing cipso rules
@@ -159,9 +170,13 @@ static int smk_set_access(struct smack_rule *srp, struct list_head *rule_list,
mutex_lock(rule_lock);
+ /*
+ * Because the object label is less likely to match
+ * than the subject label check it first
+ */
list_for_each_entry_rcu(sp, rule_list, list) {
- if (sp->smk_subject == srp->smk_subject &&
- sp->smk_object == srp->smk_object) {
+ if (sp->smk_object == srp->smk_object &&
+ sp->smk_subject == srp->smk_subject) {
found = 1;
sp->smk_access = srp->smk_access;
break;
@@ -176,6 +191,99 @@ static int smk_set_access(struct smack_rule *srp, struct list_head *rule_list,
}
/**
+ * smk_parse_rule - parse Smack rule from load string
+ * @data: string to be parsed whose size is SMK_LOADLEN
+ * @rule: Smack rule
+ * @import: if non-zero, import labels
+ */
+static int smk_parse_rule(const char *data, struct smack_rule *rule, int import)
+{
+ char smack[SMK_LABELLEN];
+ struct smack_known *skp;
+
+ if (import) {
+ rule->smk_subject = smk_import(data, 0);
+ if (rule->smk_subject == NULL)
+ return -1;
+
+ rule->smk_object = smk_import(data + SMK_LABELLEN, 0);
+ if (rule->smk_object == NULL)
+ return -1;
+ } else {
+ smk_parse_smack(data, 0, smack);
+ skp = smk_find_entry(smack);
+ if (skp == NULL)
+ return -1;
+ rule->smk_subject = skp->smk_known;
+
+ smk_parse_smack(data + SMK_LABELLEN, 0, smack);
+ skp = smk_find_entry(smack);
+ if (skp == NULL)
+ return -1;
+ rule->smk_object = skp->smk_known;
+ }
+
+ rule->smk_access = 0;
+
+ switch (data[SMK_LABELLEN + SMK_LABELLEN]) {
+ case '-':
+ break;
+ case 'r':
+ case 'R':
+ rule->smk_access |= MAY_READ;
+ break;
+ default:
+ return -1;
+ }
+
+ switch (data[SMK_LABELLEN + SMK_LABELLEN + 1]) {
+ case '-':
+ break;
+ case 'w':
+ case 'W':
+ rule->smk_access |= MAY_WRITE;
+ break;
+ default:
+ return -1;
+ }
+
+ switch (data[SMK_LABELLEN + SMK_LABELLEN + 2]) {
+ case '-':
+ break;
+ case 'x':
+ case 'X':
+ rule->smk_access |= MAY_EXEC;
+ break;
+ default:
+ return -1;
+ }
+
+ switch (data[SMK_LABELLEN + SMK_LABELLEN + 3]) {
+ case '-':
+ break;
+ case 'a':
+ case 'A':
+ rule->smk_access |= MAY_APPEND;
+ break;
+ default:
+ return -1;
+ }
+
+ switch (data[SMK_LABELLEN + SMK_LABELLEN + 4]) {
+ case '-':
+ break;
+ case 't':
+ case 'T':
+ rule->smk_access |= MAY_TRANSMUTE;
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
* smk_write_load_list - write() for any /smack/load
* @file: file pointer, not actually used
* @buf: where to get the data from
@@ -197,9 +305,12 @@ static ssize_t smk_write_load_list(struct file *file, const char __user *buf,
struct list_head *rule_list,
struct mutex *rule_lock)
{
+ struct smack_master_list *smlp;
+ struct smack_known *skp;
struct smack_rule *rule;
char *data;
int rc = -EINVAL;
+ int load = 0;
/*
* No partial writes.
@@ -234,69 +345,14 @@ static ssize_t smk_write_load_list(struct file *file, const char __user *buf,
goto out;
}
- rule->smk_subject = smk_import(data, 0);
- if (rule->smk_subject == NULL)
- goto out_free_rule;
-
- rule->smk_object = smk_import(data + SMK_LABELLEN, 0);
- if (rule->smk_object == NULL)
+ if (smk_parse_rule(data, rule, 1))
goto out_free_rule;
- rule->smk_access = 0;
-
- switch (data[SMK_LABELLEN + SMK_LABELLEN]) {
- case '-':
- break;
- case 'r':
- case 'R':
- rule->smk_access |= MAY_READ;
- break;
- default:
- goto out_free_rule;
- }
-
- switch (data[SMK_LABELLEN + SMK_LABELLEN + 1]) {
- case '-':
- break;
- case 'w':
- case 'W':
- rule->smk_access |= MAY_WRITE;
- break;
- default:
- goto out_free_rule;
- }
-
- switch (data[SMK_LABELLEN + SMK_LABELLEN + 2]) {
- case '-':
- break;
- case 'x':
- case 'X':
- rule->smk_access |= MAY_EXEC;
- break;
- default:
- goto out_free_rule;
- }
-
- switch (data[SMK_LABELLEN + SMK_LABELLEN + 3]) {
- case '-':
- break;
- case 'a':
- case 'A':
- rule->smk_access |= MAY_APPEND;
- break;
- default:
- goto out_free_rule;
- }
-
- switch (data[SMK_LABELLEN + SMK_LABELLEN + 4]) {
- case '-':
- break;
- case 't':
- case 'T':
- rule->smk_access |= MAY_TRANSMUTE;
- break;
- default:
- goto out_free_rule;
+ if (rule_list == NULL) {
+ load = 1;
+ skp = smk_find_entry(rule->smk_subject);
+ rule_list = &skp->smk_rules;
+ rule_lock = &skp->smk_rules_lock;
}
rc = count;
@@ -304,8 +360,15 @@ static ssize_t smk_write_load_list(struct file *file, const char __user *buf,
* smk_set_access returns true if there was already a rule
* for the subject/object pair, and false if it was new.
*/
- if (!smk_set_access(rule, rule_list, rule_lock))
+ if (!smk_set_access(rule, rule_list, rule_lock)) {
+ smlp = kzalloc(sizeof(*smlp), GFP_KERNEL);
+ if (smlp != NULL) {
+ smlp->smk_rule = rule;
+ list_add_rcu(&smlp->list, &smack_rule_list);
+ } else
+ rc = -ENOMEM;
goto out;
+ }
out_free_rule:
kfree(rule);
@@ -321,11 +384,24 @@ out:
static void *load_seq_start(struct seq_file *s, loff_t *pos)
{
- if (*pos == SEQ_READ_FINISHED)
+ struct list_head *list;
+
+ /*
+ * This is 0 the first time through.
+ */
+ if (s->index == 0)
+ s->private = &smack_rule_list;
+
+ if (s->private == NULL)
return NULL;
- if (list_empty(&smack_rule_list))
+
+ list = s->private;
+ if (list_empty(list))
return NULL;
- return smack_rule_list.next;
+
+ if (s->index == 0)
+ return list->next;
+ return list;
}
static void *load_seq_next(struct seq_file *s, void *v, loff_t *pos)
@@ -333,17 +409,19 @@ static void *load_seq_next(struct seq_file *s, void *v, loff_t *pos)
struct list_head *list = v;
if (list_is_last(list, &smack_rule_list)) {
- *pos = SEQ_READ_FINISHED;
+ s->private = NULL;
return NULL;
}
+ s->private = list->next;
return list->next;
}
static int load_seq_show(struct seq_file *s, void *v)
{
struct list_head *list = v;
- struct smack_rule *srp =
- list_entry(list, struct smack_rule, list);
+ struct smack_master_list *smlp =
+ list_entry(list, struct smack_master_list, list);
+ struct smack_rule *srp = smlp->smk_rule;
seq_printf(s, "%s %s", (char *)srp->smk_subject,
(char *)srp->smk_object);
@@ -412,8 +490,7 @@ static ssize_t smk_write_load(struct file *file, const char __user *buf,
if (!capable(CAP_MAC_ADMIN))
return -EPERM;
- return smk_write_load_list(file, buf, count, ppos, &smack_rule_list,
- &smack_list_lock);
+ return smk_write_load_list(file, buf, count, ppos, NULL, NULL);
}
static const struct file_operations smk_load_ops = {
@@ -1425,6 +1502,44 @@ static const struct file_operations smk_load_self_ops = {
.write = smk_write_load_self,
.release = seq_release,
};
+
+/**
+ * smk_write_access - handle access check transaction
+ * @file: file pointer
+ * @buf: data from user space
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ */
+static ssize_t smk_write_access(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct smack_rule rule;
+ char *data;
+ int res;
+
+ data = simple_transaction_get(file, buf, count);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ if (count < SMK_LOADLEN || smk_parse_rule(data, &rule, 0))
+ return -EINVAL;
+
+ res = smk_access(rule.smk_subject, rule.smk_object, rule.smk_access,
+ NULL);
+ data[0] = res == 0 ? '1' : '0';
+ data[1] = '\0';
+
+ simple_transaction_set(file, 2);
+ return SMK_LOADLEN;
+}
+
+static const struct file_operations smk_access_ops = {
+ .write = smk_write_access,
+ .read = simple_transaction_read,
+ .release = simple_transaction_release,
+ .llseek = generic_file_llseek,
+};
+
/**
* smk_fill_super - fill the /smackfs superblock
* @sb: the empty superblock
@@ -1459,6 +1574,8 @@ static int smk_fill_super(struct super_block *sb, void *data, int silent)
"logging", &smk_logging_ops, S_IRUGO|S_IWUSR},
[SMK_LOAD_SELF] = {
"load-self", &smk_load_self_ops, S_IRUGO|S_IWUGO},
+ [SMK_ACCESSES] = {
+ "access", &smk_access_ops, S_IRUGO|S_IWUGO},
/* last one */
{""}
};
@@ -1534,6 +1651,20 @@ static int __init init_smk_fs(void)
smk_cipso_doi();
smk_unlbl_ambient(NULL);
+ mutex_init(&smack_known_floor.smk_rules_lock);
+ mutex_init(&smack_known_hat.smk_rules_lock);
+ mutex_init(&smack_known_huh.smk_rules_lock);
+ mutex_init(&smack_known_invalid.smk_rules_lock);
+ mutex_init(&smack_known_star.smk_rules_lock);
+ mutex_init(&smack_known_web.smk_rules_lock);
+
+ INIT_LIST_HEAD(&smack_known_floor.smk_rules);
+ INIT_LIST_HEAD(&smack_known_hat.smk_rules);
+ INIT_LIST_HEAD(&smack_known_huh.smk_rules);
+ INIT_LIST_HEAD(&smack_known_invalid.smk_rules);
+ INIT_LIST_HEAD(&smack_known_star.smk_rules);
+ INIT_LIST_HEAD(&smack_known_web.smk_rules);
+
return err;
}
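
For illustration (not part of the patch itself): the /smack/access transaction file added above takes a rule in the same fixed-width format as /smack/load, two SMK_LABELLEN-byte label fields followed by the five access characters, and answers with a single '1' (access would be granted) or '0'. A minimal user-space sketch, assuming SMK_LABELLEN is 24, that a label shorter than its field is terminated by the first space, and that smackfs is mounted at /smack:

/*
 * Hypothetical helper querying /smack/access (sketch only; the label
 * width, padding convention and mount point are assumptions, not taken
 * from the patch).  Both labels must already be known to the kernel and
 * must be shorter than SMK_LABELLEN.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

#define SMK_LABELLEN 24	/* assumed to match the kernel's definition */

static int smack_would_grant(const char *subject, const char *object,
			     const char *access)	/* e.g. "rw---" */
{
	char rule[SMK_LABELLEN + SMK_LABELLEN + 5];
	char answer = '0';
	int fd = open("/smack/access", O_RDWR);

	if (fd < 0)
		return -1;
	/* Labels end at the first space within their field. */
	memset(rule, ' ', sizeof(rule));
	memcpy(rule, subject, strlen(subject));
	memcpy(rule + SMK_LABELLEN, object, strlen(object));
	memcpy(rule + 2 * SMK_LABELLEN, access, strlen(access));
	if (write(fd, rule, sizeof(rule)) < 0 || read(fd, &answer, 1) != 1)
		answer = '0';
	close(fd);
	return answer == '1';
}

On the kernel side, smk_write_access() parses the rule without importing new labels (smk_parse_rule() with import == 0) and stores the result of smk_access() in the transaction buffer for the subsequent read.
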
diff --git a/security/tomoyo/Kconfig b/security/tomoyo/Kconfig
index 7c7f8c16c10..8eb779b9d77 100644
--- a/security/tomoyo/Kconfig
+++ b/security/tomoyo/Kconfig
@@ -1,8 +1,10 @@
config SECURITY_TOMOYO
bool "TOMOYO Linux Support"
depends on SECURITY
+ depends on NET
select SECURITYFS
select SECURITY_PATH
+ select SECURITY_NETWORK
default n
help
This selects TOMOYO Linux, pathname-based access control.
diff --git a/security/tomoyo/Makefile b/security/tomoyo/Makefile
index 95278b71fc2..56a0c7be409 100644
--- a/security/tomoyo/Makefile
+++ b/security/tomoyo/Makefile
@@ -1,4 +1,4 @@
-obj-y = audit.o common.o condition.o domain.o file.o gc.o group.o load_policy.o memory.o mount.o realpath.o securityfs_if.o tomoyo.o util.o
+obj-y = audit.o common.o condition.o domain.o environ.o file.o gc.o group.o load_policy.o memory.o mount.o network.o realpath.o securityfs_if.o tomoyo.o util.o
$(obj)/policy/profile.conf:
@mkdir -p $(obj)/policy/
@@ -27,7 +27,7 @@ $(obj)/policy/stat.conf:
@touch $@
$(obj)/builtin-policy.h: $(obj)/policy/profile.conf $(obj)/policy/exception_policy.conf $(obj)/policy/domain_policy.conf $(obj)/policy/manager.conf $(obj)/policy/stat.conf
- @echo Generating built-in policy for TOMOYO 2.4.x.
+ @echo Generating built-in policy for TOMOYO 2.5.x.
@echo "static char tomoyo_builtin_profile[] __initdata =" > $@.tmp
@sed -e 's/\\/\\\\/g' -e 's/\"/\\"/g' -e 's/\(.*\)/"\1\\n"/' < $(obj)/policy/profile.conf >> $@.tmp
@echo "\"\";" >> $@.tmp
diff --git a/security/tomoyo/audit.c b/security/tomoyo/audit.c
index 5dbb1f7617c..075c3a6d164 100644
--- a/security/tomoyo/audit.c
+++ b/security/tomoyo/audit.c
@@ -313,6 +313,7 @@ static unsigned int tomoyo_log_count;
*/
static bool tomoyo_get_audit(const struct tomoyo_policy_namespace *ns,
const u8 profile, const u8 index,
+ const struct tomoyo_acl_info *matched_acl,
const bool is_granted)
{
u8 mode;
@@ -324,6 +325,9 @@ static bool tomoyo_get_audit(const struct tomoyo_policy_namespace *ns,
p = tomoyo_profile(ns, profile);
if (tomoyo_log_count >= p->pref[TOMOYO_PREF_MAX_AUDIT_LOG])
return false;
+ if (is_granted && matched_acl && matched_acl->cond &&
+ matched_acl->cond->grant_log != TOMOYO_GRANTLOG_AUTO)
+ return matched_acl->cond->grant_log == TOMOYO_GRANTLOG_YES;
mode = p->config[index];
if (mode == TOMOYO_CONFIG_USE_DEFAULT)
mode = p->config[category];
@@ -350,7 +354,8 @@ void tomoyo_write_log2(struct tomoyo_request_info *r, int len, const char *fmt,
char *buf;
struct tomoyo_log *entry;
bool quota_exceeded = false;
- if (!tomoyo_get_audit(r->domain->ns, r->profile, r->type, r->granted))
+ if (!tomoyo_get_audit(r->domain->ns, r->profile, r->type,
+ r->matched_acl, r->granted))
goto out;
buf = tomoyo_init_log(r, len, fmt, args);
if (!buf)
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
index 2e43aec1c36..150911c7ff0 100644
--- a/security/tomoyo/common.c
+++ b/security/tomoyo/common.c
@@ -20,6 +20,7 @@ const char * const tomoyo_mode[TOMOYO_CONFIG_MAX_MODE] = {
/* String table for /sys/kernel/security/tomoyo/profile */
const char * const tomoyo_mac_keywords[TOMOYO_MAX_MAC_INDEX
+ TOMOYO_MAX_MAC_CATEGORY_INDEX] = {
+ /* CONFIG::file group */
[TOMOYO_MAC_FILE_EXECUTE] = "execute",
[TOMOYO_MAC_FILE_OPEN] = "open",
[TOMOYO_MAC_FILE_CREATE] = "create",
@@ -43,7 +44,28 @@ const char * const tomoyo_mac_keywords[TOMOYO_MAX_MAC_INDEX
[TOMOYO_MAC_FILE_MOUNT] = "mount",
[TOMOYO_MAC_FILE_UMOUNT] = "unmount",
[TOMOYO_MAC_FILE_PIVOT_ROOT] = "pivot_root",
+ /* CONFIG::network group */
+ [TOMOYO_MAC_NETWORK_INET_STREAM_BIND] = "inet_stream_bind",
+ [TOMOYO_MAC_NETWORK_INET_STREAM_LISTEN] = "inet_stream_listen",
+ [TOMOYO_MAC_NETWORK_INET_STREAM_CONNECT] = "inet_stream_connect",
+ [TOMOYO_MAC_NETWORK_INET_DGRAM_BIND] = "inet_dgram_bind",
+ [TOMOYO_MAC_NETWORK_INET_DGRAM_SEND] = "inet_dgram_send",
+ [TOMOYO_MAC_NETWORK_INET_RAW_BIND] = "inet_raw_bind",
+ [TOMOYO_MAC_NETWORK_INET_RAW_SEND] = "inet_raw_send",
+ [TOMOYO_MAC_NETWORK_UNIX_STREAM_BIND] = "unix_stream_bind",
+ [TOMOYO_MAC_NETWORK_UNIX_STREAM_LISTEN] = "unix_stream_listen",
+ [TOMOYO_MAC_NETWORK_UNIX_STREAM_CONNECT] = "unix_stream_connect",
+ [TOMOYO_MAC_NETWORK_UNIX_DGRAM_BIND] = "unix_dgram_bind",
+ [TOMOYO_MAC_NETWORK_UNIX_DGRAM_SEND] = "unix_dgram_send",
+ [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_BIND] = "unix_seqpacket_bind",
+ [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_LISTEN] = "unix_seqpacket_listen",
+ [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_CONNECT] = "unix_seqpacket_connect",
+ /* CONFIG::misc group */
+ [TOMOYO_MAC_ENVIRON] = "env",
+ /* CONFIG group */
[TOMOYO_MAX_MAC_INDEX + TOMOYO_MAC_CATEGORY_FILE] = "file",
+ [TOMOYO_MAX_MAC_INDEX + TOMOYO_MAC_CATEGORY_NETWORK] = "network",
+ [TOMOYO_MAX_MAC_INDEX + TOMOYO_MAC_CATEGORY_MISC] = "misc",
};
/* String table for conditions. */
@@ -130,10 +152,20 @@ const char * const tomoyo_path_keyword[TOMOYO_MAX_PATH_OPERATION] = {
[TOMOYO_TYPE_UMOUNT] = "unmount",
};
+/* String table for socket's operation. */
+const char * const tomoyo_socket_keyword[TOMOYO_MAX_NETWORK_OPERATION] = {
+ [TOMOYO_NETWORK_BIND] = "bind",
+ [TOMOYO_NETWORK_LISTEN] = "listen",
+ [TOMOYO_NETWORK_CONNECT] = "connect",
+ [TOMOYO_NETWORK_SEND] = "send",
+};
+
/* String table for categories. */
static const char * const tomoyo_category_keywords
[TOMOYO_MAX_MAC_CATEGORY_INDEX] = {
- [TOMOYO_MAC_CATEGORY_FILE] = "file",
+ [TOMOYO_MAC_CATEGORY_FILE] = "file",
+ [TOMOYO_MAC_CATEGORY_NETWORK] = "network",
+ [TOMOYO_MAC_CATEGORY_MISC] = "misc",
};
/* Permit policy management by non-root user? */
@@ -230,13 +262,17 @@ static void tomoyo_set_string(struct tomoyo_io_buffer *head, const char *string)
WARN_ON(1);
}
+static void tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt,
+ ...) __printf(2, 3);
+
/**
* tomoyo_io_printf - printf() to "struct tomoyo_io_buffer" structure.
*
* @head: Pointer to "struct tomoyo_io_buffer".
* @fmt: The printf()'s format string, followed by parameters.
*/
-void tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt, ...)
+static void tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt,
+ ...)
{
va_list args;
size_t len;
@@ -313,7 +349,7 @@ void tomoyo_init_policy_namespace(struct tomoyo_policy_namespace *ns)
INIT_LIST_HEAD(&ns->group_list[idx]);
for (idx = 0; idx < TOMOYO_MAX_POLICY; idx++)
INIT_LIST_HEAD(&ns->policy_list[idx]);
- ns->profile_version = 20100903;
+ ns->profile_version = 20110903;
tomoyo_namespace_enabled = !list_empty(&tomoyo_namespace_list);
list_add_tail_rcu(&ns->namespace_list, &tomoyo_namespace_list);
}
@@ -466,8 +502,10 @@ static struct tomoyo_profile *tomoyo_assign_profile
TOMOYO_CONFIG_WANT_REJECT_LOG;
memset(ptr->config, TOMOYO_CONFIG_USE_DEFAULT,
sizeof(ptr->config));
- ptr->pref[TOMOYO_PREF_MAX_AUDIT_LOG] = 1024;
- ptr->pref[TOMOYO_PREF_MAX_LEARNING_ENTRY] = 2048;
+ ptr->pref[TOMOYO_PREF_MAX_AUDIT_LOG] =
+ CONFIG_SECURITY_TOMOYO_MAX_AUDIT_LOG;
+ ptr->pref[TOMOYO_PREF_MAX_LEARNING_ENTRY] =
+ CONFIG_SECURITY_TOMOYO_MAX_ACCEPT_ENTRY;
mb(); /* Avoid out-of-order execution. */
ns->profile_ptr[profile] = ptr;
entry = NULL;
@@ -951,14 +989,12 @@ static bool tomoyo_select_domain(struct tomoyo_io_buffer *head,
(global_pid = true, sscanf(data, "global-pid=%u", &pid) == 1)) {
struct task_struct *p;
rcu_read_lock();
- read_lock(&tasklist_lock);
if (global_pid)
p = find_task_by_pid_ns(pid, &init_pid_ns);
else
p = find_task_by_vpid(pid);
if (p)
domain = tomoyo_real_domain(p);
- read_unlock(&tasklist_lock);
rcu_read_unlock();
} else if (!strncmp(data, "domain=", 7)) {
if (tomoyo_domain_def(data + 7))
@@ -982,6 +1018,48 @@ static bool tomoyo_select_domain(struct tomoyo_io_buffer *head,
}
/**
+ * tomoyo_same_task_acl - Check for duplicated "struct tomoyo_task_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static bool tomoyo_same_task_acl(const struct tomoyo_acl_info *a,
+ const struct tomoyo_acl_info *b)
+{
+ const struct tomoyo_task_acl *p1 = container_of(a, typeof(*p1), head);
+ const struct tomoyo_task_acl *p2 = container_of(b, typeof(*p2), head);
+ return p1->domainname == p2->domainname;
+}
+
+/**
+ * tomoyo_write_task - Update task related list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_write_task(struct tomoyo_acl_param *param)
+{
+ int error = -EINVAL;
+ if (tomoyo_str_starts(&param->data, "manual_domain_transition ")) {
+ struct tomoyo_task_acl e = {
+ .head.type = TOMOYO_TYPE_MANUAL_TASK_ACL,
+ .domainname = tomoyo_get_domainname(param),
+ };
+ if (e.domainname)
+ error = tomoyo_update_domain(&e.head, sizeof(e), param,
+ tomoyo_same_task_acl,
+ NULL);
+ tomoyo_put_name(e.domainname);
+ }
+ return error;
+}
+
+/**
* tomoyo_delete_domain - Delete a domain.
*
* @domainname: The name of domain.
@@ -1039,11 +1117,16 @@ static int tomoyo_write_domain2(struct tomoyo_policy_namespace *ns,
static const struct {
const char *keyword;
int (*write) (struct tomoyo_acl_param *);
- } tomoyo_callback[1] = {
+ } tomoyo_callback[5] = {
{ "file ", tomoyo_write_file },
+ { "network inet ", tomoyo_write_inet_network },
+ { "network unix ", tomoyo_write_unix_network },
+ { "misc ", tomoyo_write_misc },
+ { "task ", tomoyo_write_task },
};
u8 i;
- for (i = 0; i < 1; i++) {
+
+ for (i = 0; i < ARRAY_SIZE(tomoyo_callback); i++) {
if (!tomoyo_str_starts(&param.data,
tomoyo_callback[i].keyword))
continue;
@@ -1127,6 +1210,10 @@ static bool tomoyo_print_condition(struct tomoyo_io_buffer *head,
case 0:
head->r.cond_index = 0;
head->r.cond_step++;
+ if (cond->transit) {
+ tomoyo_set_space(head);
+ tomoyo_set_string(head, cond->transit->name);
+ }
/* fall through */
case 1:
{
@@ -1239,6 +1326,10 @@ static bool tomoyo_print_condition(struct tomoyo_io_buffer *head,
head->r.cond_step++;
/* fall through */
case 3:
+ if (cond->grant_log != TOMOYO_GRANTLOG_AUTO)
+ tomoyo_io_printf(head, " grant_log=%s",
+ tomoyo_yesno(cond->grant_log ==
+ TOMOYO_GRANTLOG_YES));
tomoyo_set_lf(head);
return true;
}
@@ -1306,6 +1397,12 @@ static bool tomoyo_print_entry(struct tomoyo_io_buffer *head,
if (first)
return true;
tomoyo_print_name_union(head, &ptr->name);
+ } else if (acl_type == TOMOYO_TYPE_MANUAL_TASK_ACL) {
+ struct tomoyo_task_acl *ptr =
+ container_of(acl, typeof(*ptr), head);
+ tomoyo_set_group(head, "task ");
+ tomoyo_set_string(head, "manual_domain_transition ");
+ tomoyo_set_string(head, ptr->domainname->name);
} else if (head->r.print_transition_related_only) {
return true;
} else if (acl_type == TOMOYO_TYPE_PATH2_ACL) {
@@ -1370,6 +1467,60 @@ static bool tomoyo_print_entry(struct tomoyo_io_buffer *head,
tomoyo_print_number_union(head, &ptr->mode);
tomoyo_print_number_union(head, &ptr->major);
tomoyo_print_number_union(head, &ptr->minor);
+ } else if (acl_type == TOMOYO_TYPE_INET_ACL) {
+ struct tomoyo_inet_acl *ptr =
+ container_of(acl, typeof(*ptr), head);
+ const u8 perm = ptr->perm;
+
+ for (bit = 0; bit < TOMOYO_MAX_NETWORK_OPERATION; bit++) {
+ if (!(perm & (1 << bit)))
+ continue;
+ if (first) {
+ tomoyo_set_group(head, "network inet ");
+ tomoyo_set_string(head, tomoyo_proto_keyword
+ [ptr->protocol]);
+ tomoyo_set_space(head);
+ first = false;
+ } else {
+ tomoyo_set_slash(head);
+ }
+ tomoyo_set_string(head, tomoyo_socket_keyword[bit]);
+ }
+ if (first)
+ return true;
+ tomoyo_set_space(head);
+ if (ptr->address.group) {
+ tomoyo_set_string(head, "@");
+ tomoyo_set_string(head, ptr->address.group->group_name
+ ->name);
+ } else {
+ char buf[128];
+ tomoyo_print_ip(buf, sizeof(buf), &ptr->address);
+ tomoyo_io_printf(head, "%s", buf);
+ }
+ tomoyo_print_number_union(head, &ptr->port);
+ } else if (acl_type == TOMOYO_TYPE_UNIX_ACL) {
+ struct tomoyo_unix_acl *ptr =
+ container_of(acl, typeof(*ptr), head);
+ const u8 perm = ptr->perm;
+
+ for (bit = 0; bit < TOMOYO_MAX_NETWORK_OPERATION; bit++) {
+ if (!(perm & (1 << bit)))
+ continue;
+ if (first) {
+ tomoyo_set_group(head, "network unix ");
+ tomoyo_set_string(head, tomoyo_proto_keyword
+ [ptr->protocol]);
+ tomoyo_set_space(head);
+ first = false;
+ } else {
+ tomoyo_set_slash(head);
+ }
+ tomoyo_set_string(head, tomoyo_socket_keyword[bit]);
+ }
+ if (first)
+ return true;
+ tomoyo_print_name_union(head, &ptr->name);
} else if (acl_type == TOMOYO_TYPE_MOUNT_ACL) {
struct tomoyo_mount_acl *ptr =
container_of(acl, typeof(*ptr), head);
@@ -1378,6 +1529,12 @@ static bool tomoyo_print_entry(struct tomoyo_io_buffer *head,
tomoyo_print_name_union(head, &ptr->dir_name);
tomoyo_print_name_union(head, &ptr->fs_type);
tomoyo_print_number_union(head, &ptr->flags);
+ } else if (acl_type == TOMOYO_TYPE_ENV_ACL) {
+ struct tomoyo_env_acl *ptr =
+ container_of(acl, typeof(*ptr), head);
+
+ tomoyo_set_group(head, "misc env ");
+ tomoyo_set_string(head, ptr->env->name);
}
if (acl->cond) {
head->r.print_cond_part = true;
@@ -1510,14 +1667,12 @@ static void tomoyo_read_pid(struct tomoyo_io_buffer *head)
global_pid = true;
pid = (unsigned int) simple_strtoul(buf, NULL, 10);
rcu_read_lock();
- read_lock(&tasklist_lock);
if (global_pid)
p = find_task_by_pid_ns(pid, &init_pid_ns);
else
p = find_task_by_vpid(pid);
if (p)
domain = tomoyo_real_domain(p);
- read_unlock(&tasklist_lock);
rcu_read_unlock();
if (!domain)
return;
@@ -1537,8 +1692,9 @@ static const char *tomoyo_transition_type[TOMOYO_MAX_TRANSITION_TYPE] = {
/* String table for grouping keywords. */
static const char *tomoyo_group_name[TOMOYO_MAX_GROUP] = {
- [TOMOYO_PATH_GROUP] = "path_group ",
- [TOMOYO_NUMBER_GROUP] = "number_group ",
+ [TOMOYO_PATH_GROUP] = "path_group ",
+ [TOMOYO_NUMBER_GROUP] = "number_group ",
+ [TOMOYO_ADDRESS_GROUP] = "address_group ",
};
/**
@@ -1580,7 +1736,7 @@ static int tomoyo_write_exception(struct tomoyo_io_buffer *head)
}
/**
- * tomoyo_read_group - Read "struct tomoyo_path_group"/"struct tomoyo_number_group" list.
+ * tomoyo_read_group - Read "struct tomoyo_path_group"/"struct tomoyo_number_group"/"struct tomoyo_address_group" list.
*
* @head: Pointer to "struct tomoyo_io_buffer".
* @idx: Index number.
@@ -1617,6 +1773,15 @@ static bool tomoyo_read_group(struct tomoyo_io_buffer *head, const int idx)
(ptr,
struct tomoyo_number_group,
head)->number);
+ } else if (idx == TOMOYO_ADDRESS_GROUP) {
+ char buffer[128];
+
+ struct tomoyo_address_group *member =
+ container_of(ptr, typeof(*member),
+ head);
+ tomoyo_print_ip(buffer, sizeof(buffer),
+ &member->address);
+ tomoyo_io_printf(head, " %s", buffer);
}
tomoyo_set_lf(head);
}
@@ -2066,27 +2231,7 @@ static int tomoyo_write_answer(struct tomoyo_io_buffer *head)
static void tomoyo_read_version(struct tomoyo_io_buffer *head)
{
if (!head->r.eof) {
- tomoyo_io_printf(head, "2.4.0");
- head->r.eof = true;
- }
-}
-
-/**
- * tomoyo_read_self_domain - Get the current process's domainname.
- *
- * @head: Pointer to "struct tomoyo_io_buffer".
- *
- * Returns the current process's domainname.
- */
-static void tomoyo_read_self_domain(struct tomoyo_io_buffer *head)
-{
- if (!head->r.eof) {
- /*
- * tomoyo_domain()->domainname != NULL
- * because every process belongs to a domain and
- * the domain's name cannot be NULL.
- */
- tomoyo_io_printf(head, "%s", tomoyo_domain()->domainname->name);
+ tomoyo_io_printf(head, "2.5.0");
head->r.eof = true;
}
}
@@ -2221,10 +2366,6 @@ int tomoyo_open_control(const u8 type, struct file *file)
head->poll = tomoyo_poll_log;
head->read = tomoyo_read_log;
break;
- case TOMOYO_SELFDOMAIN:
- /* /sys/kernel/security/tomoyo/self_domain */
- head->read = tomoyo_read_self_domain;
- break;
case TOMOYO_PROCESS_STATUS:
/* /sys/kernel/security/tomoyo/.process_status */
head->write = tomoyo_write_pid;
@@ -2453,6 +2594,7 @@ ssize_t tomoyo_write_control(struct tomoyo_io_buffer *head,
return -EFAULT;
if (mutex_lock_interruptible(&head->io_sem))
return -EINTR;
+ head->read_user_buf_avail = 0;
idx = tomoyo_read_lock();
/* Read a line and dispatch it to the policy handler. */
while (avail_len > 0) {
@@ -2562,11 +2704,11 @@ void tomoyo_check_profile(void)
struct tomoyo_domain_info *domain;
const int idx = tomoyo_read_lock();
tomoyo_policy_loaded = true;
- printk(KERN_INFO "TOMOYO: 2.4.0\n");
+ printk(KERN_INFO "TOMOYO: 2.5.0\n");
list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
const u8 profile = domain->profile;
const struct tomoyo_policy_namespace *ns = domain->ns;
- if (ns->profile_version != 20100903)
+ if (ns->profile_version != 20110903)
printk(KERN_ERR
"Profile version %u is not supported.\n",
ns->profile_version);
@@ -2577,9 +2719,9 @@ void tomoyo_check_profile(void)
else
continue;
printk(KERN_ERR
- "Userland tools for TOMOYO 2.4 must be installed and "
+ "Userland tools for TOMOYO 2.5 must be installed and "
"policy must be initialized.\n");
- printk(KERN_ERR "Please see http://tomoyo.sourceforge.jp/2.4/ "
+ printk(KERN_ERR "Please see http://tomoyo.sourceforge.jp/2.5/ "
"for more information.\n");
panic("STOP!");
}
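
For illustration (not part of the patch): with the dispatch table in tomoyo_write_domain2() extended above, domain policy now accepts four new directive groups alongside "file", and tomoyo_print_entry() prints them back in the same shape. Hypothetical policy lines (the paths, addresses, ports and domain names are invented for the example):

  network inet stream bind 192.168.1.1 80
  network unix stream connect /var/run/dbus/system_bus_socket
  misc env LD_PRELOAD
  task manual_domain_transition <kernel> /usr/sbin/sshd

A "network inet" entry carries a protocol keyword, an operation, an address (or an @address_group reference) and a port; a "network unix" entry carries a pathname instead; "misc env" names, or pattern-matches, an environment variable; "task manual_domain_transition" names the destination domain.
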
diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h
index f7fbaa66e44..ed311d7a8ce 100644
--- a/security/tomoyo/common.h
+++ b/security/tomoyo/common.h
@@ -3,7 +3,7 @@
*
* Header file for TOMOYO.
*
- * Copyright (C) 2005-2010 NTT DATA CORPORATION
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
*/
#ifndef _SECURITY_TOMOYO_COMMON_H
@@ -23,6 +23,16 @@
#include <linux/poll.h>
#include <linux/binfmts.h>
#include <linux/highmem.h>
+#include <linux/net.h>
+#include <linux/inet.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/un.h>
+#include <net/sock.h>
+#include <net/af_unix.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/udp.h>
/********** Constants definitions. **********/
@@ -34,8 +44,17 @@
#define TOMOYO_HASH_BITS 8
#define TOMOYO_MAX_HASH (1u<<TOMOYO_HASH_BITS)
+/*
+ * TOMOYO checks only SOCK_STREAM, SOCK_DGRAM, SOCK_RAW, SOCK_SEQPACKET.
+ * Therefore, we don't need SOCK_MAX.
+ */
+#define TOMOYO_SOCK_MAX 6
+
#define TOMOYO_EXEC_TMPSIZE 4096
+/* Garbage collector is trying to kfree() this element. */
+#define TOMOYO_GC_IN_PROGRESS -1
+
/* Profile number is an integer between 0 and 255. */
#define TOMOYO_MAX_PROFILES 256
@@ -136,6 +155,7 @@ enum tomoyo_mode_index {
/* Index numbers for entry type. */
enum tomoyo_policy_id {
TOMOYO_ID_GROUP,
+ TOMOYO_ID_ADDRESS_GROUP,
TOMOYO_ID_PATH_GROUP,
TOMOYO_ID_NUMBER_GROUP,
TOMOYO_ID_TRANSITION_CONTROL,
@@ -162,10 +182,21 @@ enum tomoyo_domain_info_flags_index {
TOMOYO_MAX_DOMAIN_INFO_FLAGS
};
+/* Index numbers for audit type. */
+enum tomoyo_grant_log {
+ /* Follow profile's configuration. */
+ TOMOYO_GRANTLOG_AUTO,
+ /* Do not generate grant log. */
+ TOMOYO_GRANTLOG_NO,
+ /* Generate grant_log. */
+ TOMOYO_GRANTLOG_YES,
+};
+
/* Index numbers for group entries. */
enum tomoyo_group_id {
TOMOYO_PATH_GROUP,
TOMOYO_NUMBER_GROUP,
+ TOMOYO_ADDRESS_GROUP,
TOMOYO_MAX_GROUP
};
@@ -196,6 +227,10 @@ enum tomoyo_acl_entry_type_index {
TOMOYO_TYPE_PATH_NUMBER_ACL,
TOMOYO_TYPE_MKDEV_ACL,
TOMOYO_TYPE_MOUNT_ACL,
+ TOMOYO_TYPE_INET_ACL,
+ TOMOYO_TYPE_UNIX_ACL,
+ TOMOYO_TYPE_ENV_ACL,
+ TOMOYO_TYPE_MANUAL_TASK_ACL,
};
/* Index numbers for access controls with one pathname. */
@@ -228,6 +263,15 @@ enum tomoyo_mkdev_acl_index {
TOMOYO_MAX_MKDEV_OPERATION
};
+/* Index numbers for socket operations. */
+enum tomoyo_network_acl_index {
+ TOMOYO_NETWORK_BIND, /* bind() operation. */
+ TOMOYO_NETWORK_LISTEN, /* listen() operation. */
+ TOMOYO_NETWORK_CONNECT, /* connect() operation. */
+ TOMOYO_NETWORK_SEND, /* send() operation. */
+ TOMOYO_MAX_NETWORK_OPERATION
+};
+
/* Index numbers for access controls with two pathnames. */
enum tomoyo_path2_acl_index {
TOMOYO_TYPE_LINK,
@@ -255,7 +299,6 @@ enum tomoyo_securityfs_interface_index {
TOMOYO_EXCEPTIONPOLICY,
TOMOYO_PROCESS_STATUS,
TOMOYO_STAT,
- TOMOYO_SELFDOMAIN,
TOMOYO_AUDIT,
TOMOYO_VERSION,
TOMOYO_PROFILE,
@@ -300,12 +343,30 @@ enum tomoyo_mac_index {
TOMOYO_MAC_FILE_MOUNT,
TOMOYO_MAC_FILE_UMOUNT,
TOMOYO_MAC_FILE_PIVOT_ROOT,
+ TOMOYO_MAC_NETWORK_INET_STREAM_BIND,
+ TOMOYO_MAC_NETWORK_INET_STREAM_LISTEN,
+ TOMOYO_MAC_NETWORK_INET_STREAM_CONNECT,
+ TOMOYO_MAC_NETWORK_INET_DGRAM_BIND,
+ TOMOYO_MAC_NETWORK_INET_DGRAM_SEND,
+ TOMOYO_MAC_NETWORK_INET_RAW_BIND,
+ TOMOYO_MAC_NETWORK_INET_RAW_SEND,
+ TOMOYO_MAC_NETWORK_UNIX_STREAM_BIND,
+ TOMOYO_MAC_NETWORK_UNIX_STREAM_LISTEN,
+ TOMOYO_MAC_NETWORK_UNIX_STREAM_CONNECT,
+ TOMOYO_MAC_NETWORK_UNIX_DGRAM_BIND,
+ TOMOYO_MAC_NETWORK_UNIX_DGRAM_SEND,
+ TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_BIND,
+ TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_LISTEN,
+ TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_CONNECT,
+ TOMOYO_MAC_ENVIRON,
TOMOYO_MAX_MAC_INDEX
};
/* Index numbers for category of functionality. */
enum tomoyo_mac_category_index {
TOMOYO_MAC_CATEGORY_FILE,
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ TOMOYO_MAC_CATEGORY_MISC,
TOMOYO_MAX_MAC_CATEGORY_INDEX
};
@@ -340,7 +401,7 @@ enum tomoyo_pref_index {
/* Common header for holding ACL entries. */
struct tomoyo_acl_head {
struct list_head list;
- bool is_deleted;
+ s8 is_deleted; /* true or false or TOMOYO_GC_IN_PROGRESS */
} __packed;
/* Common header for shared entries. */
@@ -397,13 +458,36 @@ struct tomoyo_request_info {
u8 operation;
} path_number;
struct {
+ const struct tomoyo_path_info *name;
+ } environ;
+ struct {
+ const __be32 *address;
+ u16 port;
+ /* One of values smaller than TOMOYO_SOCK_MAX. */
+ u8 protocol;
+ /* One of values in "enum tomoyo_network_acl_index". */
+ u8 operation;
+ bool is_ipv6;
+ } inet_network;
+ struct {
+ const struct tomoyo_path_info *address;
+ /* One of values smaller than TOMOYO_SOCK_MAX. */
+ u8 protocol;
+ /* One of values in "enum tomoyo_network_acl_index". */
+ u8 operation;
+ } unix_network;
+ struct {
const struct tomoyo_path_info *type;
const struct tomoyo_path_info *dir;
const struct tomoyo_path_info *dev;
unsigned long flags;
int need_dev;
} mount;
+ struct {
+ const struct tomoyo_path_info *domainname;
+ } task;
} param;
+ struct tomoyo_acl_info *matched_acl;
u8 param_type;
bool granted;
u8 retry;
@@ -442,7 +526,14 @@ struct tomoyo_number_union {
u8 value_type[2];
};
-/* Structure for "path_group"/"number_group" directive. */
+/* Structure for holding an IP address. */
+struct tomoyo_ipaddr_union {
+ struct in6_addr ip[2]; /* Big endian. */
+ struct tomoyo_group *group; /* Pointer to address group. */
+ bool is_ipv6; /* Valid only if @group == NULL. */
+};
+
+/* Structure for "path_group"/"number_group"/"address_group" directive. */
struct tomoyo_group {
struct tomoyo_shared_acl_head head;
const struct tomoyo_path_info *group_name;
@@ -461,6 +552,13 @@ struct tomoyo_number_group {
struct tomoyo_number_union number;
};
+/* Structure for "address_group" directive. */
+struct tomoyo_address_group {
+ struct tomoyo_acl_head head;
+ /* Structure for holding an IP address. */
+ struct tomoyo_ipaddr_union address;
+};
+
/* Subset of "struct stat". Used by conditional ACL and audit logs. */
struct tomoyo_mini_stat {
uid_t uid;
@@ -520,6 +618,7 @@ struct tomoyo_execve {
struct tomoyo_request_info r;
struct tomoyo_obj_info obj;
struct linux_binprm *bprm;
+ const struct tomoyo_path_info *transition;
/* For dumping argv[] and envp[]. */
struct tomoyo_page_dump dump;
/* For temporary use. */
@@ -554,6 +653,8 @@ struct tomoyo_condition {
u16 names_count; /* Number of "struct tomoyo_name_union names". */
u16 argc; /* Number of "struct tomoyo_argv". */
u16 envc; /* Number of "struct tomoyo_envp". */
+ u8 grant_log; /* One of values in "enum tomoyo_grant_log". */
+ const struct tomoyo_path_info *transit; /* Maybe NULL. */
/*
* struct tomoyo_condition_element condition[condc];
* struct tomoyo_number_union values[numbers_count];
@@ -567,7 +668,7 @@ struct tomoyo_condition {
struct tomoyo_acl_info {
struct list_head list;
struct tomoyo_condition *cond; /* Maybe NULL. */
- bool is_deleted;
+ s8 is_deleted; /* true or false or TOMOYO_GC_IN_PROGRESS */
u8 type; /* One of values in "enum tomoyo_acl_entry_type_index". */
} __packed;
@@ -587,6 +688,15 @@ struct tomoyo_domain_info {
};
/*
+ * Structure for "task manual_domain_transition" directive.
+ */
+struct tomoyo_task_acl {
+ struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_MANUAL_TASK_ACL */
+ /* Pointer to domainname. */
+ const struct tomoyo_path_info *domainname;
+};
+
+/*
* Structure for "file execute", "file read", "file write", "file append",
* "file unlink", "file getattr", "file rmdir", "file truncate",
* "file symlink", "file chroot" and "file unmount" directive.
@@ -638,6 +748,29 @@ struct tomoyo_mount_acl {
struct tomoyo_number_union flags;
};
+/* Structure for "misc env" directive in domain policy. */
+struct tomoyo_env_acl {
+ struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_ENV_ACL */
+ const struct tomoyo_path_info *env; /* environment variable */
+};
+
+/* Structure for "network inet" directive. */
+struct tomoyo_inet_acl {
+ struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_INET_ACL */
+ u8 protocol;
+ u8 perm; /* Bitmask of values in "enum tomoyo_network_acl_index" */
+ struct tomoyo_ipaddr_union address;
+ struct tomoyo_number_union port;
+};
+
+/* Structure for "network unix" directive. */
+struct tomoyo_unix_acl {
+ struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_UNIX_ACL */
+ u8 protocol;
+ u8 perm; /* Bitmask of values in "enum tomoyo_network_acl_index" */
+ struct tomoyo_name_union name;
+};
+
/* Structure for holding a line from /sys/kernel/security/tomoyo/ interface. */
struct tomoyo_acl_param {
char *data;
@@ -773,7 +906,7 @@ struct tomoyo_policy_namespace {
struct list_head acl_group[TOMOYO_MAX_ACL_GROUPS];
/* List for connecting to tomoyo_namespace_list list. */
struct list_head namespace_list;
- /* Profile version. Currently only 20100903 is defined. */
+ /* Profile version. Currently only 20110903 is defined. */
unsigned int profile_version;
/* Name of this namespace (e.g. "<kernel>", "</usr/sbin/httpd>" ). */
const char *name;
@@ -781,6 +914,8 @@ struct tomoyo_policy_namespace {
/********** Function prototypes. **********/
+bool tomoyo_address_matches_group(const bool is_ipv6, const __be32 *address,
+ const struct tomoyo_group *group);
bool tomoyo_compare_number_union(const unsigned long value,
const struct tomoyo_number_union *ptr);
bool tomoyo_condition(struct tomoyo_request_info *r,
@@ -796,6 +931,8 @@ bool tomoyo_memory_ok(void *ptr);
bool tomoyo_number_matches_group(const unsigned long min,
const unsigned long max,
const struct tomoyo_group *group);
+bool tomoyo_parse_ipaddr_union(struct tomoyo_acl_param *param,
+ struct tomoyo_ipaddr_union *ptr);
bool tomoyo_parse_name_union(struct tomoyo_acl_param *param,
struct tomoyo_name_union *ptr);
bool tomoyo_parse_number_union(struct tomoyo_acl_param *param,
@@ -805,6 +942,7 @@ bool tomoyo_path_matches_pattern(const struct tomoyo_path_info *filename,
bool tomoyo_permstr(const char *string, const char *keyword);
bool tomoyo_str_starts(char **src, const char *find);
char *tomoyo_encode(const char *str);
+char *tomoyo_encode2(const char *str, int str_len);
char *tomoyo_init_log(struct tomoyo_request_info *r, int len, const char *fmt,
va_list args);
char *tomoyo_read_token(struct tomoyo_acl_param *param);
@@ -814,12 +952,17 @@ const char *tomoyo_get_exe(void);
const char *tomoyo_yesno(const unsigned int value);
const struct tomoyo_path_info *tomoyo_compare_name_union
(const struct tomoyo_path_info *name, const struct tomoyo_name_union *ptr);
+const struct tomoyo_path_info *tomoyo_get_domainname
+(struct tomoyo_acl_param *param);
const struct tomoyo_path_info *tomoyo_get_name(const char *name);
const struct tomoyo_path_info *tomoyo_path_matches_group
(const struct tomoyo_path_info *pathname, const struct tomoyo_group *group);
int tomoyo_check_open_permission(struct tomoyo_domain_info *domain,
struct path *path, const int flag);
int tomoyo_close_control(struct tomoyo_io_buffer *head);
+int tomoyo_env_perm(struct tomoyo_request_info *r, const char *env);
+int tomoyo_execute_permission(struct tomoyo_request_info *r,
+ const struct tomoyo_path_info *filename);
int tomoyo_find_next_domain(struct linux_binprm *bprm);
int tomoyo_get_mode(const struct tomoyo_policy_namespace *ns, const u8 profile,
const u8 index);
@@ -838,10 +981,15 @@ int tomoyo_path_number_perm(const u8 operation, struct path *path,
unsigned long number);
int tomoyo_path_perm(const u8 operation, struct path *path,
const char *target);
-int tomoyo_path_permission(struct tomoyo_request_info *r, u8 operation,
- const struct tomoyo_path_info *filename);
int tomoyo_poll_control(struct file *file, poll_table *wait);
int tomoyo_poll_log(struct file *file, poll_table *wait);
+int tomoyo_socket_bind_permission(struct socket *sock, struct sockaddr *addr,
+ int addr_len);
+int tomoyo_socket_connect_permission(struct socket *sock,
+ struct sockaddr *addr, int addr_len);
+int tomoyo_socket_listen_permission(struct socket *sock);
+int tomoyo_socket_sendmsg_permission(struct socket *sock, struct msghdr *msg,
+ int size);
int tomoyo_supervisor(struct tomoyo_request_info *r, const char *fmt, ...)
__printf(2, 3);
int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size,
@@ -860,8 +1008,11 @@ int tomoyo_update_policy(struct tomoyo_acl_head *new_entry, const int size,
int tomoyo_write_aggregator(struct tomoyo_acl_param *param);
int tomoyo_write_file(struct tomoyo_acl_param *param);
int tomoyo_write_group(struct tomoyo_acl_param *param, const u8 type);
+int tomoyo_write_misc(struct tomoyo_acl_param *param);
+int tomoyo_write_inet_network(struct tomoyo_acl_param *param);
int tomoyo_write_transition_control(struct tomoyo_acl_param *param,
const u8 type);
+int tomoyo_write_unix_network(struct tomoyo_acl_param *param);
ssize_t tomoyo_read_control(struct tomoyo_io_buffer *head, char __user *buffer,
const int buffer_len);
ssize_t tomoyo_write_control(struct tomoyo_io_buffer *head,
@@ -891,12 +1042,11 @@ void tomoyo_del_condition(struct list_head *element);
void tomoyo_fill_path_info(struct tomoyo_path_info *ptr);
void tomoyo_get_attributes(struct tomoyo_obj_info *obj);
void tomoyo_init_policy_namespace(struct tomoyo_policy_namespace *ns);
-void tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt, ...)
- __printf(2, 3);
void tomoyo_load_policy(const char *filename);
-void tomoyo_memory_free(void *ptr);
void tomoyo_normalize_line(unsigned char *buffer);
void tomoyo_notify_gc(struct tomoyo_io_buffer *head, const bool is_register);
+void tomoyo_print_ip(char *buf, const unsigned int size,
+ const struct tomoyo_ipaddr_union *ptr);
void tomoyo_print_ulong(char *buffer, const int buffer_len,
const unsigned long value, const u8 type);
void tomoyo_put_name_union(struct tomoyo_name_union *ptr);
@@ -919,6 +1069,8 @@ extern const char * const tomoyo_mac_keywords[TOMOYO_MAX_MAC_INDEX
+ TOMOYO_MAX_MAC_CATEGORY_INDEX];
extern const char * const tomoyo_mode[TOMOYO_CONFIG_MAX_MODE];
extern const char * const tomoyo_path_keyword[TOMOYO_MAX_PATH_OPERATION];
+extern const char * const tomoyo_proto_keyword[TOMOYO_SOCK_MAX];
+extern const char * const tomoyo_socket_keyword[TOMOYO_MAX_NETWORK_OPERATION];
extern const u8 tomoyo_index2category[TOMOYO_MAX_MAC_INDEX];
extern const u8 tomoyo_pn2mac[TOMOYO_MAX_PATH_NUMBER_OPERATION];
extern const u8 tomoyo_pnnn2mac[TOMOYO_MAX_MKDEV_OPERATION];
@@ -1098,6 +1250,21 @@ static inline bool tomoyo_same_number_union
}
/**
+ * tomoyo_same_ipaddr_union - Check for duplicated "struct tomoyo_ipaddr_union" entry.
+ *
+ * @a: Pointer to "struct tomoyo_ipaddr_union".
+ * @b: Pointer to "struct tomoyo_ipaddr_union".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static inline bool tomoyo_same_ipaddr_union
+(const struct tomoyo_ipaddr_union *a, const struct tomoyo_ipaddr_union *b)
+{
+ return !memcmp(a->ip, b->ip, sizeof(a->ip)) && a->group == b->group &&
+ a->is_ipv6 == b->is_ipv6;
+}
+
+/**
* tomoyo_current_namespace - Get "struct tomoyo_policy_namespace" for current thread.
*
* Returns pointer to "struct tomoyo_policy_namespace" for current thread.
diff --git a/security/tomoyo/condition.c b/security/tomoyo/condition.c
index 8a05f71eaf6..986330b8c73 100644
--- a/security/tomoyo/condition.c
+++ b/security/tomoyo/condition.c
@@ -348,6 +348,7 @@ static inline bool tomoyo_same_condition(const struct tomoyo_condition *a,
a->numbers_count == b->numbers_count &&
a->names_count == b->names_count &&
a->argc == b->argc && a->envc == b->envc &&
+ a->grant_log == b->grant_log && a->transit == b->transit &&
!memcmp(a + 1, b + 1, a->size - sizeof(*a));
}
@@ -399,8 +400,9 @@ static struct tomoyo_condition *tomoyo_commit_condition
found = true;
goto out;
}
- list_for_each_entry_rcu(ptr, &tomoyo_condition_list, head.list) {
- if (!tomoyo_same_condition(ptr, entry))
+ list_for_each_entry(ptr, &tomoyo_condition_list, head.list) {
+ if (!tomoyo_same_condition(ptr, entry) ||
+ atomic_read(&ptr->head.users) == TOMOYO_GC_IN_PROGRESS)
continue;
/* Same entry found. Share this entry. */
atomic_inc(&ptr->head.users);
@@ -410,8 +412,7 @@ static struct tomoyo_condition *tomoyo_commit_condition
if (!found) {
if (tomoyo_memory_ok(entry)) {
atomic_set(&entry->head.users, 1);
- list_add_rcu(&entry->head.list,
- &tomoyo_condition_list);
+ list_add(&entry->head.list, &tomoyo_condition_list);
} else {
found = true;
ptr = NULL;
@@ -428,6 +429,46 @@ out:
}
/**
+ * tomoyo_get_transit_preference - Parse domain transition preference for execve().
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ * @e: Pointer to "struct tomoyo_condition".
+ *
+ * Returns the condition string part.
+ */
+static char *tomoyo_get_transit_preference(struct tomoyo_acl_param *param,
+ struct tomoyo_condition *e)
+{
+ char * const pos = param->data;
+ bool flag;
+ if (*pos == '<') {
+ e->transit = tomoyo_get_domainname(param);
+ goto done;
+ }
+ {
+ char *cp = strchr(pos, ' ');
+ if (cp)
+ *cp = '\0';
+ flag = tomoyo_correct_path(pos) || !strcmp(pos, "keep") ||
+ !strcmp(pos, "initialize") || !strcmp(pos, "reset") ||
+ !strcmp(pos, "child") || !strcmp(pos, "parent");
+ if (cp)
+ *cp = ' ';
+ }
+ if (!flag)
+ return pos;
+ e->transit = tomoyo_get_name(tomoyo_read_token(param));
+done:
+ if (e->transit)
+ return param->data;
+ /*
+ * Return a bad read-only condition string that will let
+ * tomoyo_get_condition() return NULL.
+ */
+ return "/";
+}
+
+/**
* tomoyo_get_condition - Parse condition part.
*
* @param: Pointer to "struct tomoyo_acl_param".
@@ -443,7 +484,8 @@ struct tomoyo_condition *tomoyo_get_condition(struct tomoyo_acl_param *param)
struct tomoyo_argv *argv = NULL;
struct tomoyo_envp *envp = NULL;
struct tomoyo_condition e = { };
- char * const start_of_string = param->data;
+ char * const start_of_string =
+ tomoyo_get_transit_preference(param, &e);
char * const end_of_string = start_of_string + strlen(start_of_string);
char *pos;
rerun:
@@ -486,6 +528,20 @@ rerun:
goto out;
dprintk(KERN_WARNING "%u: <%s>%s=<%s>\n", __LINE__, left_word,
is_not ? "!" : "", right_word);
+ if (!strcmp(left_word, "grant_log")) {
+ if (entry) {
+ if (is_not ||
+ entry->grant_log != TOMOYO_GRANTLOG_AUTO)
+ goto out;
+ else if (!strcmp(right_word, "yes"))
+ entry->grant_log = TOMOYO_GRANTLOG_YES;
+ else if (!strcmp(right_word, "no"))
+ entry->grant_log = TOMOYO_GRANTLOG_NO;
+ else
+ goto out;
+ }
+ continue;
+ }
if (!strncmp(left_word, "exec.argv[", 10)) {
if (!argv) {
e.argc++;
@@ -593,8 +649,9 @@ store_value:
+ e.envc * sizeof(struct tomoyo_envp);
entry = kzalloc(e.size, GFP_NOFS);
if (!entry)
- return NULL;
+ goto out2;
*entry = e;
+ e.transit = NULL;
condp = (struct tomoyo_condition_element *) (entry + 1);
numbers_p = (struct tomoyo_number_union *) (condp + e.condc);
names_p = (struct tomoyo_name_union *) (numbers_p + e.numbers_count);
@@ -621,6 +678,8 @@ out:
tomoyo_del_condition(&entry->head.list);
kfree(entry);
}
+out2:
+ tomoyo_put_name(e.transit);
return NULL;
}
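
For illustration (not part of the patch): tomoyo_get_transit_preference() above lets a "file execute" line specify the domain to transit to immediately after the program's pathname, either an explicit domainname (starting with '<'), a pathname to append to the current domain, or one of the keywords "keep", "initialize", "reset", "child" and "parent". The new grant_log= condition overrides the profile's audit setting for a single entry. Hypothetical examples (paths and domain names invented):

  file execute /bin/mount keep
  file execute /usr/sbin/httpd <kernel> /usr/sbin/httpd
  file read /var/log/syslog grant_log=no

A transition preference is accepted only on "file execute" entries; tomoyo_update_domain() in domain.c (next file below) rejects it on anything else, and tomoyo_find_next_domain() makes execve() fail when the requested transition cannot be performed.
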
diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c
index cd0f92d88bb..9027ac1534a 100644
--- a/security/tomoyo/domain.c
+++ b/security/tomoyo/domain.c
@@ -39,6 +39,8 @@ int tomoyo_update_policy(struct tomoyo_acl_head *new_entry, const int size,
if (mutex_lock_interruptible(&tomoyo_policy_lock))
return -ENOMEM;
list_for_each_entry_rcu(entry, list, list) {
+ if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS)
+ continue;
if (!check_duplicate(entry, new_entry))
continue;
entry->is_deleted = param->is_delete;
@@ -102,10 +104,21 @@ int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size,
new_entry->cond = tomoyo_get_condition(param);
if (!new_entry->cond)
return -EINVAL;
+ /*
+	 * Domain transition preference is allowed only for
+ * "file execute" entries.
+ */
+ if (new_entry->cond->transit &&
+ !(new_entry->type == TOMOYO_TYPE_PATH_ACL &&
+ container_of(new_entry, struct tomoyo_path_acl, head)
+ ->perm == 1 << TOMOYO_TYPE_EXECUTE))
+ goto out;
}
if (mutex_lock_interruptible(&tomoyo_policy_lock))
goto out;
list_for_each_entry_rcu(entry, list, list) {
+ if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS)
+ continue;
if (!tomoyo_same_acl_head(entry, new_entry) ||
!check_duplicate(entry, new_entry))
continue;
@@ -157,6 +170,7 @@ retry:
continue;
if (!tomoyo_condition(r, ptr->cond))
continue;
+ r->matched_acl = ptr;
r->granted = true;
return;
}
@@ -501,7 +515,8 @@ struct tomoyo_domain_info *tomoyo_assign_domain(const char *domainname,
* that domain. Do not perform domain transition if
* profile for that domain is not yet created.
*/
- if (!entry->ns->profile_ptr[entry->profile])
+ if (tomoyo_policy_loaded &&
+ !entry->ns->profile_ptr[entry->profile])
return NULL;
}
return entry;
@@ -557,12 +572,99 @@ out:
tomoyo_write_log(&r, "use_profile %u\n",
entry->profile);
tomoyo_write_log(&r, "use_group %u\n", entry->group);
+ tomoyo_update_stat(TOMOYO_STAT_POLICY_UPDATES);
}
}
return entry;
}
/**
+ * tomoyo_environ - Check permission for environment variable names.
+ *
+ * @ee: Pointer to "struct tomoyo_execve".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_environ(struct tomoyo_execve *ee)
+{
+ struct tomoyo_request_info *r = &ee->r;
+ struct linux_binprm *bprm = ee->bprm;
+ /* env_page.data is allocated by tomoyo_dump_page(). */
+ struct tomoyo_page_dump env_page = { };
+ char *arg_ptr; /* Size is TOMOYO_EXEC_TMPSIZE bytes */
+ int arg_len = 0;
+ unsigned long pos = bprm->p;
+ int offset = pos % PAGE_SIZE;
+ int argv_count = bprm->argc;
+ int envp_count = bprm->envc;
+ int error = -ENOMEM;
+
+ ee->r.type = TOMOYO_MAC_ENVIRON;
+ ee->r.profile = r->domain->profile;
+ ee->r.mode = tomoyo_get_mode(r->domain->ns, ee->r.profile,
+ TOMOYO_MAC_ENVIRON);
+ if (!r->mode || !envp_count)
+ return 0;
+ arg_ptr = kzalloc(TOMOYO_EXEC_TMPSIZE, GFP_NOFS);
+ if (!arg_ptr)
+ goto out;
+ while (error == -ENOMEM) {
+ if (!tomoyo_dump_page(bprm, pos, &env_page))
+ goto out;
+ pos += PAGE_SIZE - offset;
+ /* Read. */
+ while (argv_count && offset < PAGE_SIZE) {
+ if (!env_page.data[offset++])
+ argv_count--;
+ }
+ if (argv_count) {
+ offset = 0;
+ continue;
+ }
+ while (offset < PAGE_SIZE) {
+ const unsigned char c = env_page.data[offset++];
+
+ if (c && arg_len < TOMOYO_EXEC_TMPSIZE - 10) {
+ if (c == '=') {
+ arg_ptr[arg_len++] = '\0';
+ } else if (c == '\\') {
+ arg_ptr[arg_len++] = '\\';
+ arg_ptr[arg_len++] = '\\';
+ } else if (c > ' ' && c < 127) {
+ arg_ptr[arg_len++] = c;
+ } else {
+ arg_ptr[arg_len++] = '\\';
+ arg_ptr[arg_len++] = (c >> 6) + '0';
+ arg_ptr[arg_len++]
+ = ((c >> 3) & 7) + '0';
+ arg_ptr[arg_len++] = (c & 7) + '0';
+ }
+ } else {
+ arg_ptr[arg_len] = '\0';
+ }
+ if (c)
+ continue;
+ if (tomoyo_env_perm(r, arg_ptr)) {
+ error = -EPERM;
+ break;
+ }
+ if (!--envp_count) {
+ error = 0;
+ break;
+ }
+ arg_len = 0;
+ }
+ offset = 0;
+ }
+out:
+ if (r->mode != TOMOYO_CONFIG_ENFORCING)
+ error = 0;
+ kfree(env_page.data);
+ kfree(arg_ptr);
+ return error;
+}
+
+/**
* tomoyo_find_next_domain - Find a domain.
*
* @bprm: Pointer to "struct linux_binprm".
@@ -577,10 +679,11 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm)
struct tomoyo_domain_info *domain = NULL;
const char *original_name = bprm->filename;
int retval = -ENOMEM;
- bool need_kfree = false;
bool reject_on_transition_failure = false;
- struct tomoyo_path_info rn = { }; /* real name */
+ const struct tomoyo_path_info *candidate;
+ struct tomoyo_path_info exename;
struct tomoyo_execve *ee = kzalloc(sizeof(*ee), GFP_NOFS);
+
if (!ee)
return -ENOMEM;
ee->tmp = kzalloc(TOMOYO_EXEC_TMPSIZE, GFP_NOFS);
@@ -594,40 +697,32 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm)
ee->bprm = bprm;
ee->r.obj = &ee->obj;
ee->obj.path1 = bprm->file->f_path;
- retry:
- if (need_kfree) {
- kfree(rn.name);
- need_kfree = false;
- }
/* Get symlink's pathname of program. */
retval = -ENOENT;
- rn.name = tomoyo_realpath_nofollow(original_name);
- if (!rn.name)
+ exename.name = tomoyo_realpath_nofollow(original_name);
+ if (!exename.name)
goto out;
- tomoyo_fill_path_info(&rn);
- need_kfree = true;
-
+ tomoyo_fill_path_info(&exename);
+retry:
/* Check 'aggregator' directive. */
{
struct tomoyo_aggregator *ptr;
struct list_head *list =
&old_domain->ns->policy_list[TOMOYO_ID_AGGREGATOR];
/* Check 'aggregator' directive. */
+ candidate = &exename;
list_for_each_entry_rcu(ptr, list, head.list) {
if (ptr->head.is_deleted ||
- !tomoyo_path_matches_pattern(&rn,
+ !tomoyo_path_matches_pattern(&exename,
ptr->original_name))
continue;
- kfree(rn.name);
- need_kfree = false;
- /* This is OK because it is read only. */
- rn = *ptr->aggregated_name;
+ candidate = ptr->aggregated_name;
break;
}
}
/* Check execute permission. */
- retval = tomoyo_path_permission(&ee->r, TOMOYO_TYPE_EXECUTE, &rn);
+ retval = tomoyo_execute_permission(&ee->r, candidate);
if (retval == TOMOYO_RETRY_REQUEST)
goto retry;
if (retval < 0)
@@ -638,20 +733,51 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm)
* wildcard) rather than the pathname passed to execve()
* (which never contains wildcard).
*/
- if (ee->r.param.path.matched_path) {
- if (need_kfree)
- kfree(rn.name);
- need_kfree = false;
- /* This is OK because it is read only. */
- rn = *ee->r.param.path.matched_path;
- }
+ if (ee->r.param.path.matched_path)
+ candidate = ee->r.param.path.matched_path;
- /* Calculate domain to transit to. */
+ /*
+ * Check for domain transition preference if "file execute" matched.
+	 * If a preference is given, make do_execve() fail when the domain
+	 * transition fails, because a preference is only meaningful when the
+	 * destination domain is defined.
+ */
+ if (ee->transition) {
+ const char *domainname = ee->transition->name;
+ reject_on_transition_failure = true;
+ if (!strcmp(domainname, "keep"))
+ goto force_keep_domain;
+ if (!strcmp(domainname, "child"))
+ goto force_child_domain;
+ if (!strcmp(domainname, "reset"))
+ goto force_reset_domain;
+ if (!strcmp(domainname, "initialize"))
+ goto force_initialize_domain;
+ if (!strcmp(domainname, "parent")) {
+ char *cp;
+ strncpy(ee->tmp, old_domain->domainname->name,
+ TOMOYO_EXEC_TMPSIZE - 1);
+ cp = strrchr(ee->tmp, ' ');
+ if (cp)
+ *cp = '\0';
+ } else if (*domainname == '<')
+ strncpy(ee->tmp, domainname, TOMOYO_EXEC_TMPSIZE - 1);
+ else
+ snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "%s %s",
+ old_domain->domainname->name, domainname);
+ goto force_jump_domain;
+ }
+ /*
+ * No domain transition preference specified.
+ * Calculate domain to transit to.
+ */
switch (tomoyo_transition_type(old_domain->ns, old_domain->domainname,
- &rn)) {
+ candidate)) {
case TOMOYO_TRANSITION_CONTROL_RESET:
+force_reset_domain:
/* Transit to the root of specified namespace. */
- snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "<%s>", rn.name);
+ snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "<%s>",
+ candidate->name);
/*
* Make do_execve() fail if domain transition across namespaces
* has failed.
@@ -659,11 +785,13 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm)
reject_on_transition_failure = true;
break;
case TOMOYO_TRANSITION_CONTROL_INITIALIZE:
+force_initialize_domain:
/* Transit to the child of current namespace's root. */
snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "%s %s",
- old_domain->ns->name, rn.name);
+ old_domain->ns->name, candidate->name);
break;
case TOMOYO_TRANSITION_CONTROL_KEEP:
+force_keep_domain:
/* Keep current domain. */
domain = old_domain;
break;
@@ -677,13 +805,15 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm)
* before /sbin/init.
*/
domain = old_domain;
- } else {
- /* Normal domain transition. */
- snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "%s %s",
- old_domain->domainname->name, rn.name);
+ break;
}
+force_child_domain:
+ /* Normal domain transition. */
+ snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "%s %s",
+ old_domain->domainname->name, candidate->name);
break;
}
+force_jump_domain:
if (!domain)
domain = tomoyo_assign_domain(ee->tmp, true);
if (domain)
@@ -711,8 +841,11 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm)
/* Update reference count on "struct tomoyo_domain_info". */
atomic_inc(&domain->users);
bprm->cred->security = domain;
- if (need_kfree)
- kfree(rn.name);
+ kfree(exename.name);
+ if (!retval) {
+ ee->r.domain = domain;
+ retval = tomoyo_environ(ee);
+ }
kfree(ee->tmp);
kfree(ee->dump.data);
kfree(ee);
@@ -732,7 +865,8 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos,
struct tomoyo_page_dump *dump)
{
struct page *page;
- /* dump->data is released by tomoyo_finish_execve(). */
+
+ /* dump->data is released by tomoyo_find_next_domain(). */
if (!dump->data) {
dump->data = kzalloc(PAGE_SIZE, GFP_NOFS);
if (!dump->data)
@@ -753,6 +887,7 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos,
* So do I.
*/
char *kaddr = kmap_atomic(page, KM_USER0);
+
dump->page = page;
memcpy(dump->data + offset, kaddr + offset,
PAGE_SIZE - offset);
diff --git a/security/tomoyo/environ.c b/security/tomoyo/environ.c
new file mode 100644
index 00000000000..ad4c6e18a43
--- /dev/null
+++ b/security/tomoyo/environ.c
@@ -0,0 +1,122 @@
+/*
+ * security/tomoyo/environ.c
+ *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
+ */
+
+#include "common.h"
+
+/**
+ * tomoyo_check_env_acl - Check permission for environment variable's name.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @ptr: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if granted, false otherwise.
+ */
+static bool tomoyo_check_env_acl(struct tomoyo_request_info *r,
+ const struct tomoyo_acl_info *ptr)
+{
+ const struct tomoyo_env_acl *acl =
+ container_of(ptr, typeof(*acl), head);
+
+ return tomoyo_path_matches_pattern(r->param.environ.name, acl->env);
+}
+
+/**
+ * tomoyo_audit_env_log - Audit environment variable name log.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_audit_env_log(struct tomoyo_request_info *r)
+{
+ return tomoyo_supervisor(r, "misc env %s\n",
+ r->param.environ.name->name);
+}
+
+/**
+ * tomoyo_env_perm - Check permission for environment variable's name.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @env: The name of environment variable.
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+int tomoyo_env_perm(struct tomoyo_request_info *r, const char *env)
+{
+ struct tomoyo_path_info environ;
+ int error;
+
+ if (!env || !*env)
+ return 0;
+ environ.name = env;
+ tomoyo_fill_path_info(&environ);
+ r->param_type = TOMOYO_TYPE_ENV_ACL;
+ r->param.environ.name = &environ;
+ do {
+ tomoyo_check_acl(r, tomoyo_check_env_acl);
+ error = tomoyo_audit_env_log(r);
+ } while (error == TOMOYO_RETRY_REQUEST);
+ return error;
+}
+
+/**
+ * tomoyo_same_env_acl - Check for duplicated "struct tomoyo_env_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static bool tomoyo_same_env_acl(const struct tomoyo_acl_info *a,
+ const struct tomoyo_acl_info *b)
+{
+ const struct tomoyo_env_acl *p1 = container_of(a, typeof(*p1), head);
+ const struct tomoyo_env_acl *p2 = container_of(b, typeof(*p2), head);
+
+ return p1->env == p2->env;
+}
+
+/**
+ * tomoyo_write_env - Write "struct tomoyo_env_acl" list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_write_env(struct tomoyo_acl_param *param)
+{
+ struct tomoyo_env_acl e = { .head.type = TOMOYO_TYPE_ENV_ACL };
+ int error = -ENOMEM;
+ const char *data = tomoyo_read_token(param);
+
+ if (!tomoyo_correct_word(data) || strchr(data, '='))
+ return -EINVAL;
+ e.env = tomoyo_get_name(data);
+ if (!e.env)
+ return error;
+ error = tomoyo_update_domain(&e.head, sizeof(e), param,
+ tomoyo_same_env_acl, NULL);
+ tomoyo_put_name(e.env);
+ return error;
+}
+
+/**
+ * tomoyo_write_misc - Update environment variable list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_write_misc(struct tomoyo_acl_param *param)
+{
+ if (tomoyo_str_starts(&param->data, "env "))
+ return tomoyo_write_env(param);
+ return -EINVAL;
+}
diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c
index 743c35f5084..40039079074 100644
--- a/security/tomoyo/file.c
+++ b/security/tomoyo/file.c
@@ -555,8 +555,8 @@ static int tomoyo_update_path2_acl(const u8 perm,
*
* Caller holds tomoyo_read_lock().
*/
-int tomoyo_path_permission(struct tomoyo_request_info *r, u8 operation,
- const struct tomoyo_path_info *filename)
+static int tomoyo_path_permission(struct tomoyo_request_info *r, u8 operation,
+ const struct tomoyo_path_info *filename)
{
int error;
@@ -570,16 +570,42 @@ int tomoyo_path_permission(struct tomoyo_request_info *r, u8 operation,
do {
tomoyo_check_acl(r, tomoyo_check_path_acl);
error = tomoyo_audit_path_log(r);
- /*
- * Do not retry for execute request, for alias may have
- * changed.
- */
- } while (error == TOMOYO_RETRY_REQUEST &&
- operation != TOMOYO_TYPE_EXECUTE);
+ } while (error == TOMOYO_RETRY_REQUEST);
return error;
}
/**
+ * tomoyo_execute_permission - Check permission for execute operation.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @filename: Filename to check.
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+int tomoyo_execute_permission(struct tomoyo_request_info *r,
+ const struct tomoyo_path_info *filename)
+{
+ /*
+ * Unlike other permission checks, this check is done regardless of
+ * profile mode settings in order to check for domain transition
+ * preference.
+ */
+ r->type = TOMOYO_MAC_FILE_EXECUTE;
+ r->mode = tomoyo_get_mode(r->domain->ns, r->profile, r->type);
+ r->param_type = TOMOYO_TYPE_PATH_ACL;
+ r->param.path.filename = filename;
+ r->param.path.operation = TOMOYO_TYPE_EXECUTE;
+ tomoyo_check_acl(r, tomoyo_check_path_acl);
+ r->ee->transition = r->matched_acl && r->matched_acl->cond ?
+ r->matched_acl->cond->transit : NULL;
+ if (r->mode != TOMOYO_CONFIG_DISABLED)
+ return tomoyo_audit_path_log(r);
+ return 0;
+}
+
+/**
* tomoyo_same_path_number_acl - Check for duplicated "struct tomoyo_path_number_acl" entry.
*
* @a: Pointer to "struct tomoyo_acl_info".
diff --git a/security/tomoyo/gc.c b/security/tomoyo/gc.c
index ae135fbbbe9..986a6a75686 100644
--- a/security/tomoyo/gc.c
+++ b/security/tomoyo/gc.c
@@ -8,36 +8,26 @@
#include <linux/kthread.h>
#include <linux/slab.h>
+/**
+ * tomoyo_memory_free - Free memory for elements.
+ *
+ * @ptr: Pointer to allocated memory.
+ *
+ * Returns nothing.
+ *
+ * Caller holds tomoyo_policy_lock mutex.
+ */
+static inline void tomoyo_memory_free(void *ptr)
+{
+ tomoyo_memory_used[TOMOYO_MEMORY_POLICY] -= ksize(ptr);
+ kfree(ptr);
+}
+
/* The list for "struct tomoyo_io_buffer". */
static LIST_HEAD(tomoyo_io_buffer_list);
/* Lock for protecting tomoyo_io_buffer_list. */
static DEFINE_SPINLOCK(tomoyo_io_buffer_list_lock);
-/* Size of an element. */
-static const u8 tomoyo_element_size[TOMOYO_MAX_POLICY] = {
- [TOMOYO_ID_GROUP] = sizeof(struct tomoyo_group),
- [TOMOYO_ID_PATH_GROUP] = sizeof(struct tomoyo_path_group),
- [TOMOYO_ID_NUMBER_GROUP] = sizeof(struct tomoyo_number_group),
- [TOMOYO_ID_AGGREGATOR] = sizeof(struct tomoyo_aggregator),
- [TOMOYO_ID_TRANSITION_CONTROL] =
- sizeof(struct tomoyo_transition_control),
- [TOMOYO_ID_MANAGER] = sizeof(struct tomoyo_manager),
- /* [TOMOYO_ID_CONDITION] = "struct tomoyo_condition"->size, */
- /* [TOMOYO_ID_NAME] = "struct tomoyo_name"->size, */
- /* [TOMOYO_ID_ACL] =
- tomoyo_acl_size["struct tomoyo_acl_info"->type], */
- [TOMOYO_ID_DOMAIN] = sizeof(struct tomoyo_domain_info),
-};
-
-/* Size of a domain ACL element. */
-static const u8 tomoyo_acl_size[] = {
- [TOMOYO_TYPE_PATH_ACL] = sizeof(struct tomoyo_path_acl),
- [TOMOYO_TYPE_PATH2_ACL] = sizeof(struct tomoyo_path2_acl),
- [TOMOYO_TYPE_PATH_NUMBER_ACL] = sizeof(struct tomoyo_path_number_acl),
- [TOMOYO_TYPE_MKDEV_ACL] = sizeof(struct tomoyo_mkdev_acl),
- [TOMOYO_TYPE_MOUNT_ACL] = sizeof(struct tomoyo_mount_acl),
-};
-
/**
* tomoyo_struct_used_by_io_buffer - Check whether the list element is used by /sys/kernel/security/tomoyo/ users or not.
*
@@ -55,15 +45,11 @@ static bool tomoyo_struct_used_by_io_buffer(const struct list_head *element)
list_for_each_entry(head, &tomoyo_io_buffer_list, list) {
head->users++;
spin_unlock(&tomoyo_io_buffer_list_lock);
- if (mutex_lock_interruptible(&head->io_sem)) {
- in_use = true;
- goto out;
- }
+ mutex_lock(&head->io_sem);
if (head->r.domain == element || head->r.group == element ||
head->r.acl == element || &head->w.domain->list == element)
in_use = true;
mutex_unlock(&head->io_sem);
-out:
spin_lock(&tomoyo_io_buffer_list_lock);
head->users--;
if (in_use)
@@ -77,15 +63,14 @@ out:
* tomoyo_name_used_by_io_buffer - Check whether the string is used by /sys/kernel/security/tomoyo/ users or not.
*
* @string: String to check.
- * @size: Memory allocated for @string .
*
* Returns true if @string is used by /sys/kernel/security/tomoyo/ users,
* false otherwise.
*/
-static bool tomoyo_name_used_by_io_buffer(const char *string,
- const size_t size)
+static bool tomoyo_name_used_by_io_buffer(const char *string)
{
struct tomoyo_io_buffer *head;
+ const size_t size = strlen(string) + 1;
bool in_use = false;
spin_lock(&tomoyo_io_buffer_list_lock);
@@ -93,10 +78,7 @@ static bool tomoyo_name_used_by_io_buffer(const char *string,
int i;
head->users++;
spin_unlock(&tomoyo_io_buffer_list_lock);
- if (mutex_lock_interruptible(&head->io_sem)) {
- in_use = true;
- goto out;
- }
+ mutex_lock(&head->io_sem);
for (i = 0; i < TOMOYO_MAX_IO_READ_QUEUE; i++) {
const char *w = head->r.w[i];
if (w < string || w > string + size)
@@ -105,7 +87,6 @@ static bool tomoyo_name_used_by_io_buffer(const char *string,
break;
}
mutex_unlock(&head->io_sem);
-out:
spin_lock(&tomoyo_io_buffer_list_lock);
head->users--;
if (in_use)
@@ -115,84 +96,6 @@ out:
return in_use;
}
-/* Structure for garbage collection. */
-struct tomoyo_gc {
- struct list_head list;
- enum tomoyo_policy_id type;
- size_t size;
- struct list_head *element;
-};
-/* List of entries to be deleted. */
-static LIST_HEAD(tomoyo_gc_list);
-/* Length of tomoyo_gc_list. */
-static int tomoyo_gc_list_len;
-
-/**
- * tomoyo_add_to_gc - Add an entry to to be deleted list.
- *
- * @type: One of values in "enum tomoyo_policy_id".
- * @element: Pointer to "struct list_head".
- *
- * Returns true on success, false otherwise.
- *
- * Caller holds tomoyo_policy_lock mutex.
- *
- * Adding an entry needs kmalloc(). Thus, if we try to add thousands of
- * entries at once, it will take too long time. Thus, do not add more than 128
- * entries per a scan. But to be able to handle worst case where all entries
- * are in-use, we accept one more entry per a scan.
- *
- * If we use singly linked list using "struct list_head"->prev (which is
- * LIST_POISON2), we can avoid kmalloc().
- */
-static bool tomoyo_add_to_gc(const int type, struct list_head *element)
-{
- struct tomoyo_gc *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
- if (!entry)
- return false;
- entry->type = type;
- if (type == TOMOYO_ID_ACL)
- entry->size = tomoyo_acl_size[
- container_of(element,
- typeof(struct tomoyo_acl_info),
- list)->type];
- else if (type == TOMOYO_ID_NAME)
- entry->size = strlen(container_of(element,
- typeof(struct tomoyo_name),
- head.list)->entry.name) + 1;
- else if (type == TOMOYO_ID_CONDITION)
- entry->size =
- container_of(element, typeof(struct tomoyo_condition),
- head.list)->size;
- else
- entry->size = tomoyo_element_size[type];
- entry->element = element;
- list_add(&entry->list, &tomoyo_gc_list);
- list_del_rcu(element);
- return tomoyo_gc_list_len++ < 128;
-}
-
-/**
- * tomoyo_element_linked_by_gc - Validate next element of an entry.
- *
- * @element: Pointer to an element.
- * @size: Size of @element in byte.
- *
- * Returns true if @element is linked by other elements in the garbage
- * collector's queue, false otherwise.
- */
-static bool tomoyo_element_linked_by_gc(const u8 *element, const size_t size)
-{
- struct tomoyo_gc *p;
- list_for_each_entry(p, &tomoyo_gc_list, list) {
- const u8 *ptr = (const u8 *) p->element->next;
- if (ptr < element || element + size < ptr)
- continue;
- return true;
- }
- return false;
-}
-
/**
* tomoyo_del_transition_control - Delete members in "struct tomoyo_transition_control".
*
@@ -200,7 +103,7 @@ static bool tomoyo_element_linked_by_gc(const u8 *element, const size_t size)
*
* Returns nothing.
*/
-static void tomoyo_del_transition_control(struct list_head *element)
+static inline void tomoyo_del_transition_control(struct list_head *element)
{
struct tomoyo_transition_control *ptr =
container_of(element, typeof(*ptr), head.list);
@@ -215,7 +118,7 @@ static void tomoyo_del_transition_control(struct list_head *element)
*
* Returns nothing.
*/
-static void tomoyo_del_aggregator(struct list_head *element)
+static inline void tomoyo_del_aggregator(struct list_head *element)
{
struct tomoyo_aggregator *ptr =
container_of(element, typeof(*ptr), head.list);
@@ -230,7 +133,7 @@ static void tomoyo_del_aggregator(struct list_head *element)
*
* Returns nothing.
*/
-static void tomoyo_del_manager(struct list_head *element)
+static inline void tomoyo_del_manager(struct list_head *element)
{
struct tomoyo_manager *ptr =
container_of(element, typeof(*ptr), head.list);
@@ -293,6 +196,38 @@ static void tomoyo_del_acl(struct list_head *element)
tomoyo_put_number_union(&entry->flags);
}
break;
+ case TOMOYO_TYPE_ENV_ACL:
+ {
+ struct tomoyo_env_acl *entry =
+ container_of(acl, typeof(*entry), head);
+
+ tomoyo_put_name(entry->env);
+ }
+ break;
+ case TOMOYO_TYPE_INET_ACL:
+ {
+ struct tomoyo_inet_acl *entry =
+ container_of(acl, typeof(*entry), head);
+
+ tomoyo_put_group(entry->address.group);
+ tomoyo_put_number_union(&entry->port);
+ }
+ break;
+ case TOMOYO_TYPE_UNIX_ACL:
+ {
+ struct tomoyo_unix_acl *entry =
+ container_of(acl, typeof(*entry), head);
+
+ tomoyo_put_name_union(&entry->name);
+ }
+ break;
+ case TOMOYO_TYPE_MANUAL_TASK_ACL:
+ {
+ struct tomoyo_task_acl *entry =
+ container_of(acl, typeof(*entry), head);
+ tomoyo_put_name(entry->domainname);
+ }
+ break;
}
}
@@ -301,44 +236,26 @@ static void tomoyo_del_acl(struct list_head *element)
*
* @element: Pointer to "struct list_head".
*
- * Returns true if deleted, false otherwise.
+ * Returns nothing.
+ *
+ * Caller holds tomoyo_policy_lock mutex.
*/
-static bool tomoyo_del_domain(struct list_head *element)
+static inline void tomoyo_del_domain(struct list_head *element)
{
struct tomoyo_domain_info *domain =
container_of(element, typeof(*domain), list);
struct tomoyo_acl_info *acl;
struct tomoyo_acl_info *tmp;
/*
- * Since we don't protect whole execve() operation using SRCU,
- * we need to recheck domain->users at this point.
- *
- * (1) Reader starts SRCU section upon execve().
- * (2) Reader traverses tomoyo_domain_list and finds this domain.
- * (3) Writer marks this domain as deleted.
- * (4) Garbage collector removes this domain from tomoyo_domain_list
- * because this domain is marked as deleted and used by nobody.
- * (5) Reader saves reference to this domain into
- * "struct linux_binprm"->cred->security .
- * (6) Reader finishes SRCU section, although execve() operation has
- * not finished yet.
- * (7) Garbage collector waits for SRCU synchronization.
- * (8) Garbage collector kfree() this domain because this domain is
- * used by nobody.
- * (9) Reader finishes execve() operation and restores this domain from
- * "struct linux_binprm"->cred->security.
- *
- * By updating domain->users at (5), we can solve this race problem
- * by rechecking domain->users at (8).
+ * Since this domain is referenced from neither
+ * "struct tomoyo_io_buffer" nor "struct cred"->security, we can delete
+ * elements without checking for is_deleted flag.
*/
- if (atomic_read(&domain->users))
- return false;
list_for_each_entry_safe(acl, tmp, &domain->acl_info_list, list) {
tomoyo_del_acl(&acl->list);
tomoyo_memory_free(acl);
}
tomoyo_put_name(domain->domainname);
- return true;
}
/**
@@ -387,10 +304,9 @@ void tomoyo_del_condition(struct list_head *element)
*
* Returns nothing.
*/
-static void tomoyo_del_name(struct list_head *element)
+static inline void tomoyo_del_name(struct list_head *element)
{
- const struct tomoyo_name *ptr =
- container_of(element, typeof(*ptr), head.list);
+ /* Nothing to do. */
}
/**
@@ -400,7 +316,7 @@ static void tomoyo_del_name(struct list_head *element)
*
* Returns nothing.
*/
-static void tomoyo_del_path_group(struct list_head *element)
+static inline void tomoyo_del_path_group(struct list_head *element)
{
struct tomoyo_path_group *member =
container_of(element, typeof(*member), head.list);
@@ -414,7 +330,7 @@ static void tomoyo_del_path_group(struct list_head *element)
*
* Returns nothing.
*/
-static void tomoyo_del_group(struct list_head *element)
+static inline void tomoyo_del_group(struct list_head *element)
{
struct tomoyo_group *group =
container_of(element, typeof(*group), head.list);
@@ -422,16 +338,128 @@ static void tomoyo_del_group(struct list_head *element)
}
/**
+ * tomoyo_del_address_group - Delete members in "struct tomoyo_address_group".
+ *
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ */
+static inline void tomoyo_del_address_group(struct list_head *element)
+{
+ /* Nothing to do. */
+}
+
+/**
* tomoyo_del_number_group - Delete members in "struct tomoyo_number_group".
*
* @element: Pointer to "struct list_head".
*
* Returns nothing.
*/
-static void tomoyo_del_number_group(struct list_head *element)
+static inline void tomoyo_del_number_group(struct list_head *element)
{
- struct tomoyo_number_group *member =
- container_of(element, typeof(*member), head.list);
+ /* Nothing to do. */
+}
+
+/**
+ * tomoyo_try_to_gc - Try to kfree() an entry.
+ *
+ * @type: One of values in "enum tomoyo_policy_id".
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ *
+ * Caller holds tomoyo_policy_lock mutex.
+ */
+static void tomoyo_try_to_gc(const enum tomoyo_policy_id type,
+ struct list_head *element)
+{
+ /*
+ * __list_del_entry() guarantees that the list element became no longer
+ * reachable from the list which the element was originally on (e.g.
+ * tomoyo_domain_list). Also, synchronize_srcu() guarantees that the
+ * list element became no longer referenced by syscall users.
+ */
+ __list_del_entry(element);
+ mutex_unlock(&tomoyo_policy_lock);
+ synchronize_srcu(&tomoyo_ss);
+ /*
+ * However, there are two users which may still be using the list
+ * element. We need to defer until both users forget this element.
+ *
+ * Don't kfree() until "struct tomoyo_io_buffer"->r.{domain,group,acl}
+ * and "struct tomoyo_io_buffer"->w.domain forget this element.
+ */
+ if (tomoyo_struct_used_by_io_buffer(element))
+ goto reinject;
+ switch (type) {
+ case TOMOYO_ID_TRANSITION_CONTROL:
+ tomoyo_del_transition_control(element);
+ break;
+ case TOMOYO_ID_MANAGER:
+ tomoyo_del_manager(element);
+ break;
+ case TOMOYO_ID_AGGREGATOR:
+ tomoyo_del_aggregator(element);
+ break;
+ case TOMOYO_ID_GROUP:
+ tomoyo_del_group(element);
+ break;
+ case TOMOYO_ID_PATH_GROUP:
+ tomoyo_del_path_group(element);
+ break;
+ case TOMOYO_ID_ADDRESS_GROUP:
+ tomoyo_del_address_group(element);
+ break;
+ case TOMOYO_ID_NUMBER_GROUP:
+ tomoyo_del_number_group(element);
+ break;
+ case TOMOYO_ID_CONDITION:
+ tomoyo_del_condition(element);
+ break;
+ case TOMOYO_ID_NAME:
+ /*
+ * Don't kfree() until all "struct tomoyo_io_buffer"->r.w[]
+ * forget this element.
+ */
+ if (tomoyo_name_used_by_io_buffer
+ (container_of(element, typeof(struct tomoyo_name),
+ head.list)->entry.name))
+ goto reinject;
+ tomoyo_del_name(element);
+ break;
+ case TOMOYO_ID_ACL:
+ tomoyo_del_acl(element);
+ break;
+ case TOMOYO_ID_DOMAIN:
+ /*
+ * Don't kfree() until all "struct cred"->security forget this
+ * element.
+ */
+ if (atomic_read(&container_of
+ (element, typeof(struct tomoyo_domain_info),
+ list)->users))
+ goto reinject;
+ break;
+ case TOMOYO_MAX_POLICY:
+ break;
+ }
+ mutex_lock(&tomoyo_policy_lock);
+ if (type == TOMOYO_ID_DOMAIN)
+ tomoyo_del_domain(element);
+ tomoyo_memory_free(element);
+ return;
+reinject:
+ /*
+ * We can safely reinject this element here because
+ * both of the following hold:
+ * (1) appending and removing list elements are protected by the
+ * tomoyo_policy_lock mutex, and
+ * (2) only this function removes list elements, and this function is
+ * exclusively serialized by the tomoyo_gc_mutex mutex.
+ */
+ mutex_lock(&tomoyo_policy_lock);
+ list_add_rcu(element, element->prev);
}
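
tomoyo_try_to_gc() above replaces the old queue-based collector with an inline unlink, synchronize, then free-or-reinject cycle. A condensed sketch of that pattern, with still_referenced() standing in for the tomoyo_struct_used_by_io_buffer()/tomoyo_name_used_by_io_buffer()/domain->users checks:

/* Sketch of the deferred-free cycle; still_referenced() is a stand-in. */
static void try_to_gc_sketch(struct list_head *element)
{
	__list_del_entry(element);	/* no longer reachable from the list */
	mutex_unlock(&tomoyo_policy_lock);
	synchronize_srcu(&tomoyo_ss);	/* no syscall user can still see it */
	if (still_referenced(element)) {
		/* Reinjection is safe: element->next/prev are still valid and
		 * list updates are serialized by tomoyo_policy_lock. */
		mutex_lock(&tomoyo_policy_lock);
		list_add_rcu(element, element->prev);
		return;
	}
	mutex_lock(&tomoyo_policy_lock);
	tomoyo_memory_free(element);
}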
/**
@@ -440,19 +468,19 @@ static void tomoyo_del_number_group(struct list_head *element)
* @id: One of values in "enum tomoyo_policy_id".
* @member_list: Pointer to "struct list_head".
*
- * Returns true if some elements are deleted, false otherwise.
+ * Returns nothing.
*/
-static bool tomoyo_collect_member(const enum tomoyo_policy_id id,
+static void tomoyo_collect_member(const enum tomoyo_policy_id id,
struct list_head *member_list)
{
struct tomoyo_acl_head *member;
- list_for_each_entry(member, member_list, list) {
+ struct tomoyo_acl_head *tmp;
+ list_for_each_entry_safe(member, tmp, member_list, list) {
if (!member->is_deleted)
continue;
- if (!tomoyo_add_to_gc(id, &member->list))
- return false;
+ member->is_deleted = TOMOYO_GC_IN_PROGRESS;
+ tomoyo_try_to_gc(id, &member->list);
}
- return true;
}
/**
@@ -460,22 +488,22 @@ static bool tomoyo_collect_member(const enum tomoyo_policy_id id,
*
* @list: Pointer to "struct list_head".
*
- * Returns true if some elements are deleted, false otherwise.
+ * Returns nothing.
*/
-static bool tomoyo_collect_acl(struct list_head *list)
+static void tomoyo_collect_acl(struct list_head *list)
{
struct tomoyo_acl_info *acl;
- list_for_each_entry(acl, list, list) {
+ struct tomoyo_acl_info *tmp;
+ list_for_each_entry_safe(acl, tmp, list, list) {
if (!acl->is_deleted)
continue;
- if (!tomoyo_add_to_gc(TOMOYO_ID_ACL, &acl->list))
- return false;
+ acl->is_deleted = TOMOYO_GC_IN_PROGRESS;
+ tomoyo_try_to_gc(TOMOYO_ID_ACL, &acl->list);
}
- return true;
}
/**
- * tomoyo_collect_entry - Scan lists for deleted elements.
+ * tomoyo_collect_entry - Try to kfree() deleted elements.
*
* Returns nothing.
*/
@@ -484,174 +512,82 @@ static void tomoyo_collect_entry(void)
int i;
enum tomoyo_policy_id id;
struct tomoyo_policy_namespace *ns;
- int idx;
- if (mutex_lock_interruptible(&tomoyo_policy_lock))
- return;
- idx = tomoyo_read_lock();
+ mutex_lock(&tomoyo_policy_lock);
{
struct tomoyo_domain_info *domain;
- list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
- if (!tomoyo_collect_acl(&domain->acl_info_list))
- goto unlock;
+ struct tomoyo_domain_info *tmp;
+ list_for_each_entry_safe(domain, tmp, &tomoyo_domain_list,
+ list) {
+ tomoyo_collect_acl(&domain->acl_info_list);
if (!domain->is_deleted || atomic_read(&domain->users))
continue;
- /*
- * Nobody is referring this domain. But somebody may
- * refer this domain after successful execve().
- * We recheck domain->users after SRCU synchronization.
- */
- if (!tomoyo_add_to_gc(TOMOYO_ID_DOMAIN, &domain->list))
- goto unlock;
+ tomoyo_try_to_gc(TOMOYO_ID_DOMAIN, &domain->list);
}
}
- list_for_each_entry_rcu(ns, &tomoyo_namespace_list, namespace_list) {
+ list_for_each_entry(ns, &tomoyo_namespace_list, namespace_list) {
for (id = 0; id < TOMOYO_MAX_POLICY; id++)
- if (!tomoyo_collect_member(id, &ns->policy_list[id]))
- goto unlock;
+ tomoyo_collect_member(id, &ns->policy_list[id]);
for (i = 0; i < TOMOYO_MAX_ACL_GROUPS; i++)
- if (!tomoyo_collect_acl(&ns->acl_group[i]))
- goto unlock;
+ tomoyo_collect_acl(&ns->acl_group[i]);
+ }
+ {
+ struct tomoyo_shared_acl_head *ptr;
+ struct tomoyo_shared_acl_head *tmp;
+ list_for_each_entry_safe(ptr, tmp, &tomoyo_condition_list,
+ list) {
+ if (atomic_read(&ptr->users) > 0)
+ continue;
+ atomic_set(&ptr->users, TOMOYO_GC_IN_PROGRESS);
+ tomoyo_try_to_gc(TOMOYO_ID_CONDITION, &ptr->list);
+ }
+ }
+ list_for_each_entry(ns, &tomoyo_namespace_list, namespace_list) {
for (i = 0; i < TOMOYO_MAX_GROUP; i++) {
struct list_head *list = &ns->group_list[i];
struct tomoyo_group *group;
+ struct tomoyo_group *tmp;
switch (i) {
case 0:
id = TOMOYO_ID_PATH_GROUP;
break;
- default:
+ case 1:
id = TOMOYO_ID_NUMBER_GROUP;
break;
+ default:
+ id = TOMOYO_ID_ADDRESS_GROUP;
+ break;
}
- list_for_each_entry(group, list, head.list) {
- if (!tomoyo_collect_member
- (id, &group->member_list))
- goto unlock;
+ list_for_each_entry_safe(group, tmp, list, head.list) {
+ tomoyo_collect_member(id, &group->member_list);
if (!list_empty(&group->member_list) ||
- atomic_read(&group->head.users))
+ atomic_read(&group->head.users) > 0)
continue;
- if (!tomoyo_add_to_gc(TOMOYO_ID_GROUP,
- &group->head.list))
- goto unlock;
+ atomic_set(&group->head.users,
+ TOMOYO_GC_IN_PROGRESS);
+ tomoyo_try_to_gc(TOMOYO_ID_GROUP,
+ &group->head.list);
}
}
}
- id = TOMOYO_ID_CONDITION;
- for (i = 0; i < TOMOYO_MAX_HASH + 1; i++) {
- struct list_head *list = !i ?
- &tomoyo_condition_list : &tomoyo_name_list[i - 1];
+ for (i = 0; i < TOMOYO_MAX_HASH; i++) {
+ struct list_head *list = &tomoyo_name_list[i];
struct tomoyo_shared_acl_head *ptr;
- list_for_each_entry(ptr, list, list) {
- if (atomic_read(&ptr->users))
+ struct tomoyo_shared_acl_head *tmp;
+ list_for_each_entry_safe(ptr, tmp, list, list) {
+ if (atomic_read(&ptr->users) > 0)
continue;
- if (!tomoyo_add_to_gc(id, &ptr->list))
- goto unlock;
+ atomic_set(&ptr->users, TOMOYO_GC_IN_PROGRESS);
+ tomoyo_try_to_gc(TOMOYO_ID_NAME, &ptr->list);
}
- id = TOMOYO_ID_NAME;
}
-unlock:
- tomoyo_read_unlock(idx);
mutex_unlock(&tomoyo_policy_lock);
}
/**
- * tomoyo_kfree_entry - Delete entries in tomoyo_gc_list.
- *
- * Returns true if some entries were kfree()d, false otherwise.
- */
-static bool tomoyo_kfree_entry(void)
-{
- struct tomoyo_gc *p;
- struct tomoyo_gc *tmp;
- bool result = false;
-
- list_for_each_entry_safe(p, tmp, &tomoyo_gc_list, list) {
- struct list_head *element = p->element;
-
- /*
- * list_del_rcu() in tomoyo_add_to_gc() guarantees that the
- * list element became no longer reachable from the list which
- * the element was originally on (e.g. tomoyo_domain_list).
- * Also, synchronize_srcu() in tomoyo_gc_thread() guarantees
- * that the list element became no longer referenced by syscall
- * users.
- *
- * However, there are three users which may still be using the
- * list element. We need to defer until all of these users
- * forget the list element.
- *
- * Firstly, defer until "struct tomoyo_io_buffer"->r.{domain,
- * group,acl} and "struct tomoyo_io_buffer"->w.domain forget
- * the list element.
- */
- if (tomoyo_struct_used_by_io_buffer(element))
- continue;
- /*
- * Secondly, defer until all other elements in the
- * tomoyo_gc_list list forget the list element.
- */
- if (tomoyo_element_linked_by_gc((const u8 *) element, p->size))
- continue;
- switch (p->type) {
- case TOMOYO_ID_TRANSITION_CONTROL:
- tomoyo_del_transition_control(element);
- break;
- case TOMOYO_ID_AGGREGATOR:
- tomoyo_del_aggregator(element);
- break;
- case TOMOYO_ID_MANAGER:
- tomoyo_del_manager(element);
- break;
- case TOMOYO_ID_CONDITION:
- tomoyo_del_condition(element);
- break;
- case TOMOYO_ID_NAME:
- /*
- * Thirdly, defer until all "struct tomoyo_io_buffer"
- * ->r.w[] forget the list element.
- */
- if (tomoyo_name_used_by_io_buffer(
- container_of(element, typeof(struct tomoyo_name),
- head.list)->entry.name, p->size))
- continue;
- tomoyo_del_name(element);
- break;
- case TOMOYO_ID_ACL:
- tomoyo_del_acl(element);
- break;
- case TOMOYO_ID_DOMAIN:
- if (!tomoyo_del_domain(element))
- continue;
- break;
- case TOMOYO_ID_PATH_GROUP:
- tomoyo_del_path_group(element);
- break;
- case TOMOYO_ID_GROUP:
- tomoyo_del_group(element);
- break;
- case TOMOYO_ID_NUMBER_GROUP:
- tomoyo_del_number_group(element);
- break;
- case TOMOYO_MAX_POLICY:
- break;
- }
- tomoyo_memory_free(element);
- list_del(&p->list);
- kfree(p);
- tomoyo_gc_list_len--;
- result = true;
- }
- return result;
-}
-
-/**
* tomoyo_gc_thread - Garbage collector thread function.
*
* @unused: Unused.
*
- * In case OOM-killer choose this thread for termination, we create this thread
- * as a short live thread whenever /sys/kernel/security/tomoyo/ interface was
- * close()d.
- *
* Returns 0.
*/
static int tomoyo_gc_thread(void *unused)
@@ -660,13 +596,7 @@ static int tomoyo_gc_thread(void *unused)
static DEFINE_MUTEX(tomoyo_gc_mutex);
if (!mutex_trylock(&tomoyo_gc_mutex))
goto out;
- daemonize("GC for TOMOYO");
- do {
- tomoyo_collect_entry();
- if (list_empty(&tomoyo_gc_list))
- break;
- synchronize_srcu(&tomoyo_ss);
- } while (tomoyo_kfree_entry());
+ tomoyo_collect_entry();
{
struct tomoyo_io_buffer *head;
struct tomoyo_io_buffer *tmp;
diff --git a/security/tomoyo/group.c b/security/tomoyo/group.c
index 5fb0e129840..50092534ec5 100644
--- a/security/tomoyo/group.c
+++ b/security/tomoyo/group.c
@@ -42,7 +42,26 @@ static bool tomoyo_same_number_group(const struct tomoyo_acl_head *a,
}
/**
- * tomoyo_write_group - Write "struct tomoyo_path_group"/"struct tomoyo_number_group" list.
+ * tomoyo_same_address_group - Check for duplicated "struct tomoyo_address_group" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_head".
+ * @b: Pointer to "struct tomoyo_acl_head".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static bool tomoyo_same_address_group(const struct tomoyo_acl_head *a,
+ const struct tomoyo_acl_head *b)
+{
+ const struct tomoyo_address_group *p1 = container_of(a, typeof(*p1),
+ head);
+ const struct tomoyo_address_group *p2 = container_of(b, typeof(*p2),
+ head);
+
+ return tomoyo_same_ipaddr_union(&p1->address, &p2->address);
+}
+
+/**
+ * tomoyo_write_group - Write "struct tomoyo_path_group"/"struct tomoyo_number_group"/"struct tomoyo_address_group" list.
*
* @param: Pointer to "struct tomoyo_acl_param".
* @type: Type of this group.
@@ -77,6 +96,14 @@ int tomoyo_write_group(struct tomoyo_acl_param *param, const u8 type)
* tomoyo_put_number_union() is not needed because
* param->data[0] != '@'.
*/
+ } else {
+ struct tomoyo_address_group e = { };
+
+ if (param->data[0] == '@' ||
+ !tomoyo_parse_ipaddr_union(param, &e.address))
+ goto out;
+ error = tomoyo_update_policy(&e.head, sizeof(e), param,
+ tomoyo_same_address_group);
}
out:
tomoyo_put_group(group);
@@ -137,3 +164,35 @@ bool tomoyo_number_matches_group(const unsigned long min,
}
return matched;
}
+
+/**
+ * tomoyo_address_matches_group - Check whether the given address matches members of the given address group.
+ *
+ * @is_ipv6: True if @address is an IPv6 address.
+ * @address: An IPv4 or IPv6 address.
+ * @group: Pointer to "struct tomoyo_address_group".
+ *
+ * Returns true if @address matches addresses in @group group, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+bool tomoyo_address_matches_group(const bool is_ipv6, const __be32 *address,
+ const struct tomoyo_group *group)
+{
+ struct tomoyo_address_group *member;
+ bool matched = false;
+ const u8 size = is_ipv6 ? 16 : 4;
+
+ list_for_each_entry_rcu(member, &group->member_list, head.list) {
+ if (member->head.is_deleted)
+ continue;
+ if (member->address.is_ipv6 != is_ipv6)
+ continue;
+ if (memcmp(&member->address.ip[0], address, size) > 0 ||
+ memcmp(address, &member->address.ip[1], size) > 0)
+ continue;
+ matched = true;
+ break;
+ }
+ return matched;
+}
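
tomoyo_address_matches_group() treats each member as an inclusive [ip[0], ip[1]] range; because addresses are kept in network byte order, a byte-wise memcmp() orders them the same way numeric comparison would. The core check, extracted as a sketch:

/* Sketch: inclusive range test on network-byte-order addresses. */
static bool addr_in_range(const u8 *addr, const u8 *min, const u8 *max,
			  const u8 size)	/* 4 for IPv4, 16 for IPv6 */
{
	return memcmp(min, addr, size) <= 0 && memcmp(addr, max, size) <= 0;
}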
diff --git a/security/tomoyo/memory.c b/security/tomoyo/memory.c
index 7a56051146c..0e995716cc2 100644
--- a/security/tomoyo/memory.c
+++ b/security/tomoyo/memory.c
@@ -27,8 +27,6 @@ void tomoyo_warn_oom(const char *function)
panic("MAC Initialization failed.\n");
}
-/* Lock for protecting tomoyo_memory_used. */
-static DEFINE_SPINLOCK(tomoyo_policy_memory_lock);
/* Memory currently used by policy/audit log/query. */
unsigned int tomoyo_memory_used[TOMOYO_MAX_MEMORY_STAT];
/* Memory quota for "policy"/"audit log"/"query". */
@@ -42,22 +40,19 @@ unsigned int tomoyo_memory_quota[TOMOYO_MAX_MEMORY_STAT];
* Returns true on success, false otherwise.
*
* Returns true if @ptr is not NULL and quota not exceeded, false otherwise.
+ *
+ * Caller holds tomoyo_policy_lock mutex.
*/
bool tomoyo_memory_ok(void *ptr)
{
if (ptr) {
const size_t s = ksize(ptr);
- bool result;
- spin_lock(&tomoyo_policy_memory_lock);
tomoyo_memory_used[TOMOYO_MEMORY_POLICY] += s;
- result = !tomoyo_memory_quota[TOMOYO_MEMORY_POLICY] ||
- tomoyo_memory_used[TOMOYO_MEMORY_POLICY] <=
- tomoyo_memory_quota[TOMOYO_MEMORY_POLICY];
- if (!result)
- tomoyo_memory_used[TOMOYO_MEMORY_POLICY] -= s;
- spin_unlock(&tomoyo_policy_memory_lock);
- if (result)
+ if (!tomoyo_memory_quota[TOMOYO_MEMORY_POLICY] ||
+ tomoyo_memory_used[TOMOYO_MEMORY_POLICY] <=
+ tomoyo_memory_quota[TOMOYO_MEMORY_POLICY])
return true;
+ tomoyo_memory_used[TOMOYO_MEMORY_POLICY] -= s;
}
tomoyo_warn_oom(__func__);
return false;
@@ -71,6 +66,8 @@ bool tomoyo_memory_ok(void *ptr)
*
* Returns pointer to allocated memory on success, NULL otherwise.
* @data is zero-cleared on success.
+ *
+ * Caller holds tomoyo_policy_lock mutex.
*/
void *tomoyo_commit_ok(void *data, const unsigned int size)
{
@@ -85,20 +82,6 @@ void *tomoyo_commit_ok(void *data, const unsigned int size)
}
/**
- * tomoyo_memory_free - Free memory for elements.
- *
- * @ptr: Pointer to allocated memory.
- */
-void tomoyo_memory_free(void *ptr)
-{
- size_t s = ksize(ptr);
- spin_lock(&tomoyo_policy_memory_lock);
- tomoyo_memory_used[TOMOYO_MEMORY_POLICY] -= s;
- spin_unlock(&tomoyo_policy_memory_lock);
- kfree(ptr);
-}
-
-/**
* tomoyo_get_group - Allocate memory for "struct tomoyo_path_group"/"struct tomoyo_number_group".
*
* @param: Pointer to "struct tomoyo_acl_param".
@@ -123,7 +106,8 @@ struct tomoyo_group *tomoyo_get_group(struct tomoyo_acl_param *param,
goto out;
list = &param->ns->group_list[idx];
list_for_each_entry(group, list, head.list) {
- if (e.group_name != group->group_name)
+ if (e.group_name != group->group_name ||
+ atomic_read(&group->head.users) == TOMOYO_GC_IN_PROGRESS)
continue;
atomic_inc(&group->head.users);
found = true;
@@ -175,7 +159,8 @@ const struct tomoyo_path_info *tomoyo_get_name(const char *name)
if (mutex_lock_interruptible(&tomoyo_policy_lock))
return NULL;
list_for_each_entry(ptr, head, head.list) {
- if (hash != ptr->entry.hash || strcmp(name, ptr->entry.name))
+ if (hash != ptr->entry.hash || strcmp(name, ptr->entry.name) ||
+ atomic_read(&ptr->head.users) == TOMOYO_GC_IN_PROGRESS)
continue;
atomic_inc(&ptr->head.users);
goto out;
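
Both lookups above (tomoyo_get_group() and tomoyo_get_name()) now refuse to take a new reference on an entry whose usage count was set to TOMOYO_GC_IN_PROGRESS by the collector; a fresh entry is allocated instead of resurrecting one that tomoyo_try_to_gc() has already committed to freeing. In sketch form, with matches() standing in for the hash and name comparison:

/* Sketch of the reuse-or-skip rule used by the lookups above. */
static struct tomoyo_name *lookup_or_null(struct list_head *head,
					  const char *name)
{
	struct tomoyo_name *ptr;

	list_for_each_entry(ptr, head, head.list) {
		if (!matches(ptr, name))
			continue;
		if (atomic_read(&ptr->head.users) == TOMOYO_GC_IN_PROGRESS)
			continue;	/* being freed; do not reuse */
		atomic_inc(&ptr->head.users);
		return ptr;		/* safe to reuse the existing entry */
	}
	return NULL;	/* caller falls back to tomoyo_commit_ok() */
}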
diff --git a/security/tomoyo/network.c b/security/tomoyo/network.c
new file mode 100644
index 00000000000..97527710a72
--- /dev/null
+++ b/security/tomoyo/network.c
@@ -0,0 +1,771 @@
+/*
+ * security/tomoyo/network.c
+ *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
+ */
+
+#include "common.h"
+#include <linux/slab.h>
+
+/* Structure for holding inet domain socket's address. */
+struct tomoyo_inet_addr_info {
+ __be16 port; /* In network byte order. */
+ const __be32 *address; /* In network byte order. */
+ bool is_ipv6;
+};
+
+/* Structure for holding unix domain socket's address. */
+struct tomoyo_unix_addr_info {
+ u8 *addr; /* This may not be a '\0'-terminated string. */
+ unsigned int addr_len;
+};
+
+/* Structure for holding socket address. */
+struct tomoyo_addr_info {
+ u8 protocol;
+ u8 operation;
+ struct tomoyo_inet_addr_info inet;
+ struct tomoyo_unix_addr_info unix0;
+};
+
+/* String table for socket's protocols. */
+const char * const tomoyo_proto_keyword[TOMOYO_SOCK_MAX] = {
+ [SOCK_STREAM] = "stream",
+ [SOCK_DGRAM] = "dgram",
+ [SOCK_RAW] = "raw",
+ [SOCK_SEQPACKET] = "seqpacket",
+ [0] = " ", /* Dummy for avoiding NULL pointer dereference. */
+ [4] = " ", /* Dummy for avoiding NULL pointer dereference. */
+};
+
+/**
+ * tomoyo_parse_ipaddr_union - Parse an IP address.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ * @ptr: Pointer to "struct tomoyo_ipaddr_union".
+ *
+ * Returns true on success, false otherwise.
+ */
+bool tomoyo_parse_ipaddr_union(struct tomoyo_acl_param *param,
+ struct tomoyo_ipaddr_union *ptr)
+{
+ u8 * const min = ptr->ip[0].in6_u.u6_addr8;
+ u8 * const max = ptr->ip[1].in6_u.u6_addr8;
+ char *address = tomoyo_read_token(param);
+ const char *end;
+
+ if (!strchr(address, ':') &&
+ in4_pton(address, -1, min, '-', &end) > 0) {
+ ptr->is_ipv6 = false;
+ if (!*end)
+ ptr->ip[1].s6_addr32[0] = ptr->ip[0].s6_addr32[0];
+ else if (*end++ != '-' ||
+ in4_pton(end, -1, max, '\0', &end) <= 0 || *end)
+ return false;
+ return true;
+ }
+ if (in6_pton(address, -1, min, '-', &end) > 0) {
+ ptr->is_ipv6 = true;
+ if (!*end)
+ memmove(max, min, sizeof(u16) * 8);
+ else if (*end++ != '-' ||
+ in6_pton(end, -1, max, '\0', &end) <= 0 || *end)
+ return false;
+ return true;
+ }
+ return false;
+}
+
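tomoyo_parse_ipaddr_union() accepts either a single address or a "min-max" range, in IPv4 or IPv6 form (for example "10.0.0.1" or "10.0.0.0-10.255.255.255"), relying on in4_pton()/in6_pton() stopping at the '-' delimiter. A minimal sketch of the same delimiter handling for the IPv4 case:

/* Sketch: parse "a.b.c.d" or "a.b.c.d-e.f.g.h" using in4_pton(). */
#include <linux/inet.h>

static bool parse_ipv4_range(const char *token, __be32 *min, __be32 *max)
{
	const char *end;

	if (in4_pton(token, -1, (u8 *)min, '-', &end) <= 0)
		return false;
	if (!*end) {			/* single address */
		*max = *min;
		return true;
	}
	return *end++ == '-' &&
	       in4_pton(end, -1, (u8 *)max, '\0', &end) > 0 && !*end;
}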
+/**
+ * tomoyo_print_ipv4 - Print an IPv4 address.
+ *
+ * @buffer: Buffer to write to.
+ * @buffer_len: Size of @buffer.
+ * @min_ip: Pointer to __be32.
+ * @max_ip: Pointer to __be32.
+ *
+ * Returns nothing.
+ */
+static void tomoyo_print_ipv4(char *buffer, const unsigned int buffer_len,
+ const __be32 *min_ip, const __be32 *max_ip)
+{
+ snprintf(buffer, buffer_len, "%pI4%c%pI4", min_ip,
+ *min_ip == *max_ip ? '\0' : '-', max_ip);
+}
+
+/**
+ * tomoyo_print_ipv6 - Print an IPv6 address.
+ *
+ * @buffer: Buffer to write to.
+ * @buffer_len: Size of @buffer.
+ * @min_ip: Pointer to "struct in6_addr".
+ * @max_ip: Pointer to "struct in6_addr".
+ *
+ * Returns nothing.
+ */
+static void tomoyo_print_ipv6(char *buffer, const unsigned int buffer_len,
+ const struct in6_addr *min_ip,
+ const struct in6_addr *max_ip)
+{
+ snprintf(buffer, buffer_len, "%pI6c%c%pI6c", min_ip,
+ !memcmp(min_ip, max_ip, 16) ? '\0' : '-', max_ip);
+}
+
+/**
+ * tomoyo_print_ip - Print an IP address.
+ *
+ * @buf: Buffer to write to.
+ * @size: Size of @buf.
+ * @ptr: Pointer to "struct tomoyo_ipaddr_union".
+ *
+ * Returns nothing.
+ */
+void tomoyo_print_ip(char *buf, const unsigned int size,
+ const struct tomoyo_ipaddr_union *ptr)
+{
+ if (ptr->is_ipv6)
+ tomoyo_print_ipv6(buf, size, &ptr->ip[0], &ptr->ip[1]);
+ else
+ tomoyo_print_ipv4(buf, size, &ptr->ip[0].s6_addr32[0],
+ &ptr->ip[1].s6_addr32[0]);
+}
+
+/*
+ * Mapping table from "enum tomoyo_network_acl_index" to
+ * "enum tomoyo_mac_index" for inet domain socket.
+ */
+static const u8 tomoyo_inet2mac
+[TOMOYO_SOCK_MAX][TOMOYO_MAX_NETWORK_OPERATION] = {
+ [SOCK_STREAM] = {
+ [TOMOYO_NETWORK_BIND] = TOMOYO_MAC_NETWORK_INET_STREAM_BIND,
+ [TOMOYO_NETWORK_LISTEN] =
+ TOMOYO_MAC_NETWORK_INET_STREAM_LISTEN,
+ [TOMOYO_NETWORK_CONNECT] =
+ TOMOYO_MAC_NETWORK_INET_STREAM_CONNECT,
+ },
+ [SOCK_DGRAM] = {
+ [TOMOYO_NETWORK_BIND] = TOMOYO_MAC_NETWORK_INET_DGRAM_BIND,
+ [TOMOYO_NETWORK_SEND] = TOMOYO_MAC_NETWORK_INET_DGRAM_SEND,
+ },
+ [SOCK_RAW] = {
+ [TOMOYO_NETWORK_BIND] = TOMOYO_MAC_NETWORK_INET_RAW_BIND,
+ [TOMOYO_NETWORK_SEND] = TOMOYO_MAC_NETWORK_INET_RAW_SEND,
+ },
+};
+
+/*
+ * Mapping table from "enum tomoyo_network_acl_index" to
+ * "enum tomoyo_mac_index" for unix domain socket.
+ */
+static const u8 tomoyo_unix2mac
+[TOMOYO_SOCK_MAX][TOMOYO_MAX_NETWORK_OPERATION] = {
+ [SOCK_STREAM] = {
+ [TOMOYO_NETWORK_BIND] = TOMOYO_MAC_NETWORK_UNIX_STREAM_BIND,
+ [TOMOYO_NETWORK_LISTEN] =
+ TOMOYO_MAC_NETWORK_UNIX_STREAM_LISTEN,
+ [TOMOYO_NETWORK_CONNECT] =
+ TOMOYO_MAC_NETWORK_UNIX_STREAM_CONNECT,
+ },
+ [SOCK_DGRAM] = {
+ [TOMOYO_NETWORK_BIND] = TOMOYO_MAC_NETWORK_UNIX_DGRAM_BIND,
+ [TOMOYO_NETWORK_SEND] = TOMOYO_MAC_NETWORK_UNIX_DGRAM_SEND,
+ },
+ [SOCK_SEQPACKET] = {
+ [TOMOYO_NETWORK_BIND] =
+ TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_BIND,
+ [TOMOYO_NETWORK_LISTEN] =
+ TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_LISTEN,
+ [TOMOYO_NETWORK_CONNECT] =
+ TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_CONNECT,
+ },
+};
+
+/**
+ * tomoyo_same_inet_acl - Check for duplicated "struct tomoyo_inet_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if @a == @b except permission bits, false otherwise.
+ */
+static bool tomoyo_same_inet_acl(const struct tomoyo_acl_info *a,
+ const struct tomoyo_acl_info *b)
+{
+ const struct tomoyo_inet_acl *p1 = container_of(a, typeof(*p1), head);
+ const struct tomoyo_inet_acl *p2 = container_of(b, typeof(*p2), head);
+
+ return p1->protocol == p2->protocol &&
+ tomoyo_same_ipaddr_union(&p1->address, &p2->address) &&
+ tomoyo_same_number_union(&p1->port, &p2->port);
+}
+
+/**
+ * tomoyo_same_unix_acl - Check for duplicated "struct tomoyo_unix_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if @a == @b except permission bits, false otherwise.
+ */
+static bool tomoyo_same_unix_acl(const struct tomoyo_acl_info *a,
+ const struct tomoyo_acl_info *b)
+{
+ const struct tomoyo_unix_acl *p1 = container_of(a, typeof(*p1), head);
+ const struct tomoyo_unix_acl *p2 = container_of(b, typeof(*p2), head);
+
+ return p1->protocol == p2->protocol &&
+ tomoyo_same_name_union(&p1->name, &p2->name);
+}
+
+/**
+ * tomoyo_merge_inet_acl - Merge duplicated "struct tomoyo_inet_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ * @is_delete: True for @a &= ~@b, false for @a |= @b.
+ *
+ * Returns true if @a is empty, false otherwise.
+ */
+static bool tomoyo_merge_inet_acl(struct tomoyo_acl_info *a,
+ struct tomoyo_acl_info *b,
+ const bool is_delete)
+{
+ u8 * const a_perm =
+ &container_of(a, struct tomoyo_inet_acl, head)->perm;
+ u8 perm = *a_perm;
+ const u8 b_perm = container_of(b, struct tomoyo_inet_acl, head)->perm;
+
+ if (is_delete)
+ perm &= ~b_perm;
+ else
+ perm |= b_perm;
+ *a_perm = perm;
+ return !perm;
+}
+
+/**
+ * tomoyo_merge_unix_acl - Merge duplicated "struct tomoyo_unix_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ * @is_delete: True for @a &= ~@b, false for @a |= @b.
+ *
+ * Returns true if @a is empty, false otherwise.
+ */
+static bool tomoyo_merge_unix_acl(struct tomoyo_acl_info *a,
+ struct tomoyo_acl_info *b,
+ const bool is_delete)
+{
+ u8 * const a_perm =
+ &container_of(a, struct tomoyo_unix_acl, head)->perm;
+ u8 perm = *a_perm;
+ const u8 b_perm = container_of(b, struct tomoyo_unix_acl, head)->perm;
+
+ if (is_delete)
+ perm &= ~b_perm;
+ else
+ perm |= b_perm;
+ *a_perm = perm;
+ return !perm;
+}
+
+/**
+ * tomoyo_write_inet_network - Write "struct tomoyo_inet_acl" list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+int tomoyo_write_inet_network(struct tomoyo_acl_param *param)
+{
+ struct tomoyo_inet_acl e = { .head.type = TOMOYO_TYPE_INET_ACL };
+ int error = -EINVAL;
+ u8 type;
+ const char *protocol = tomoyo_read_token(param);
+ const char *operation = tomoyo_read_token(param);
+
+ for (e.protocol = 0; e.protocol < TOMOYO_SOCK_MAX; e.protocol++)
+ if (!strcmp(protocol, tomoyo_proto_keyword[e.protocol]))
+ break;
+ for (type = 0; type < TOMOYO_MAX_NETWORK_OPERATION; type++)
+ if (tomoyo_permstr(operation, tomoyo_socket_keyword[type]))
+ e.perm |= 1 << type;
+ if (e.protocol == TOMOYO_SOCK_MAX || !e.perm)
+ return -EINVAL;
+ if (param->data[0] == '@') {
+ param->data++;
+ e.address.group =
+ tomoyo_get_group(param, TOMOYO_ADDRESS_GROUP);
+ if (!e.address.group)
+ return -ENOMEM;
+ } else {
+ if (!tomoyo_parse_ipaddr_union(param, &e.address))
+ goto out;
+ }
+ if (!tomoyo_parse_number_union(param, &e.port) ||
+ e.port.values[1] > 65535)
+ goto out;
+ error = tomoyo_update_domain(&e.head, sizeof(e), param,
+ tomoyo_same_inet_acl,
+ tomoyo_merge_inet_acl);
+out:
+ tomoyo_put_group(e.address.group);
+ tomoyo_put_number_union(&e.port);
+ return error;
+}
+
+/**
+ * tomoyo_write_unix_network - Write "struct tomoyo_unix_acl" list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_write_unix_network(struct tomoyo_acl_param *param)
+{
+ struct tomoyo_unix_acl e = { .head.type = TOMOYO_TYPE_UNIX_ACL };
+ int error;
+ u8 type;
+ const char *protocol = tomoyo_read_token(param);
+ const char *operation = tomoyo_read_token(param);
+
+ for (e.protocol = 0; e.protocol < TOMOYO_SOCK_MAX; e.protocol++)
+ if (!strcmp(protocol, tomoyo_proto_keyword[e.protocol]))
+ break;
+ for (type = 0; type < TOMOYO_MAX_NETWORK_OPERATION; type++)
+ if (tomoyo_permstr(operation, tomoyo_socket_keyword[type]))
+ e.perm |= 1 << type;
+ if (e.protocol == TOMOYO_SOCK_MAX || !e.perm)
+ return -EINVAL;
+ if (!tomoyo_parse_name_union(param, &e.name))
+ return -EINVAL;
+ error = tomoyo_update_domain(&e.head, sizeof(e), param,
+ tomoyo_same_unix_acl,
+ tomoyo_merge_unix_acl);
+ tomoyo_put_name_union(&e.name);
+ return error;
+}
+
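tomoyo_write_inet_network() and tomoyo_write_unix_network() both read a protocol token (tomoyo_proto_keyword), an operation token (matched against tomoyo_socket_keyword), and then an address or path specification; tomoyo_audit_net_log() emits the same grammar as "network <family> <protocol> <operation> <address>". Assuming the operation keywords are "bind"/"listen"/"connect"/"send" (tomoyo_socket_keyword is defined elsewhere), domain policy lines would look roughly like:

/* Illustrative policy lines only; keyword spellings are assumptions. */
static const char * const example_network_policy[] = {
	"network inet stream bind 0.0.0.0 1024-65535",
	"network inet stream connect 10.0.0.0-10.255.255.255 80",
	"network inet dgram send @dns_servers 53",	/* '@' = address_group */
	"network unix stream connect /var/run/dbus/system_bus_socket",
};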
+/**
+ * tomoyo_audit_net_log - Audit network log.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @family: Name of socket family ("inet" or "unix").
+ * @protocol: Name of protocol in @family.
+ * @operation: Name of socket operation.
+ * @address: Name of address.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_audit_net_log(struct tomoyo_request_info *r,
+ const char *family, const u8 protocol,
+ const u8 operation, const char *address)
+{
+ return tomoyo_supervisor(r, "network %s %s %s %s\n", family,
+ tomoyo_proto_keyword[protocol],
+ tomoyo_socket_keyword[operation], address);
+}
+
+/**
+ * tomoyo_audit_inet_log - Audit INET network log.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_audit_inet_log(struct tomoyo_request_info *r)
+{
+ char buf[128];
+ int len;
+ const __be32 *address = r->param.inet_network.address;
+
+ if (r->param.inet_network.is_ipv6)
+ tomoyo_print_ipv6(buf, sizeof(buf), (const struct in6_addr *)
+ address, (const struct in6_addr *) address);
+ else
+ tomoyo_print_ipv4(buf, sizeof(buf), address, address);
+ len = strlen(buf);
+ snprintf(buf + len, sizeof(buf) - len, " %u",
+ r->param.inet_network.port);
+ return tomoyo_audit_net_log(r, "inet", r->param.inet_network.protocol,
+ r->param.inet_network.operation, buf);
+}
+
+/**
+ * tomoyo_audit_unix_log - Audit UNIX network log.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_audit_unix_log(struct tomoyo_request_info *r)
+{
+ return tomoyo_audit_net_log(r, "unix", r->param.unix_network.protocol,
+ r->param.unix_network.operation,
+ r->param.unix_network.address->name);
+}
+
+/**
+ * tomoyo_check_inet_acl - Check permission for inet domain socket operation.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @ptr: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if granted, false otherwise.
+ */
+static bool tomoyo_check_inet_acl(struct tomoyo_request_info *r,
+ const struct tomoyo_acl_info *ptr)
+{
+ const struct tomoyo_inet_acl *acl =
+ container_of(ptr, typeof(*acl), head);
+ const u8 size = r->param.inet_network.is_ipv6 ? 16 : 4;
+
+ if (!(acl->perm & (1 << r->param.inet_network.operation)) ||
+ !tomoyo_compare_number_union(r->param.inet_network.port,
+ &acl->port))
+ return false;
+ if (acl->address.group)
+ return tomoyo_address_matches_group
+ (r->param.inet_network.is_ipv6,
+ r->param.inet_network.address, acl->address.group);
+ return acl->address.is_ipv6 == r->param.inet_network.is_ipv6 &&
+ memcmp(&acl->address.ip[0],
+ r->param.inet_network.address, size) <= 0 &&
+ memcmp(r->param.inet_network.address,
+ &acl->address.ip[1], size) <= 0;
+}
+
+/**
+ * tomoyo_check_unix_acl - Check permission for unix domain socket operation.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @ptr: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if granted, false otherwise.
+ */
+static bool tomoyo_check_unix_acl(struct tomoyo_request_info *r,
+ const struct tomoyo_acl_info *ptr)
+{
+ const struct tomoyo_unix_acl *acl =
+ container_of(ptr, typeof(*acl), head);
+
+ return (acl->perm & (1 << r->param.unix_network.operation)) &&
+ tomoyo_compare_name_union(r->param.unix_network.address,
+ &acl->name);
+}
+
+/**
+ * tomoyo_inet_entry - Check permission for INET network operation.
+ *
+ * @address: Pointer to "struct tomoyo_addr_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_inet_entry(const struct tomoyo_addr_info *address)
+{
+ const int idx = tomoyo_read_lock();
+ struct tomoyo_request_info r;
+ int error = 0;
+ const u8 type = tomoyo_inet2mac[address->protocol][address->operation];
+
+ if (type && tomoyo_init_request_info(&r, NULL, type)
+ != TOMOYO_CONFIG_DISABLED) {
+ r.param_type = TOMOYO_TYPE_INET_ACL;
+ r.param.inet_network.protocol = address->protocol;
+ r.param.inet_network.operation = address->operation;
+ r.param.inet_network.is_ipv6 = address->inet.is_ipv6;
+ r.param.inet_network.address = address->inet.address;
+ r.param.inet_network.port = ntohs(address->inet.port);
+ do {
+ tomoyo_check_acl(&r, tomoyo_check_inet_acl);
+ error = tomoyo_audit_inet_log(&r);
+ } while (error == TOMOYO_RETRY_REQUEST);
+ }
+ tomoyo_read_unlock(idx);
+ return error;
+}
+
+/**
+ * tomoyo_check_inet_address - Check permission for inet domain socket's operation.
+ *
+ * @addr: Pointer to "struct sockaddr".
+ * @addr_len: Size of @addr.
+ * @port: Port number.
+ * @address: Pointer to "struct tomoyo_addr_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_check_inet_address(const struct sockaddr *addr,
+ const unsigned int addr_len,
+ const u16 port,
+ struct tomoyo_addr_info *address)
+{
+ struct tomoyo_inet_addr_info *i = &address->inet;
+
+ switch (addr->sa_family) {
+ case AF_INET6:
+ if (addr_len < SIN6_LEN_RFC2133)
+ goto skip;
+ i->is_ipv6 = true;
+ i->address = (__be32 *)
+ ((struct sockaddr_in6 *) addr)->sin6_addr.s6_addr;
+ i->port = ((struct sockaddr_in6 *) addr)->sin6_port;
+ break;
+ case AF_INET:
+ if (addr_len < sizeof(struct sockaddr_in))
+ goto skip;
+ i->is_ipv6 = false;
+ i->address = (__be32 *)
+ &((struct sockaddr_in *) addr)->sin_addr;
+ i->port = ((struct sockaddr_in *) addr)->sin_port;
+ break;
+ default:
+ goto skip;
+ }
+ if (address->protocol == SOCK_RAW)
+ i->port = htons(port);
+ return tomoyo_inet_entry(address);
+skip:
+ return 0;
+}
+
+/**
+ * tomoyo_unix_entry - Check permission for UNIX network operation.
+ *
+ * @address: Pointer to "struct tomoyo_addr_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_unix_entry(const struct tomoyo_addr_info *address)
+{
+ const int idx = tomoyo_read_lock();
+ struct tomoyo_request_info r;
+ int error = 0;
+ const u8 type = tomoyo_unix2mac[address->protocol][address->operation];
+
+ if (type && tomoyo_init_request_info(&r, NULL, type)
+ != TOMOYO_CONFIG_DISABLED) {
+ char *buf = address->unix0.addr;
+ int len = address->unix0.addr_len - sizeof(sa_family_t);
+
+ if (len <= 0) {
+ buf = "anonymous";
+ len = 9;
+ } else if (buf[0]) {
+ len = strnlen(buf, len);
+ }
+ buf = tomoyo_encode2(buf, len);
+ if (buf) {
+ struct tomoyo_path_info addr;
+
+ addr.name = buf;
+ tomoyo_fill_path_info(&addr);
+ r.param_type = TOMOYO_TYPE_UNIX_ACL;
+ r.param.unix_network.protocol = address->protocol;
+ r.param.unix_network.operation = address->operation;
+ r.param.unix_network.address = &addr;
+ do {
+ tomoyo_check_acl(&r, tomoyo_check_unix_acl);
+ error = tomoyo_audit_unix_log(&r);
+ } while (error == TOMOYO_RETRY_REQUEST);
+ kfree(buf);
+ } else
+ error = -ENOMEM;
+ }
+ tomoyo_read_unlock(idx);
+ return error;
+}
+
+/**
+ * tomoyo_check_unix_address - Check permission for unix domain socket's operation.
+ *
+ * @addr: Pointer to "struct sockaddr".
+ * @addr_len: Size of @addr.
+ * @address: Pointer to "struct tomoyo_addr_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_check_unix_address(struct sockaddr *addr,
+ const unsigned int addr_len,
+ struct tomoyo_addr_info *address)
+{
+ struct tomoyo_unix_addr_info *u = &address->unix0;
+
+ if (addr->sa_family != AF_UNIX)
+ return 0;
+ u->addr = ((struct sockaddr_un *) addr)->sun_path;
+ u->addr_len = addr_len;
+ return tomoyo_unix_entry(address);
+}
+
+/**
+ * tomoyo_kernel_service - Check whether I'm kernel service or not.
+ *
+ * Returns true if I'm kernel service, false otherwise.
+ */
+static bool tomoyo_kernel_service(void)
+{
+ /* Nothing to do if I am a kernel service. */
+ return segment_eq(get_fs(), KERNEL_DS);
+}
+
+/**
+ * tomoyo_sock_family - Get socket's family.
+ *
+ * @sk: Pointer to "struct sock".
+ *
+ * Returns one of PF_INET, PF_INET6, PF_UNIX or 0.
+ */
+static u8 tomoyo_sock_family(struct sock *sk)
+{
+ u8 family;
+
+ if (tomoyo_kernel_service())
+ return 0;
+ family = sk->sk_family;
+ switch (family) {
+ case PF_INET:
+ case PF_INET6:
+ case PF_UNIX:
+ return family;
+ default:
+ return 0;
+ }
+}
+
+/**
+ * tomoyo_socket_listen_permission - Check permission for listening a socket.
+ *
+ * @sock: Pointer to "struct socket".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_socket_listen_permission(struct socket *sock)
+{
+ struct tomoyo_addr_info address;
+ const u8 family = tomoyo_sock_family(sock->sk);
+ const unsigned int type = sock->type;
+ struct sockaddr_storage addr;
+ int addr_len;
+
+ if (!family || (type != SOCK_STREAM && type != SOCK_SEQPACKET))
+ return 0;
+ {
+ const int error = sock->ops->getname(sock, (struct sockaddr *)
+ &addr, &addr_len, 0);
+
+ if (error)
+ return error;
+ }
+ address.protocol = type;
+ address.operation = TOMOYO_NETWORK_LISTEN;
+ if (family == PF_UNIX)
+ return tomoyo_check_unix_address((struct sockaddr *) &addr,
+ addr_len, &address);
+ return tomoyo_check_inet_address((struct sockaddr *) &addr, addr_len,
+ 0, &address);
+}
+
+/**
+ * tomoyo_socket_connect_permission - Check permission for setting the remote address of a socket.
+ *
+ * @sock: Pointer to "struct socket".
+ * @addr: Pointer to "struct sockaddr".
+ * @addr_len: Size of @addr.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_socket_connect_permission(struct socket *sock,
+ struct sockaddr *addr, int addr_len)
+{
+ struct tomoyo_addr_info address;
+ const u8 family = tomoyo_sock_family(sock->sk);
+ const unsigned int type = sock->type;
+
+ if (!family)
+ return 0;
+ address.protocol = type;
+ switch (type) {
+ case SOCK_DGRAM:
+ case SOCK_RAW:
+ address.operation = TOMOYO_NETWORK_SEND;
+ break;
+ case SOCK_STREAM:
+ case SOCK_SEQPACKET:
+ address.operation = TOMOYO_NETWORK_CONNECT;
+ break;
+ default:
+ return 0;
+ }
+ if (family == PF_UNIX)
+ return tomoyo_check_unix_address(addr, addr_len, &address);
+ return tomoyo_check_inet_address(addr, addr_len, sock->sk->sk_protocol,
+ &address);
+}
+
+/**
+ * tomoyo_socket_bind_permission - Check permission for setting the local address of a socket.
+ *
+ * @sock: Pointer to "struct socket".
+ * @addr: Pointer to "struct sockaddr".
+ * @addr_len: Size of @addr.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_socket_bind_permission(struct socket *sock, struct sockaddr *addr,
+ int addr_len)
+{
+ struct tomoyo_addr_info address;
+ const u8 family = tomoyo_sock_family(sock->sk);
+ const unsigned int type = sock->type;
+
+ if (!family)
+ return 0;
+ switch (type) {
+ case SOCK_STREAM:
+ case SOCK_DGRAM:
+ case SOCK_RAW:
+ case SOCK_SEQPACKET:
+ address.protocol = type;
+ address.operation = TOMOYO_NETWORK_BIND;
+ break;
+ default:
+ return 0;
+ }
+ if (family == PF_UNIX)
+ return tomoyo_check_unix_address(addr, addr_len, &address);
+ return tomoyo_check_inet_address(addr, addr_len, sock->sk->sk_protocol,
+ &address);
+}
+
+/**
+ * tomoyo_socket_sendmsg_permission - Check permission for sending a datagram.
+ *
+ * @sock: Pointer to "struct socket".
+ * @msg: Pointer to "struct msghdr".
+ * @size: Unused.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_socket_sendmsg_permission(struct socket *sock, struct msghdr *msg,
+ int size)
+{
+ struct tomoyo_addr_info address;
+ const u8 family = tomoyo_sock_family(sock->sk);
+ const unsigned int type = sock->type;
+
+ if (!msg->msg_name || !family ||
+ (type != SOCK_DGRAM && type != SOCK_RAW))
+ return 0;
+ address.protocol = type;
+ address.operation = TOMOYO_NETWORK_SEND;
+ if (family == PF_UNIX)
+ return tomoyo_check_unix_address((struct sockaddr *)
+ msg->msg_name,
+ msg->msg_namelen, &address);
+ return tomoyo_check_inet_address((struct sockaddr *) msg->msg_name,
+ msg->msg_namelen,
+ sock->sk->sk_protocol, &address);
+}
diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c
index 6c601bd300f..738bbdf8d4c 100644
--- a/security/tomoyo/realpath.c
+++ b/security/tomoyo/realpath.c
@@ -15,17 +15,19 @@
#include "../../fs/internal.h"
/**
- * tomoyo_encode: Convert binary string to ascii string.
+ * tomoyo_encode2 - Encode binary string to ascii string.
*
- * @str: String in binary format.
+ * @str: String in binary format.
+ * @str_len: Size of @str in byte.
*
* Returns pointer to @str in ascii format on success, NULL otherwise.
*
* This function uses kzalloc(), so caller must kfree() if this function
* didn't return NULL.
*/
-char *tomoyo_encode(const char *str)
+char *tomoyo_encode2(const char *str, int str_len)
{
+ int i;
int len = 0;
const char *p = str;
char *cp;
@@ -33,8 +35,9 @@ char *tomoyo_encode(const char *str)
if (!p)
return NULL;
- while (*p) {
- const unsigned char c = *p++;
+ for (i = 0; i < str_len; i++) {
+ const unsigned char c = p[i];
+
if (c == '\\')
len += 2;
else if (c > ' ' && c < 127)
@@ -49,8 +52,8 @@ char *tomoyo_encode(const char *str)
return NULL;
cp0 = cp;
p = str;
- while (*p) {
- const unsigned char c = *p++;
+ for (i = 0; i < str_len; i++) {
+ const unsigned char c = p[i];
if (c == '\\') {
*cp++ = '\\';
@@ -68,6 +71,21 @@ char *tomoyo_encode(const char *str)
}
/**
+ * tomoyo_encode - Encode binary string to ascii string.
+ *
+ * @str: String in binary format.
+ *
+ * Returns pointer to @str in ascii format on success, NULL otherwise.
+ *
+ * This function uses kzalloc(), so caller must kfree() if this function
+ * didn't return NULL.
+ */
+char *tomoyo_encode(const char *str)
+{
+ return str ? tomoyo_encode2(str, strlen(str)) : NULL;
+}
+
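tomoyo_encode2() keeps printable bytes other than '\\' as-is, doubles backslashes, and expands every other byte (including spaces) into a three-digit octal escape, so "a b" becomes "a\040b". A user-space sketch of the per-byte rule used above:

/* User-space sketch of the escaping rule in tomoyo_encode2(). */
#include <stdio.h>

static void encode_byte(unsigned char c)
{
	if (c == '\\')
		printf("\\\\");
	else if (c > ' ' && c < 127)
		putchar(c);
	else
		printf("\\%03o", c);	/* ' ' -> "\040", '\n' -> "\012" */
}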
+/**
* tomoyo_get_absolute_path - Get the path of a dentry but ignores chroot'ed root.
*
* @path: Pointer to "struct path".
diff --git a/security/tomoyo/securityfs_if.c b/security/tomoyo/securityfs_if.c
index a49c3bfd4dd..2672ac4f3be 100644
--- a/security/tomoyo/securityfs_if.c
+++ b/security/tomoyo/securityfs_if.c
@@ -8,6 +8,124 @@
#include "common.h"
/**
+ * tomoyo_check_task_acl - Check permission for task operation.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @ptr: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if granted, false otherwise.
+ */
+static bool tomoyo_check_task_acl(struct tomoyo_request_info *r,
+ const struct tomoyo_acl_info *ptr)
+{
+ const struct tomoyo_task_acl *acl = container_of(ptr, typeof(*acl),
+ head);
+ return !tomoyo_pathcmp(r->param.task.domainname, acl->domainname);
+}
+
+/**
+ * tomoyo_write_self - write() for /sys/kernel/security/tomoyo/self_domain interface.
+ *
+ * @file: Pointer to "struct file".
+ * @buf: Domainname to transit to.
+ * @count: Size of @buf.
+ * @ppos: Unused.
+ *
+ * Returns @count on success, negative value otherwise.
+ *
+ * If the domain transition was permitted but the transition itself failed,
+ * this function returns an error rather than terminating the current thread
+ * with SIGKILL.
+ */
+static ssize_t tomoyo_write_self(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char *data;
+ int error;
+ if (!count || count >= TOMOYO_EXEC_TMPSIZE - 10)
+ return -ENOMEM;
+ data = kzalloc(count + 1, GFP_NOFS);
+ if (!data)
+ return -ENOMEM;
+ if (copy_from_user(data, buf, count)) {
+ error = -EFAULT;
+ goto out;
+ }
+ tomoyo_normalize_line(data);
+ if (tomoyo_correct_domain(data)) {
+ const int idx = tomoyo_read_lock();
+ struct tomoyo_path_info name;
+ struct tomoyo_request_info r;
+ name.name = data;
+ tomoyo_fill_path_info(&name);
+ /* Check "task manual_domain_transition" permission. */
+ tomoyo_init_request_info(&r, NULL, TOMOYO_MAC_FILE_EXECUTE);
+ r.param_type = TOMOYO_TYPE_MANUAL_TASK_ACL;
+ r.param.task.domainname = &name;
+ tomoyo_check_acl(&r, tomoyo_check_task_acl);
+ if (!r.granted)
+ error = -EPERM;
+ else {
+ struct tomoyo_domain_info *new_domain =
+ tomoyo_assign_domain(data, true);
+ if (!new_domain) {
+ error = -ENOENT;
+ } else {
+ struct cred *cred = prepare_creds();
+ if (!cred) {
+ error = -ENOMEM;
+ } else {
+ struct tomoyo_domain_info *old_domain =
+ cred->security;
+ cred->security = new_domain;
+ atomic_inc(&new_domain->users);
+ atomic_dec(&old_domain->users);
+ commit_creds(cred);
+ error = 0;
+ }
+ }
+ }
+ tomoyo_read_unlock(idx);
+ } else
+ error = -EINVAL;
+out:
+ kfree(data);
+ return error ? error : count;
+}
+
+/**
+ * tomoyo_read_self - read() for /sys/kernel/security/tomoyo/self_domain interface.
+ *
+ * @file: Pointer to "struct file".
+ * @buf: Domainname which current thread belongs to.
+ * @count: Size of @buf.
+ * @ppos: Bytes read by now.
+ *
+ * Returns read size on success, negative value otherwise.
+ */
+static ssize_t tomoyo_read_self(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ const char *domain = tomoyo_domain()->domainname->name;
+ loff_t len = strlen(domain);
+ loff_t pos = *ppos;
+ if (pos >= len || !count)
+ return 0;
+ len -= pos;
+ if (count < len)
+ len = count;
+ if (copy_to_user(buf, domain + pos, len))
+ return -EFAULT;
+ *ppos += len;
+ return len;
+}
+
+/* Operations for /sys/kernel/security/tomoyo/self_domain interface. */
+static const struct file_operations tomoyo_self_operations = {
+ .write = tomoyo_write_self,
+ .read = tomoyo_read_self,
+};
+
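The self_domain entry is registered below with mode 0666, so any task can read its own domain, while writes are still gated by the "task manual_domain_transition" ACL checked in tomoyo_write_self(). A user-space sketch of both directions (the target domainname is purely illustrative):

/* User-space sketch: read the current domain, then request a transition. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *target = "<kernel> /usr/sbin/sshd";	/* illustrative */
	char domain[4096];
	ssize_t len;
	int fd = open("/sys/kernel/security/tomoyo/self_domain", O_RDWR);

	if (fd < 0)
		return 1;
	len = read(fd, domain, sizeof(domain) - 1);
	if (len > 0)
		printf("current domain: %.*s\n", (int)len, domain);
	/* Succeeds only if a matching "task manual_domain_transition"
	 * entry exists for the current domain. */
	if (write(fd, target, strlen(target)) < 0)
		perror("transition denied");
	close(fd);
	return 0;
}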
+/**
* tomoyo_open - open() for /sys/kernel/security/tomoyo/ interface.
*
* @inode: Pointer to "struct inode".
@@ -135,8 +253,6 @@ static int __init tomoyo_initerface_init(void)
TOMOYO_EXCEPTIONPOLICY);
tomoyo_create_entry("audit", 0400, tomoyo_dir,
TOMOYO_AUDIT);
- tomoyo_create_entry("self_domain", 0400, tomoyo_dir,
- TOMOYO_SELFDOMAIN);
tomoyo_create_entry(".process_status", 0600, tomoyo_dir,
TOMOYO_PROCESS_STATUS);
tomoyo_create_entry("stat", 0644, tomoyo_dir,
@@ -147,6 +263,9 @@ static int __init tomoyo_initerface_init(void)
TOMOYO_MANAGER);
tomoyo_create_entry("version", 0400, tomoyo_dir,
TOMOYO_VERSION);
+ securityfs_create_file("self_domain", 0666, tomoyo_dir, NULL,
+ &tomoyo_self_operations);
+ tomoyo_load_builtin_policy();
return 0;
}
diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
index f776400a8f3..4b327b69174 100644
--- a/security/tomoyo/tomoyo.c
+++ b/security/tomoyo/tomoyo.c
@@ -442,6 +442,64 @@ static int tomoyo_sb_pivotroot(struct path *old_path, struct path *new_path)
return tomoyo_path2_perm(TOMOYO_TYPE_PIVOT_ROOT, new_path, old_path);
}
+/**
+ * tomoyo_socket_listen - Check permission for listen().
+ *
+ * @sock: Pointer to "struct socket".
+ * @backlog: Backlog parameter.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_socket_listen(struct socket *sock, int backlog)
+{
+ return tomoyo_socket_listen_permission(sock);
+}
+
+/**
+ * tomoyo_socket_connect - Check permission for connect().
+ *
+ * @sock: Pointer to "struct socket".
+ * @addr: Pointer to "struct sockaddr".
+ * @addr_len: Size of @addr.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_socket_connect(struct socket *sock, struct sockaddr *addr,
+ int addr_len)
+{
+ return tomoyo_socket_connect_permission(sock, addr, addr_len);
+}
+
+/**
+ * tomoyo_socket_bind - Check permission for bind().
+ *
+ * @sock: Pointer to "struct socket".
+ * @addr: Pointer to "struct sockaddr".
+ * @addr_len: Size of @addr.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_socket_bind(struct socket *sock, struct sockaddr *addr,
+ int addr_len)
+{
+ return tomoyo_socket_bind_permission(sock, addr, addr_len);
+}
+
+/**
+ * tomoyo_socket_sendmsg - Check permission for sendmsg().
+ *
+ * @sock: Pointer to "struct socket".
+ * @msg: Pointer to "struct msghdr".
+ * @size: Size of message.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
+ int size)
+{
+ return tomoyo_socket_sendmsg_permission(sock, msg, size);
+}
+
/*
* tomoyo_security_ops is a "struct security_operations" which is used for
* registering TOMOYO.
@@ -472,6 +530,10 @@ static struct security_operations tomoyo_security_ops = {
.sb_mount = tomoyo_sb_mount,
.sb_umount = tomoyo_sb_umount,
.sb_pivotroot = tomoyo_sb_pivotroot,
+ .socket_bind = tomoyo_socket_bind,
+ .socket_connect = tomoyo_socket_connect,
+ .socket_listen = tomoyo_socket_listen,
+ .socket_sendmsg = tomoyo_socket_sendmsg,
};
/* Lock for GC. */
diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c
index c36bd1107fc..4a9b4b2eb75 100644
--- a/security/tomoyo/util.c
+++ b/security/tomoyo/util.c
@@ -42,6 +42,39 @@ const u8 tomoyo_index2category[TOMOYO_MAX_MAC_INDEX] = {
[TOMOYO_MAC_FILE_MOUNT] = TOMOYO_MAC_CATEGORY_FILE,
[TOMOYO_MAC_FILE_UMOUNT] = TOMOYO_MAC_CATEGORY_FILE,
[TOMOYO_MAC_FILE_PIVOT_ROOT] = TOMOYO_MAC_CATEGORY_FILE,
+ /* CONFIG::network group */
+ [TOMOYO_MAC_NETWORK_INET_STREAM_BIND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_INET_STREAM_LISTEN] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_INET_STREAM_CONNECT] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_INET_DGRAM_BIND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_INET_DGRAM_SEND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_INET_RAW_BIND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_INET_RAW_SEND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_UNIX_STREAM_BIND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_UNIX_STREAM_LISTEN] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_UNIX_STREAM_CONNECT] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_UNIX_DGRAM_BIND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_UNIX_DGRAM_SEND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_BIND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_LISTEN] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_CONNECT] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ /* CONFIG::misc group */
+ [TOMOYO_MAC_ENVIRON] = TOMOYO_MAC_CATEGORY_MISC,
};
/**
@@ -126,6 +159,31 @@ char *tomoyo_read_token(struct tomoyo_acl_param *param)
}
/**
+ * tomoyo_get_domainname - Read a domainname from a line.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns a domainname on success, NULL otherwise.
+ */
+const struct tomoyo_path_info *tomoyo_get_domainname
+(struct tomoyo_acl_param *param)
+{
+ char *start = param->data;
+ char *pos = start;
+ while (*pos) {
+ if (*pos++ != ' ' || *pos++ == '/')
+ continue;
+ pos -= 2;
+ *pos++ = '\0';
+ break;
+ }
+ param->data = pos;
+ if (tomoyo_correct_domain(start))
+ return tomoyo_get_name(start);
+ return NULL;
+}
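
As the loop above reads, the domainname is everything up to the first space that is not followed by '/'. A minimal user-space sketch of the same split (assumption: the input line is made up; this is not kernel code):

#include <stdio.h>

int main(void)
{
	char line[] = "<kernel> /usr/sbin/sshd file read /etc/passwd";
	char *pos = line;

	while (*pos) {
		if (*pos++ != ' ' || *pos++ == '/')
			continue;
		pos -= 2;
		*pos++ = '\0';
		break;
	}
	printf("domain = \"%s\"\n", line);	/* "<kernel> /usr/sbin/sshd" */
	printf("rest   = \"%s\"\n", pos);	/* "file read /etc/passwd"  */
	return 0;
}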
+
+/**
* tomoyo_parse_ulong - Parse an "unsigned long" value.
*
* @result: Pointer to "unsigned long".
@@ -920,14 +978,17 @@ int tomoyo_get_mode(const struct tomoyo_policy_namespace *ns, const u8 profile,
const u8 index)
{
u8 mode;
- const u8 category = TOMOYO_MAC_CATEGORY_FILE;
+ struct tomoyo_profile *p;
+
if (!tomoyo_policy_loaded)
return TOMOYO_CONFIG_DISABLED;
- mode = tomoyo_profile(ns, profile)->config[index];
+ p = tomoyo_profile(ns, profile);
+ mode = p->config[index];
if (mode == TOMOYO_CONFIG_USE_DEFAULT)
- mode = tomoyo_profile(ns, profile)->config[category];
+ mode = p->config[tomoyo_index2category[index]
+ + TOMOYO_MAX_MAC_INDEX];
if (mode == TOMOYO_CONFIG_USE_DEFAULT)
- mode = tomoyo_profile(ns, profile)->default_config;
+ mode = p->default_config;
return mode & 3;
}
@@ -996,6 +1057,17 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r)
perm = container_of(ptr, struct tomoyo_mkdev_acl,
head)->perm;
break;
+ case TOMOYO_TYPE_INET_ACL:
+ perm = container_of(ptr, struct tomoyo_inet_acl,
+ head)->perm;
+ break;
+ case TOMOYO_TYPE_UNIX_ACL:
+ perm = container_of(ptr, struct tomoyo_unix_acl,
+ head)->perm;
+ break;
+ case TOMOYO_TYPE_MANUAL_TASK_ACL:
+ perm = 0;
+ break;
default:
perm = 1;
}
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 86d0caf91b3..62e90b862a0 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -1761,6 +1761,10 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
snd_pcm_uframes_t avail = 0;
long wait_time, tout;
+ init_waitqueue_entry(&wait, current);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&runtime->tsleep, &wait);
+
if (runtime->no_period_wakeup)
wait_time = MAX_SCHEDULE_TIMEOUT;
else {
@@ -1771,16 +1775,32 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
}
wait_time = msecs_to_jiffies(wait_time * 1000);
}
- init_waitqueue_entry(&wait, current);
- add_wait_queue(&runtime->tsleep, &wait);
+
for (;;) {
if (signal_pending(current)) {
err = -ERESTARTSYS;
break;
}
+
+ /*
+ * We need to check whether space already became available
+ * (and thus the wakeup already happened) first, to close the
+ * race of space having become available before we sleep.
+ * This check must happen after we have been added to the
+ * waitqueue and set the current state to TASK_INTERRUPTIBLE.
+ */
+ if (is_playback)
+ avail = snd_pcm_playback_avail(runtime);
+ else
+ avail = snd_pcm_capture_avail(runtime);
+ if (avail >= runtime->twake)
+ break;
snd_pcm_stream_unlock_irq(substream);
- tout = schedule_timeout_interruptible(wait_time);
+
+ tout = schedule_timeout(wait_time);
+
snd_pcm_stream_lock_irq(substream);
+ set_current_state(TASK_INTERRUPTIBLE);
switch (runtime->status->state) {
case SNDRV_PCM_STATE_SUSPENDED:
err = -ESTRPIPE;
@@ -1806,14 +1826,9 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
err = -EIO;
break;
}
- if (is_playback)
- avail = snd_pcm_playback_avail(runtime);
- else
- avail = snd_pcm_capture_avail(runtime);
- if (avail >= runtime->twake)
- break;
}
_endloop:
+ set_current_state(TASK_RUNNING);
remove_wait_queue(&runtime->tsleep, &wait);
*availp = avail;
return err;
diff --git a/sound/pci/fm801.c b/sound/pci/fm801.c
index f9123f09e83..32b02d90670 100644
--- a/sound/pci/fm801.c
+++ b/sound/pci/fm801.c
@@ -68,6 +68,7 @@ MODULE_PARM_DESC(enable, "Enable FM801 soundcard.");
module_param_array(tea575x_tuner, int, NULL, 0444);
MODULE_PARM_DESC(tea575x_tuner, "TEA575x tuner access method (0 = auto, 1 = SF256-PCS, 2=SF256-PCP, 3=SF64-PCR, 8=disable, +16=tuner-only).");
+#define TUNER_DISABLED (1<<3)
#define TUNER_ONLY (1<<4)
#define TUNER_TYPE_MASK (~TUNER_ONLY & 0xFFFF)
@@ -1150,7 +1151,8 @@ static int snd_fm801_free(struct fm801 *chip)
__end_hw:
#ifdef CONFIG_SND_FM801_TEA575X_BOOL
- snd_tea575x_exit(&chip->tea);
+ if (!(chip->tea575x_tuner & TUNER_DISABLED))
+ snd_tea575x_exit(&chip->tea);
#endif
if (chip->irq >= 0)
free_irq(chip->irq, chip);
@@ -1236,7 +1238,6 @@ static int __devinit snd_fm801_create(struct snd_card *card,
(tea575x_tuner & TUNER_TYPE_MASK) < 4) {
if (snd_tea575x_init(&chip->tea)) {
snd_printk(KERN_ERR "TEA575x radio not found\n");
- snd_fm801_free(chip);
return -ENODEV;
}
} else if ((tea575x_tuner & TUNER_TYPE_MASK) == 0) {
@@ -1251,11 +1252,15 @@ static int __devinit snd_fm801_create(struct snd_card *card,
}
if (tea575x_tuner == 4) {
snd_printk(KERN_ERR "TEA575x radio not found\n");
- snd_fm801_free(chip);
- return -ENODEV;
+ chip->tea575x_tuner = TUNER_DISABLED;
}
}
- strlcpy(chip->tea.card, snd_fm801_tea575x_gpios[(tea575x_tuner & TUNER_TYPE_MASK) - 1].name, sizeof(chip->tea.card));
+ if (!(chip->tea575x_tuner & TUNER_DISABLED)) {
+ strlcpy(chip->tea.card,
+ snd_fm801_tea575x_gpios[(tea575x_tuner &
+ TUNER_TYPE_MASK) - 1].name,
+ sizeof(chip->tea.card));
+ }
#endif
*rchip = chip;
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 3e7850c238c..f3aefef3721 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -579,9 +579,13 @@ int snd_hda_get_conn_index(struct hda_codec *codec, hda_nid_t mux,
return -1;
}
recursive++;
- for (i = 0; i < nums; i++)
+ for (i = 0; i < nums; i++) {
+ unsigned int type = get_wcaps_type(get_wcaps(codec, conn[i]));
+ if (type == AC_WID_PIN || type == AC_WID_AUD_OUT)
+ continue;
if (snd_hda_get_conn_index(codec, conn[i], nid, recursive) >= 0)
return i;
+ }
return -1;
}
EXPORT_SYMBOL_HDA(snd_hda_get_conn_index);
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index be6982289c0..191284a1c0a 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -1924,7 +1924,8 @@ static unsigned int azx_via_get_position(struct azx *chip,
}
static unsigned int azx_get_position(struct azx *chip,
- struct azx_dev *azx_dev)
+ struct azx_dev *azx_dev,
+ bool with_check)
{
unsigned int pos;
int stream = azx_dev->substream->stream;
@@ -1940,7 +1941,7 @@ static unsigned int azx_get_position(struct azx *chip,
default:
/* use the position buffer */
pos = le32_to_cpu(*azx_dev->posbuf);
- if (chip->position_fix[stream] == POS_FIX_AUTO) {
+ if (with_check && chip->position_fix[stream] == POS_FIX_AUTO) {
if (!pos || pos == (u32)-1) {
printk(KERN_WARNING
"hda-intel: Invalid position buffer, "
@@ -1964,7 +1965,7 @@ static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
struct azx *chip = apcm->chip;
struct azx_dev *azx_dev = get_azx_dev(substream);
return bytes_to_frames(substream->runtime,
- azx_get_position(chip, azx_dev));
+ azx_get_position(chip, azx_dev, false));
}
/*
@@ -1987,7 +1988,7 @@ static int azx_position_ok(struct azx *chip, struct azx_dev *azx_dev)
return -1; /* bogus (too early) interrupt */
stream = azx_dev->substream->stream;
- pos = azx_get_position(chip, azx_dev);
+ pos = azx_get_position(chip, azx_dev, true);
if (WARN_ONCE(!azx_dev->period_bytes,
"hda-intel: zero azx_dev->period_bytes"))
@@ -2369,6 +2370,7 @@ static int azx_dev_free(struct snd_device *device)
static struct snd_pci_quirk position_fix_list[] __devinitdata = {
SND_PCI_QUIRK(0x1028, 0x01cc, "Dell D820", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1028, 0x01de, "Dell Precision 390", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1028, 0x02c6, "Dell Inspiron 1010", POS_FIX_LPIB),
SND_PCI_QUIRK(0x103c, 0x306d, "HP dv3", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB),
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index d6c93d92b55..c45f3e69bcf 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -535,7 +535,7 @@ static int add_volume(struct hda_codec *codec, const char *name,
int index, unsigned int pval, int dir,
struct snd_kcontrol **kctlp)
{
- char tmp[32];
+ char tmp[44];
struct snd_kcontrol_new knew =
HDA_CODEC_VOLUME_IDX(tmp, index, 0, 0, HDA_OUTPUT);
knew.private_value = pval;
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 7696d05b935..76752d8ea73 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -3110,6 +3110,7 @@ static const struct snd_pci_quirk cxt5066_cfg_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x21c5, "Thinkpad Edge 13", CXT5066_THINKPAD),
SND_PCI_QUIRK(0x17aa, 0x21c6, "Thinkpad Edge 13", CXT5066_ASUS),
SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD),
+ SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520 & W520", CXT5066_AUTO),
SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT5066_THINKPAD),
SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT5066_THINKPAD),
SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo U350", CXT5066_ASUS),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 7cabd731716..7a73621a890 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -168,7 +168,7 @@ struct alc_spec {
unsigned int auto_mic_valid_imux:1; /* valid imux for auto-mic */
unsigned int automute:1; /* HP automute enabled */
unsigned int detect_line:1; /* Line-out detection enabled */
- unsigned int automute_lines:1; /* automute line-out as well */
+ unsigned int automute_lines:1; /* automute line-out as well; NOP when automute_hp_lo isn't set */
unsigned int automute_hp_lo:1; /* both HP and LO available */
/* other flags */
@@ -551,7 +551,7 @@ static void update_speakers(struct hda_codec *codec)
if (spec->autocfg.line_out_pins[0] == spec->autocfg.hp_pins[0] ||
spec->autocfg.line_out_pins[0] == spec->autocfg.speaker_pins[0])
return;
- if (!spec->automute_lines || !spec->automute)
+ if (!spec->automute || (spec->automute_hp_lo && !spec->automute_lines))
on = 0;
else
on = spec->jack_present;
@@ -578,6 +578,10 @@ static void alc_line_automute(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
+ /* check LO jack only when it's different from HP */
+ if (spec->autocfg.line_out_pins[0] == spec->autocfg.hp_pins[0])
+ return;
+
spec->line_jack_present =
detect_jacks(codec, ARRAY_SIZE(spec->autocfg.line_out_pins),
spec->autocfg.line_out_pins);
@@ -803,7 +807,7 @@ static int alc_automute_mode_get(struct snd_kcontrol *kcontrol,
unsigned int val;
if (!spec->automute)
val = 0;
- else if (!spec->automute_lines)
+ else if (!spec->automute_hp_lo || !spec->automute_lines)
val = 1;
else
val = 2;
@@ -824,7 +828,8 @@ static int alc_automute_mode_put(struct snd_kcontrol *kcontrol,
spec->automute = 0;
break;
case 1:
- if (spec->automute && !spec->automute_lines)
+ if (spec->automute &&
+ (!spec->automute_hp_lo || !spec->automute_lines))
return 0;
spec->automute = 1;
spec->automute_lines = 0;
@@ -1320,7 +1325,9 @@ do_sku:
* 15 : 1 --> enable the function "Mute internal speaker
* when the external headphone out jack is plugged"
*/
- if (!spec->autocfg.hp_pins[0]) {
+ if (!spec->autocfg.hp_pins[0] &&
+ !(spec->autocfg.line_out_pins[0] &&
+ spec->autocfg.line_out_type == AUTO_PIN_HP_OUT)) {
hda_nid_t nid;
tmp = (ass >> 11) & 0x3; /* HP to chassis */
if (tmp == 0)
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 5145b663ef6..987e3cf71a0 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -5630,6 +5630,7 @@ again:
switch (codec->vendor_id) {
case 0x111d76d1:
case 0x111d76d9:
+ case 0x111d76df:
case 0x111d76e5:
case 0x111d7666:
case 0x111d7667:
@@ -6573,6 +6574,7 @@ static const struct hda_codec_preset snd_hda_preset_sigmatel[] = {
{ .id = 0x111d76cc, .name = "92HD89F3", .patch = patch_stac92hd73xx },
{ .id = 0x111d76cd, .name = "92HD89F2", .patch = patch_stac92hd73xx },
{ .id = 0x111d76ce, .name = "92HD89F1", .patch = patch_stac92hd73xx },
+ { .id = 0x111d76df, .name = "92HD93BXX", .patch = patch_stac92hd83xxx},
{ .id = 0x111d76e0, .name = "92HD91BXX", .patch = patch_stac92hd83xxx},
{ .id = 0x111d76e3, .name = "92HD98BXX", .patch = patch_stac92hd83xxx},
{ .id = 0x111d76e5, .name = "92HD99BXX", .patch = patch_stac92hd83xxx},
diff --git a/sound/soc/blackfin/bf5xx-ad193x.c b/sound/soc/blackfin/bf5xx-ad193x.c
index a118a0fb9d8..5956584ea3a 100644
--- a/sound/soc/blackfin/bf5xx-ad193x.c
+++ b/sound/soc/blackfin/bf5xx-ad193x.c
@@ -103,7 +103,7 @@ static struct snd_soc_dai_link bf5xx_ad193x_dai[] = {
.cpu_dai_name = "bfin-tdm.0",
.codec_dai_name ="ad193x-hifi",
.platform_name = "bfin-tdm-pcm-audio",
- .codec_name = "ad193x.5",
+ .codec_name = "spi0.5",
.ops = &bf5xx_ad193x_ops,
},
{
@@ -112,7 +112,7 @@ static struct snd_soc_dai_link bf5xx_ad193x_dai[] = {
.cpu_dai_name = "bfin-tdm.1",
.codec_dai_name ="ad193x-hifi",
.platform_name = "bfin-tdm-pcm-audio",
- .codec_name = "ad193x.5",
+ .codec_name = "spi0.5",
.ops = &bf5xx_ad193x_ops,
},
};
diff --git a/sound/soc/blackfin/bf5xx-ad73311.c b/sound/soc/blackfin/bf5xx-ad73311.c
index 732a247f252..b94eb7ef7d1 100644
--- a/sound/soc/blackfin/bf5xx-ad73311.c
+++ b/sound/soc/blackfin/bf5xx-ad73311.c
@@ -128,7 +128,7 @@ static int snd_ad73311_configure(void)
return 0;
}
-static int bf5xx_probe(struct platform_device *pdev)
+static int bf5xx_probe(struct snd_soc_card *card)
{
int err;
if (gpio_request(GPIO_SE, "AD73311_SE")) {
diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c
index 84f4ad56855..9801cd7cfcb 100644
--- a/sound/soc/codecs/ssm2602.c
+++ b/sound/soc/codecs/ssm2602.c
@@ -431,7 +431,8 @@ static int ssm2602_set_dai_fmt(struct snd_soc_dai *codec_dai,
static int ssm2602_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
- u16 reg = snd_soc_read(codec, SSM2602_PWR) & 0xff7f;
+ u16 reg = snd_soc_read(codec, SSM2602_PWR);
+ reg &= ~(PWR_POWER_OFF | PWR_OSC_PDN);
switch (level) {
case SND_SOC_BIAS_ON:
diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c
index ffa2ffe5ec1..aa091a0d818 100644
--- a/sound/soc/codecs/wm8753.c
+++ b/sound/soc/codecs/wm8753.c
@@ -1454,8 +1454,8 @@ static int wm8753_probe(struct snd_soc_codec *codec)
/* set the update bits */
snd_soc_update_bits(codec, WM8753_LDAC, 0x0100, 0x0100);
snd_soc_update_bits(codec, WM8753_RDAC, 0x0100, 0x0100);
- snd_soc_update_bits(codec, WM8753_LDAC, 0x0100, 0x0100);
- snd_soc_update_bits(codec, WM8753_RDAC, 0x0100, 0x0100);
+ snd_soc_update_bits(codec, WM8753_LADC, 0x0100, 0x0100);
+ snd_soc_update_bits(codec, WM8753_RADC, 0x0100, 0x0100);
snd_soc_update_bits(codec, WM8753_LOUT1V, 0x0100, 0x0100);
snd_soc_update_bits(codec, WM8753_ROUT1V, 0x0100, 0x0100);
snd_soc_update_bits(codec, WM8753_LOUT2V, 0x0100, 0x0100);
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 1725550c293..d2c315fa1b9 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -3479,31 +3479,6 @@ int wm8962_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack)
}
EXPORT_SYMBOL_GPL(wm8962_mic_detect);
-#ifdef CONFIG_PM
-static int wm8962_resume(struct snd_soc_codec *codec)
-{
- u16 *reg_cache = codec->reg_cache;
- int i;
-
- /* Restore the registers */
- for (i = 1; i < codec->driver->reg_cache_size; i++) {
- switch (i) {
- case WM8962_SOFTWARE_RESET:
- continue;
- default:
- break;
- }
-
- if (reg_cache[i] != wm8962_reg[i])
- snd_soc_write(codec, i, reg_cache[i]);
- }
-
- return 0;
-}
-#else
-#define wm8962_resume NULL
-#endif
-
#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
static int beep_rates[] = {
500, 1000, 2000, 4000,
@@ -4015,7 +3990,6 @@ static int wm8962_remove(struct snd_soc_codec *codec)
static struct snd_soc_codec_driver soc_codec_dev_wm8962 = {
.probe = wm8962_probe,
.remove = wm8962_remove,
- .resume = wm8962_resume,
.set_bias_level = wm8962_set_bias_level,
.reg_cache_size = WM8962_MAX_REGISTER + 1,
.reg_word_size = sizeof(u16),
diff --git a/sound/soc/fsl/mpc5200_dma.c b/sound/soc/fsl/mpc5200_dma.c
index fd0dc46afc3..5c6c2457386 100644
--- a/sound/soc/fsl/mpc5200_dma.c
+++ b/sound/soc/fsl/mpc5200_dma.c
@@ -369,7 +369,7 @@ static struct snd_soc_platform_driver mpc5200_audio_dma_platform = {
.pcm_free = &psc_dma_free,
};
-static int mpc5200_hpcd_probe(struct of_device *op)
+static int mpc5200_hpcd_probe(struct platform_device *op)
{
phys_addr_t fifo;
struct psc_dma *psc_dma;
@@ -487,7 +487,7 @@ out_unmap:
return ret;
}
-static int mpc5200_hpcd_remove(struct of_device *op)
+static int mpc5200_hpcd_remove(struct platform_device *op)
{
struct psc_dma *psc_dma = dev_get_drvdata(&op->dev);
@@ -519,7 +519,7 @@ MODULE_DEVICE_TABLE(of, mpc5200_hpcd_match);
static struct platform_driver mpc5200_hpcd_of_driver = {
.probe = mpc5200_hpcd_probe,
.remove = mpc5200_hpcd_remove,
- .dev = {
+ .driver = {
.owner = THIS_MODULE,
.name = "mpc5200-pcm-audio",
.of_match_table = mpc5200_hpcd_match,
diff --git a/sound/soc/imx/imx-pcm-fiq.c b/sound/soc/imx/imx-pcm-fiq.c
index 309c59e6fb6..7945625e0e0 100644
--- a/sound/soc/imx/imx-pcm-fiq.c
+++ b/sound/soc/imx/imx-pcm-fiq.c
@@ -240,7 +240,6 @@ static int ssi_irq = 0;
static int imx_pcm_fiq_new(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_card *card = rtd->card->snd_card;
struct snd_soc_dai *dai = rtd->cpu_dai;
struct snd_pcm *pcm = rtd->pcm;
int ret;
diff --git a/sound/soc/kirkwood/kirkwood-i2s.c b/sound/soc/kirkwood/kirkwood-i2s.c
index 8f16cd37c2a..d0bcf3fcea0 100644
--- a/sound/soc/kirkwood/kirkwood-i2s.c
+++ b/sound/soc/kirkwood/kirkwood-i2s.c
@@ -424,7 +424,7 @@ static __devinit int kirkwood_i2s_dev_probe(struct platform_device *pdev)
if (!priv->mem) {
dev_err(&pdev->dev, "request_mem_region failed\n");
err = -EBUSY;
- goto error_alloc;
+ goto err_alloc;
}
priv->io = ioremap(priv->mem->start, SZ_16K);
diff --git a/sound/soc/omap/mcpdm.c b/sound/soc/omap/mcpdm.c
index 928f0370745..50e59194ad8 100644
--- a/sound/soc/omap/mcpdm.c
+++ b/sound/soc/omap/mcpdm.c
@@ -449,7 +449,7 @@ exit:
return ret;
}
-int __devexit omap_mcpdm_remove(struct platform_device *pdev)
+int omap_mcpdm_remove(struct platform_device *pdev)
{
struct omap_mcpdm *mcpdm_ptr = platform_get_drvdata(pdev);
diff --git a/sound/soc/omap/mcpdm.h b/sound/soc/omap/mcpdm.h
index df3e16fb51f..20c20a8649f 100644
--- a/sound/soc/omap/mcpdm.h
+++ b/sound/soc/omap/mcpdm.h
@@ -150,4 +150,4 @@ extern int omap_mcpdm_request(void);
extern void omap_mcpdm_free(void);
extern int omap_mcpdm_set_offset(int offset1, int offset2);
int __devinit omap_mcpdm_probe(struct platform_device *pdev);
-int __devexit omap_mcpdm_remove(struct platform_device *pdev);
+int omap_mcpdm_remove(struct platform_device *pdev);
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
index ebcc2d4d2b1..478d6077845 100644
--- a/sound/soc/omap/omap-mcbsp.c
+++ b/sound/soc/omap/omap-mcbsp.c
@@ -516,6 +516,12 @@ static int omap_mcbsp_dai_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
struct omap_mcbsp_reg_cfg *regs = &mcbsp_data->regs;
int err = 0;
+ if (mcbsp_data->active)
+ if (freq == mcbsp_data->in_freq)
+ return 0;
+ else
+ return -EBUSY;
+
/* The McBSP signal muxing functions are only available on McBSP1 */
if (clk_id == OMAP_MCBSP_CLKR_SRC_CLKR ||
clk_id == OMAP_MCBSP_CLKR_SRC_CLKX ||
diff --git a/sound/soc/pxa/zylonite.c b/sound/soc/pxa/zylonite.c
index b6445757fc5..2b8350b5223 100644
--- a/sound/soc/pxa/zylonite.c
+++ b/sound/soc/pxa/zylonite.c
@@ -196,20 +196,20 @@ static int zylonite_probe(struct snd_soc_card *card)
if (clk_pout) {
pout = clk_get(NULL, "CLK_POUT");
if (IS_ERR(pout)) {
- dev_err(&pdev->dev, "Unable to obtain CLK_POUT: %ld\n",
+ dev_err(card->dev, "Unable to obtain CLK_POUT: %ld\n",
PTR_ERR(pout));
return PTR_ERR(pout);
}
ret = clk_enable(pout);
if (ret != 0) {
- dev_err(&pdev->dev, "Unable to enable CLK_POUT: %d\n",
+ dev_err(card->dev, "Unable to enable CLK_POUT: %d\n",
ret);
clk_put(pout);
return ret;
}
- dev_dbg(&pdev->dev, "MCLK enabled at %luHz\n",
+ dev_dbg(card->dev, "MCLK enabled at %luHz\n",
clk_get_rate(pout));
}
@@ -241,7 +241,7 @@ static int zylonite_resume_pre(struct snd_soc_card *card)
if (clk_pout) {
ret = clk_enable(pout);
if (ret != 0)
- dev_err(&pdev->dev, "Unable to enable CLK_POUT: %d\n",
+ dev_err(card->dev, "Unable to enable CLK_POUT: %d\n",
ret);
}
diff --git a/sound/soc/soc-cache.c b/sound/soc/soc-cache.c
index d9f8aded51f..20b7f3b003a 100644
--- a/sound/soc/soc-cache.c
+++ b/sound/soc/soc-cache.c
@@ -203,14 +203,14 @@ static int snd_soc_rbtree_cache_sync(struct snd_soc_codec *codec)
rbnode = rb_entry(node, struct snd_soc_rbtree_node, node);
for (i = 0; i < rbnode->blklen; ++i) {
regtmp = rbnode->base_reg + i;
- WARN_ON(codec->writable_register &&
- codec->writable_register(codec, regtmp));
val = snd_soc_rbtree_get_register(rbnode, i);
def = snd_soc_get_cache_val(codec->reg_def_copy, i,
rbnode->word_size);
if (val == def)
continue;
+ WARN_ON(!snd_soc_codec_writable_register(codec, regtmp));
+
codec->cache_bypass = 1;
ret = snd_soc_write(codec, regtmp, val);
codec->cache_bypass = 0;
@@ -563,8 +563,7 @@ static int snd_soc_lzo_cache_sync(struct snd_soc_codec *codec)
lzo_blocks = codec->reg_cache;
for_each_set_bit(i, lzo_blocks[0]->sync_bmp, lzo_blocks[0]->sync_bmp_nbits) {
- WARN_ON(codec->writable_register &&
- codec->writable_register(codec, i));
+ WARN_ON(!snd_soc_codec_writable_register(codec, i));
ret = snd_soc_cache_read(codec, i, &val);
if (ret)
return ret;
@@ -823,8 +822,6 @@ static int snd_soc_flat_cache_sync(struct snd_soc_codec *codec)
codec_drv = codec->driver;
for (i = 0; i < codec_drv->reg_cache_size; ++i) {
- WARN_ON(codec->writable_register &&
- codec->writable_register(codec, i));
ret = snd_soc_cache_read(codec, i, &val);
if (ret)
return ret;
@@ -832,6 +829,9 @@ static int snd_soc_flat_cache_sync(struct snd_soc_codec *codec)
if (snd_soc_get_cache_val(codec->reg_def_copy,
i, codec_drv->reg_word_size) == val)
continue;
+
+ WARN_ON(!snd_soc_codec_writable_register(codec, i));
+
ret = snd_soc_write(codec, i, val);
if (ret)
return ret;
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index b085d8e8757..ef69f5a0270 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -30,6 +30,7 @@
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/platform_device.h>
+#include <linux/ctype.h>
#include <linux/slab.h>
#include <sound/ac97_codec.h>
#include <sound/core.h>
@@ -1434,9 +1435,20 @@ static void snd_soc_instantiate_card(struct snd_soc_card *card)
"%s", card->name);
snprintf(card->snd_card->longname, sizeof(card->snd_card->longname),
"%s", card->long_name ? card->long_name : card->name);
- if (card->driver_name)
- strlcpy(card->snd_card->driver, card->driver_name,
- sizeof(card->snd_card->driver));
+ snprintf(card->snd_card->driver, sizeof(card->snd_card->driver),
+ "%s", card->driver_name ? card->driver_name : card->name);
+ for (i = 0; i < ARRAY_SIZE(card->snd_card->driver); i++) {
+ switch (card->snd_card->driver[i]) {
+ case '_':
+ case '-':
+ case '\0':
+ break;
+ default:
+ if (!isalnum(card->snd_card->driver[i]))
+ card->snd_card->driver[i] = '_';
+ break;
+ }
+ }
if (card->late_probe) {
ret = card->late_probe(card);
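
The loop above keeps alphanumerics, '_' and '-' in the ALSA driver string and replaces everything else with '_'. A small stand-alone sketch of the same filtering (assumption: the card name is made up):

#include <ctype.h>
#include <stdio.h>

int main(void)
{
	char driver[16] = "my-card 1.0";	/* illustrative name */
	size_t i;

	for (i = 0; i < sizeof(driver); i++) {
		switch (driver[i]) {
		case '_':
		case '-':
		case '\0':
			break;
		default:
			if (!isalnum((unsigned char)driver[i]))
				driver[i] = '_';
			break;
		}
	}
	printf("%s\n", driver);	/* "my-card_1_0" */
	return 0;
}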
@@ -1633,7 +1645,7 @@ int snd_soc_codec_readable_register(struct snd_soc_codec *codec,
if (codec->readable_register)
return codec->readable_register(codec, reg);
else
- return 0;
+ return 1;
}
EXPORT_SYMBOL_GPL(snd_soc_codec_readable_register);
@@ -1651,7 +1663,7 @@ int snd_soc_codec_writable_register(struct snd_soc_codec *codec,
if (codec->writable_register)
return codec->writable_register(codec, reg);
else
- return 0;
+ return 1;
}
EXPORT_SYMBOL_GPL(snd_soc_codec_writable_register);
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 7e15914b363..d67c637557a 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -2763,7 +2763,7 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_ignore_suspend);
/**
* snd_soc_dapm_free - free dapm resources
- * @card: SoC device
+ * @dapm: DAPM context
*
* Free all dapm widgets and resources.
*/
diff --git a/sound/soc/soc-jack.c b/sound/soc/soc-jack.c
index 38b00131b2f..fa31d9c2abd 100644
--- a/sound/soc/soc-jack.c
+++ b/sound/soc/soc-jack.c
@@ -105,7 +105,7 @@ void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask)
snd_soc_dapm_sync(dapm);
- snd_jack_report(jack->jack, status);
+ snd_jack_report(jack->jack, jack->status);
out:
mutex_unlock(&codec->mutex);
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 781d9e61adf..d8f2bf40145 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -530,8 +530,11 @@ snd_usb_audio_probe(struct usb_device *dev,
return chip;
__error:
- if (chip && !chip->num_interfaces)
- snd_card_free(chip->card);
+ if (chip) {
+ if (!chip->num_interfaces)
+ snd_card_free(chip->card);
+ chip->probing = 0;
+ }
mutex_unlock(&register_mutex);
__err_val:
return NULL;
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 3b8f7b80376..e9d5c271db6 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -30,6 +30,8 @@ endif
# Define EXTRA_CFLAGS=-m64 or EXTRA_CFLAGS=-m32 as appropriate for cross-builds.
#
# Define NO_DWARF if you do not want debug-info analysis feature at all.
+#
+# Define WERROR=0 to disable treating any warnings as errors.
$(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE
@$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT)
@@ -63,6 +65,11 @@ ifeq ($(ARCH),x86_64)
endif
endif
+# Treat warnings as errors unless directed not to
+ifneq ($(WERROR),0)
+ CFLAGS_WERROR := -Werror
+endif
+
#
# Include saner warnings here, which can catch bugs:
#
@@ -95,7 +102,7 @@ ifndef PERF_DEBUG
CFLAGS_OPTIMIZE = -O6
endif
-CFLAGS = -fno-omit-frame-pointer -ggdb3 -Wall -Wextra -std=gnu99 -Werror $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
+CFLAGS = -fno-omit-frame-pointer -ggdb3 -Wall -Wextra -std=gnu99 $(CFLAGS_WERROR) $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
EXTLIBS = -lpthread -lrt -lelf -lm
ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
ALL_LDFLAGS = $(LDFLAGS)
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 6b0519f885e..f4c3fbee4ba 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -161,6 +161,7 @@ static void config_attr(struct perf_evsel *evsel, struct perf_evlist *evlist)
struct perf_event_attr *attr = &evsel->attr;
int track = !evsel->idx; /* only the first counter needs these */
+ attr->disabled = 1;
attr->inherit = !no_inherit;
attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
PERF_FORMAT_TOTAL_TIME_RUNNING |
@@ -671,6 +672,8 @@ static int __cmd_record(int argc, const char **argv)
}
}
+ perf_evlist__enable(evsel_list);
+
/*
* Let the child rip
*/
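
The record changes above open every counter disabled (attr->disabled = 1) and only flip them on with the new perf_evlist__enable() once setup is done. A stand-alone sketch of the underlying kernel interface that helper drives (assumption: not perf-tool code; the event choice and names are illustrative):

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;			/* start switched off */

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	/* ... map ring buffers, fork the workload, etc. ... */

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);	/* counting starts here */
	/* workload section */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("instructions: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}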
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index 55f4c76f282..efe696f936e 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -561,7 +561,7 @@ static int test__basic_mmap(void)
}
err = perf_event__parse_sample(event, attr.sample_type, sample_size,
- false, &sample);
+ false, &sample, false);
if (err) {
pr_err("Can't parse sample, err = %d\n", err);
goto out_munmap;
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index a43433f0830..d28013b7d61 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -191,7 +191,8 @@ static void __zero_source_counters(struct sym_entry *syme)
symbol__annotate_zero_histograms(sym);
}
-static void record_precise_ip(struct sym_entry *syme, int counter, u64 ip)
+static void record_precise_ip(struct sym_entry *syme, struct map *map,
+ int counter, u64 ip)
{
struct annotation *notes;
struct symbol *sym;
@@ -205,8 +206,8 @@ static void record_precise_ip(struct sym_entry *syme, int counter, u64 ip)
if (pthread_mutex_trylock(&notes->lock))
return;
- ip = syme->map->map_ip(syme->map, ip);
- symbol__inc_addr_samples(sym, syme->map, counter, ip);
+ ip = map->map_ip(map, ip);
+ symbol__inc_addr_samples(sym, map, counter, ip);
pthread_mutex_unlock(&notes->lock);
}
@@ -810,7 +811,7 @@ static void perf_event__process_sample(const union perf_event *event,
evsel = perf_evlist__id2evsel(top.evlist, sample->id);
assert(evsel != NULL);
syme->count[evsel->idx]++;
- record_precise_ip(syme, evsel->idx, ip);
+ record_precise_ip(syme, al.map, evsel->idx, ip);
pthread_mutex_lock(&top.active_symbols_lock);
if (list_empty(&syme->node) || !syme->node.next) {
static bool first = true;
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 3c1b8a63210..437f8ca679a 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -169,12 +169,17 @@ static int perf_event__synthesize_mmap_events(union perf_event *event,
continue;
pbf += n + 3;
if (*pbf == 'x') { /* vm_exec */
+ char anonstr[] = "//anon\n";
char *execname = strchr(bf, '/');
/* Catch VDSO */
if (execname == NULL)
execname = strstr(bf, "[vdso]");
+ /* Catch anonymous mmaps */
+ if ((execname == NULL) && !strstr(bf, "["))
+ execname = anonstr;
+
if (execname == NULL)
continue;
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 1d7f66488a8..357a85b8524 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -186,6 +186,6 @@ const char *perf_event__name(unsigned int id);
int perf_event__parse_sample(const union perf_event *event, u64 type,
int sample_size, bool sample_id_all,
- struct perf_sample *sample);
+ struct perf_sample *sample, bool swapped);
#endif /* __PERF_RECORD_H */
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index c12bd476c6f..72e9f4886b6 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -113,6 +113,19 @@ void perf_evlist__disable(struct perf_evlist *evlist)
}
}
+void perf_evlist__enable(struct perf_evlist *evlist)
+{
+ int cpu, thread;
+ struct perf_evsel *pos;
+
+ for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
+ list_for_each_entry(pos, &evlist->entries, node) {
+ for (thread = 0; thread < evlist->threads->nr; thread++)
+ ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_ENABLE);
+ }
+ }
+}
+
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries;
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index ce85ae9ae57..f3491500274 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -54,6 +54,7 @@ int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite);
void perf_evlist__munmap(struct perf_evlist *evlist);
void perf_evlist__disable(struct perf_evlist *evlist);
+void perf_evlist__enable(struct perf_evlist *evlist);
static inline void perf_evlist__set_maps(struct perf_evlist *evlist,
struct cpu_map *cpus,
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index a03a36b7908..e389815078d 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -7,6 +7,8 @@
* Released under the GPL v2. (and only v2, not any later version)
*/
+#include <byteswap.h>
+#include "asm/bug.h"
#include "evsel.h"
#include "evlist.h"
#include "util.h"
@@ -342,10 +344,20 @@ static bool sample_overlap(const union perf_event *event,
int perf_event__parse_sample(const union perf_event *event, u64 type,
int sample_size, bool sample_id_all,
- struct perf_sample *data)
+ struct perf_sample *data, bool swapped)
{
const u64 *array;
+ /*
+ * used for cross-endian analysis. See git commit 65014ab3
+ * for why this goofiness is needed.
+ */
+ union {
+ u64 val64;
+ u32 val32[2];
+ } u;
+
+
data->cpu = data->pid = data->tid = -1;
data->stream_id = data->id = data->time = -1ULL;
@@ -366,9 +378,16 @@ int perf_event__parse_sample(const union perf_event *event, u64 type,
}
if (type & PERF_SAMPLE_TID) {
- u32 *p = (u32 *)array;
- data->pid = p[0];
- data->tid = p[1];
+ u.val64 = *array;
+ if (swapped) {
+ /* undo swap of u64, then swap on individual u32s */
+ u.val64 = bswap_64(u.val64);
+ u.val32[0] = bswap_32(u.val32[0]);
+ u.val32[1] = bswap_32(u.val32[1]);
+ }
+
+ data->pid = u.val32[0];
+ data->tid = u.val32[1];
array++;
}
@@ -395,8 +414,15 @@ int perf_event__parse_sample(const union perf_event *event, u64 type,
}
if (type & PERF_SAMPLE_CPU) {
- u32 *p = (u32 *)array;
- data->cpu = *p;
+
+ u.val64 = *array;
+ if (swapped) {
+ /* undo swap of u64, then swap on individual u32s */
+ u.val64 = bswap_64(u.val64);
+ u.val32[0] = bswap_32(u.val32[0]);
+ }
+
+ data->cpu = u.val32[0];
array++;
}
@@ -423,18 +449,27 @@ int perf_event__parse_sample(const union perf_event *event, u64 type,
}
if (type & PERF_SAMPLE_RAW) {
- u32 *p = (u32 *)array;
+ const u64 *pdata;
+
+ u.val64 = *array;
+ if (WARN_ONCE(swapped,
+ "Endianness of raw data not corrected!\n")) {
+ /* undo swap of u64, then swap on individual u32s */
+ u.val64 = bswap_64(u.val64);
+ u.val32[0] = bswap_32(u.val32[0]);
+ u.val32[1] = bswap_32(u.val32[1]);
+ }
if (sample_overlap(event, array, sizeof(u32)))
return -EFAULT;
- data->raw_size = *p;
- p++;
+ data->raw_size = u.val32[0];
+ pdata = (void *) array + sizeof(u32);
- if (sample_overlap(event, p, data->raw_size))
+ if (sample_overlap(event, pdata, data->raw_size))
return -EFAULT;
- data->raw_data = p;
+ data->raw_data = (void *) pdata;
}
return 0;
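
The union and "undo swap of u64, then swap on individual u32s" dance above handles pairs of u32s that were mangled by a whole-u64 byte swap on a cross-endian read. A self-contained sketch (made-up pid/tid values) of why that sequence recovers them:

#include <byteswap.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Two u32s as laid out by an opposite-endian writer, seen from here. */
	uint32_t file_u32[2] = { bswap_32(1234u), bswap_32(5678u) };
	union { uint64_t val64; uint32_t val32[2]; } u;
	uint64_t as_u64;

	/* Generic swapping treats the pair as one u64 and reverses all 8 bytes. */
	memcpy(&as_u64, file_u32, sizeof(as_u64));
	as_u64 = bswap_64(as_u64);

	/* The parser's fix: undo the u64 swap, then swap each u32. */
	u.val64 = bswap_64(as_u64);
	u.val32[0] = bswap_32(u.val32[0]);
	u.val32[1] = bswap_32(u.val32[1]);

	printf("pid = %u, tid = %u\n",		/* prints 1234 and 5678 */
	       (unsigned)u.val32[0], (unsigned)u.val32[1]);
	return 0;
}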
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 555fc3864b9..5d732621a46 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -659,7 +659,7 @@ static int find_variable(Dwarf_Die *sc_die, struct probe_finder *pf)
if (!die_find_variable_at(&pf->cu_die, pf->pvar->var, 0, &vr_die))
ret = -ENOENT;
}
- if (ret == 0)
+ if (ret >= 0)
ret = convert_variable(&vr_die, pf);
if (ret < 0)
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index cbc8f215d4b..7624324efad 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -803,7 +803,7 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
first = list_entry(evlist->entries.next, struct perf_evsel, node);
err = perf_event__parse_sample(event, first->attr.sample_type,
perf_evsel__sample_size(first),
- sample_id_all, &pevent->sample);
+ sample_id_all, &pevent->sample, false);
if (err)
return PyErr_Format(PyExc_OSError,
"perf: can't parse sample, err=%d", err);
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 170601e67d6..974d0cbee5e 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -162,7 +162,8 @@ static inline int perf_session__parse_sample(struct perf_session *session,
{
return perf_event__parse_sample(event, session->sample_type,
session->sample_size,
- session->sample_id_all, sample);
+ session->sample_id_all, sample,
+ session->header.needs_swap);
}
struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 401e220566f..1ee8f1e40f1 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -151,11 +151,17 @@ sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
u64 ip_l, ip_r;
+ if (!left->ms.sym && !right->ms.sym)
+ return right->level - left->level;
+
+ if (!left->ms.sym || !right->ms.sym)
+ return cmp_null(left->ms.sym, right->ms.sym);
+
if (left->ms.sym == right->ms.sym)
return 0;
- ip_l = left->ms.sym ? left->ms.sym->start : left->ip;
- ip_r = right->ms.sym ? right->ms.sym->start : right->ip;
+ ip_l = left->ms.sym->start;
+ ip_r = right->ms.sym->start;
return (int64_t)(ip_r - ip_l);
}
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 469c0264ed2..40eeaf07725 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -74,16 +74,104 @@ static void dso__set_sorted_by_name(struct dso *dso, enum map_type type)
bool symbol_type__is_a(char symbol_type, enum map_type map_type)
{
+ symbol_type = toupper(symbol_type);
+
switch (map_type) {
case MAP__FUNCTION:
return symbol_type == 'T' || symbol_type == 'W';
case MAP__VARIABLE:
- return symbol_type == 'D' || symbol_type == 'd';
+ return symbol_type == 'D';
default:
return false;
}
}
+static int prefix_underscores_count(const char *str)
+{
+ const char *tail = str;
+
+ while (*tail == '_')
+ tail++;
+
+ return tail - str;
+}
+
+#define SYMBOL_A 0
+#define SYMBOL_B 1
+
+static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
+{
+ s64 a;
+ s64 b;
+
+ /* Prefer a symbol with non zero length */
+ a = syma->end - syma->start;
+ b = symb->end - symb->start;
+ if ((b == 0) && (a > 0))
+ return SYMBOL_A;
+ else if ((a == 0) && (b > 0))
+ return SYMBOL_B;
+
+ /* Prefer a non weak symbol over a weak one */
+ a = syma->binding == STB_WEAK;
+ b = symb->binding == STB_WEAK;
+ if (b && !a)
+ return SYMBOL_A;
+ if (a && !b)
+ return SYMBOL_B;
+
+ /* Prefer a global symbol over a non global one */
+ a = syma->binding == STB_GLOBAL;
+ b = symb->binding == STB_GLOBAL;
+ if (a && !b)
+ return SYMBOL_A;
+ if (b && !a)
+ return SYMBOL_B;
+
+ /* Prefer a symbol with less underscores */
+ a = prefix_underscores_count(syma->name);
+ b = prefix_underscores_count(symb->name);
+ if (b > a)
+ return SYMBOL_A;
+ else if (a > b)
+ return SYMBOL_B;
+
+ /* If all else fails, choose the symbol with the longest name */
+ if (strlen(syma->name) >= strlen(symb->name))
+ return SYMBOL_A;
+ else
+ return SYMBOL_B;
+}
+
+static void symbols__fixup_duplicate(struct rb_root *symbols)
+{
+ struct rb_node *nd;
+ struct symbol *curr, *next;
+
+ nd = rb_first(symbols);
+
+ while (nd) {
+ curr = rb_entry(nd, struct symbol, rb_node);
+again:
+ nd = rb_next(&curr->rb_node);
+ next = rb_entry(nd, struct symbol, rb_node);
+
+ if (!nd)
+ break;
+
+ if (curr->start != next->start)
+ continue;
+
+ if (choose_best_symbol(curr, next) == SYMBOL_A) {
+ rb_erase(&next->rb_node, symbols);
+ goto again;
+ } else {
+ nd = rb_next(&curr->rb_node);
+ rb_erase(&curr->rb_node, symbols);
+ }
+ }
+}
+
static void symbols__fixup_end(struct rb_root *symbols)
{
struct rb_node *nd, *prevnd = rb_first(symbols);
@@ -438,18 +526,11 @@ int kallsyms__parse(const char *filename, void *arg,
char *line = NULL;
size_t n;
int err = -1;
- u64 prev_start = 0;
- char prev_symbol_type = 0;
- char *prev_symbol_name;
FILE *file = fopen(filename, "r");
if (file == NULL)
goto out_failure;
- prev_symbol_name = malloc(KSYM_NAME_LEN);
- if (prev_symbol_name == NULL)
- goto out_close;
-
err = 0;
while (!feof(file)) {
@@ -470,7 +551,7 @@ int kallsyms__parse(const char *filename, void *arg,
if (len + 2 >= line_len)
continue;
- symbol_type = toupper(line[len]);
+ symbol_type = line[len];
len += 2;
symbol_name = line + len;
len = line_len - len;
@@ -480,24 +561,18 @@ int kallsyms__parse(const char *filename, void *arg,
break;
}
- if (prev_symbol_type) {
- u64 end = start;
- if (end != prev_start)
- --end;
- err = process_symbol(arg, prev_symbol_name,
- prev_symbol_type, prev_start, end);
- if (err)
- break;
- }
-
- memcpy(prev_symbol_name, symbol_name, len + 1);
- prev_symbol_type = symbol_type;
- prev_start = start;
+ /*
+ * module symbols are not sorted so we add all
+ * symbols with zero length and rely on
+ * symbols__fixup_end() to fix it up.
+ */
+ err = process_symbol(arg, symbol_name,
+ symbol_type, start, start);
+ if (err)
+ break;
}
- free(prev_symbol_name);
free(line);
-out_close:
fclose(file);
return err;
@@ -703,6 +778,9 @@ int dso__load_kallsyms(struct dso *dso, const char *filename,
if (dso__load_all_kallsyms(dso, filename, map) < 0)
return -1;
+ symbols__fixup_duplicate(&dso->symbols[map->type]);
+ symbols__fixup_end(&dso->symbols[map->type]);
+
if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
dso->symtab_type = SYMTAB__GUEST_KALLSYMS;
else
@@ -1092,8 +1170,7 @@ static int dso__load_sym(struct dso *dso, struct map *map, const char *name,
if (dso->has_build_id) {
u8 build_id[BUILD_ID_SIZE];
- if (elf_read_build_id(elf, build_id,
- BUILD_ID_SIZE) != BUILD_ID_SIZE)
+ if (elf_read_build_id(elf, build_id, BUILD_ID_SIZE) < 0)
goto out_elf_end;
if (!dso__build_id_equal(dso, build_id))
@@ -1111,6 +1188,8 @@ static int dso__load_sym(struct dso *dso, struct map *map, const char *name,
}
opdsec = elf_section_by_name(elf, &ehdr, &opdshdr, ".opd", &opdidx);
+ if (opdshdr.sh_type != SHT_PROGBITS)
+ opdsec = NULL;
if (opdsec)
opddata = elf_rawdata(opdsec, NULL);
@@ -1276,6 +1355,7 @@ new_symbol:
* For misannotated, zeroed, ASM function sizes.
*/
if (nr > 0) {
+ symbols__fixup_duplicate(&dso->symbols[map->type]);
symbols__fixup_end(&dso->symbols[map->type]);
if (kmap) {
/*
@@ -1362,8 +1442,8 @@ static int elf_read_build_id(Elf *elf, void *bf, size_t size)
ptr = data->d_buf;
while (ptr < (data->d_buf + data->d_size)) {
GElf_Nhdr *nhdr = ptr;
- int namesz = NOTE_ALIGN(nhdr->n_namesz),
- descsz = NOTE_ALIGN(nhdr->n_descsz);
+ size_t namesz = NOTE_ALIGN(nhdr->n_namesz),
+ descsz = NOTE_ALIGN(nhdr->n_descsz);
const char *name;
ptr += sizeof(*nhdr);
@@ -1372,8 +1452,10 @@ static int elf_read_build_id(Elf *elf, void *bf, size_t size)
if (nhdr->n_type == NT_GNU_BUILD_ID &&
nhdr->n_namesz == sizeof("GNU")) {
if (memcmp(name, "GNU", sizeof("GNU")) == 0) {
- memcpy(bf, ptr, BUILD_ID_SIZE);
- err = BUILD_ID_SIZE;
+ size_t sz = min(size, descsz);
+ memcpy(bf, ptr, sz);
+ memset(bf + sz, 0, size - sz);
+ err = descsz;
break;
}
}
@@ -1425,7 +1507,7 @@ int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
while (1) {
char bf[BUFSIZ];
GElf_Nhdr nhdr;
- int namesz, descsz;
+ size_t namesz, descsz;
if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr))
break;
@@ -1434,15 +1516,16 @@ int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
descsz = NOTE_ALIGN(nhdr.n_descsz);
if (nhdr.n_type == NT_GNU_BUILD_ID &&
nhdr.n_namesz == sizeof("GNU")) {
- if (read(fd, bf, namesz) != namesz)
+ if (read(fd, bf, namesz) != (ssize_t)namesz)
break;
if (memcmp(bf, "GNU", sizeof("GNU")) == 0) {
- if (read(fd, build_id,
- BUILD_ID_SIZE) == BUILD_ID_SIZE) {
+ size_t sz = min(descsz, size);
+ if (read(fd, build_id, sz) == (ssize_t)sz) {
+ memset(build_id + sz, 0, size - sz);
err = 0;
break;
}
- } else if (read(fd, bf, descsz) != descsz)
+ } else if (read(fd, bf, descsz) != (ssize_t)descsz)
break;
} else {
int n = namesz + descsz;