-rw-r--r--Documentation/ABI/testing/sysfs-bus-usb16
-rw-r--r--Documentation/devicetree/bindings/arm/cpus.txt17
-rw-r--r--Documentation/devicetree/bindings/arm/hisilicon/hisilicon.txt6
-rw-r--r--Documentation/devicetree/bindings/arm/omap/omap.txt1
-rw-r--r--Documentation/devicetree/bindings/display/hisilicon/hisi-ade.txt42
-rw-r--r--Documentation/devicetree/bindings/display/hisilicon/hisi-drm.txt66
-rw-r--r--Documentation/devicetree/bindings/display/hisilicon/hisi-dsi.txt53
-rw-r--r--Documentation/devicetree/bindings/mailbox/hisilicon,hi6220-mailbox.txt57
-rw-r--r--Documentation/devicetree/bindings/mfd/hisilicon,hi655x.txt27
-rw-r--r--Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt6
-rw-r--r--Documentation/devicetree/bindings/phy/phy-hi6220-usb.txt16
-rw-r--r--Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt12
-rw-r--r--Documentation/devicetree/bindings/regulator/hisilicon,hi655x-regulator.txt28
-rw-r--r--Documentation/devicetree/bindings/reset/hisilicon,hi6220-reset.txt34
-rw-r--r--Documentation/devicetree/bindings/usb/dwc2.txt1
-rw-r--r--Documentation/filesystems/efivarfs.txt7
-rw-r--r--Documentation/kernel-parameters.txt2
-rw-r--r--Documentation/usb/power-management.txt11
-rw-r--r--Documentation/virtual/kvm/mmu.txt3
-rw-r--r--MAINTAINERS100
-rw-r--r--Makefile2
-rw-r--r--arch/arc/include/asm/bitops.h15
-rw-r--r--arch/arc/include/asm/io.h18
-rw-r--r--arch/arc/include/asm/irqflags-arcv2.h11
-rw-r--r--arch/arc/kernel/entry-arcv2.S30
-rw-r--r--arch/arc/kernel/mcip.c15
-rw-r--r--arch/arm/Kconfig.debug17
-rw-r--r--arch/arm/boot/dts/armada-388-gp.dts10
-rw-r--r--arch/arm/boot/dts/armada-xp-axpwifiap.dts4
-rw-r--r--arch/arm/boot/dts/armada-xp-db.dts4
-rw-r--r--arch/arm/boot/dts/armada-xp-gp.dts4
-rw-r--r--arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts4
-rw-r--r--arch/arm/boot/dts/armada-xp-linksys-mamba.dts4
-rw-r--r--arch/arm/boot/dts/armada-xp-matrix.dts4
-rw-r--r--arch/arm/boot/dts/armada-xp-netgear-rn2120.dts4
-rw-r--r--arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts6
-rw-r--r--arch/arm/boot/dts/armada-xp-synology-ds414.dts4
-rw-r--r--arch/arm/boot/dts/at91-sama5d3_xplained.dts1
-rw-r--r--arch/arm/boot/dts/at91-sama5d4_xplained.dts9
-rw-r--r--arch/arm/boot/dts/at91-sama5d4ek.dts11
-rw-r--r--arch/arm/boot/dts/dra7.dtsi10
-rw-r--r--arch/arm/boot/dts/logicpd-torpedo-som.dtsi1
-rw-r--r--arch/arm/boot/dts/omap5-board-common.dtsi33
-rw-r--r--arch/arm/boot/dts/sama5d2-pinfunc.h2
-rw-r--r--arch/arm/boot/dts/sama5d4.dtsi2
-rw-r--r--arch/arm/boot/dts/ste-nomadik-stn8815.dtsi37
-rw-r--r--arch/arm/common/icst.c9
-rw-r--r--arch/arm/include/asm/xen/page-coherent.h21
-rw-r--r--arch/arm/kvm/guest.c2
-rw-r--r--arch/arm/mach-omap2/gpmc-onenand.c6
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c9
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.h3
-rw-r--r--arch/arm/mach-omap2/sleep34xx.S61
-rw-r--r--arch/arm/mach-omap2/sleep44xx.S25
-rw-r--r--arch/arm/mach-s3c64xx/dev-audio.c41
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/dma.h52
-rw-r--r--arch/arm/plat-samsung/devs.c11
-rw-r--r--arch/arm64/Kconfig.platforms2
-rw-r--r--arch/arm64/Makefile1
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts296
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi6220.dtsi764
-rw-r--r--arch/arm64/boot/dts/hisilicon/hikey-gpio.dtsi607
-rw-r--r--arch/arm64/boot/dts/hisilicon/hikey-pinctrl.dtsi705
-rw-r--r--arch/arm64/configs/hikey_defconfig500
-rw-r--r--arch/arm64/include/asm/opcodes.h4
-rw-r--r--arch/arm64/include/asm/pgtable.h6
-rw-r--r--arch/arm64/kernel/debug-monitors.c21
-rw-r--r--arch/arm64/kernel/head.S5
-rw-r--r--arch/arm64/kernel/perf_event.c3
-rw-r--r--arch/arm64/kernel/ptrace.c6
-rw-r--r--arch/arm64/kvm/guest.c2
-rw-r--r--arch/arm64/mm/dma-mapping.c4
-rw-r--r--arch/arm64/mm/init.c4
-rw-r--r--arch/arm64/mm/mmu.c3
-rw-r--r--arch/arm64/mm/pageattr.c3
-rw-r--r--arch/arm64/mm/proc-macros.S12
-rw-r--r--arch/arm64/mm/proc.S2
-rw-r--r--arch/ia64/include/asm/io.h1
-rw-r--r--arch/m32r/kernel/setup.c3
-rw-r--r--arch/mips/Kconfig7
-rw-r--r--arch/mips/alchemy/devboards/db1000.c18
-rw-r--r--arch/mips/alchemy/devboards/db1550.c4
-rw-r--r--arch/mips/include/asm/page.h2
-rw-r--r--arch/mips/include/asm/pgtable.h4
-rw-r--r--arch/mips/include/asm/syscall.h4
-rw-r--r--arch/mips/kernel/smp.c1
-rw-r--r--arch/mips/kernel/traps.c13
-rw-r--r--arch/mips/kernel/unaligned.c51
-rw-r--r--arch/mips/kvm/mips.c4
-rw-r--r--arch/mips/loongson64/loongson-3/hpet.c10
-rw-r--r--arch/mips/loongson64/loongson-3/smp.c20
-rw-r--r--arch/mips/mm/sc-mips.c13
-rw-r--r--arch/mips/mm/tlbex.c2
-rw-r--r--arch/parisc/include/asm/hugetlb.h20
-rw-r--r--arch/parisc/include/asm/uaccess.h1
-rw-r--r--arch/parisc/include/uapi/asm/siginfo.h4
-rw-r--r--arch/parisc/kernel/asm-offsets.c1
-rw-r--r--arch/parisc/kernel/parisc_ksyms.c10
-rw-r--r--arch/parisc/kernel/ptrace.c16
-rw-r--r--arch/parisc/kernel/syscall.S5
-rw-r--r--arch/parisc/kernel/traps.c3
-rw-r--r--arch/parisc/lib/fixup.S6
-rw-r--r--arch/parisc/mm/fault.c1
-rw-r--r--arch/parisc/mm/hugetlbpage.c60
-rw-r--r--arch/powerpc/include/asm/cmpxchg.h16
-rw-r--r--arch/powerpc/include/asm/eeh.h1
-rw-r--r--arch/powerpc/include/asm/opal-api.h3
-rw-r--r--arch/powerpc/include/asm/opal.h3
-rw-r--r--arch/powerpc/include/asm/synch.h2
-rw-r--r--arch/powerpc/include/uapi/asm/elf.h2
-rw-r--r--arch/powerpc/kernel/eeh_driver.c6
-rw-r--r--arch/powerpc/kernel/eeh_pe.c35
-rw-r--r--arch/powerpc/kernel/module_64.c29
-rw-r--r--arch/powerpc/kernel/process.c18
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S16
-rw-r--r--arch/powerpc/kvm/powerpc.c20
-rw-r--r--arch/powerpc/mm/hugetlbpage.c4
-rw-r--r--arch/powerpc/platforms/powernv/Makefile1
-rw-r--r--arch/powerpc/platforms/powernv/eeh-powernv.c5
-rw-r--r--arch/powerpc/platforms/powernv/opal-kmsg.c75
-rw-r--r--arch/powerpc/platforms/powernv/opal-wrappers.S1
-rw-r--r--arch/powerpc/platforms/powernv/opal.c3
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c1
-rw-r--r--arch/powerpc/platforms/powernv/pci.c26
-rw-r--r--arch/powerpc/platforms/powernv/pci.h1
-rw-r--r--arch/s390/include/asm/fpu/internal.h2
-rw-r--r--arch/s390/include/asm/kvm_host.h1
-rw-r--r--arch/s390/include/asm/mmu_context.h16
-rw-r--r--arch/s390/include/asm/pci.h2
-rw-r--r--arch/s390/include/asm/pgalloc.h24
-rw-r--r--arch/s390/kernel/asm-offsets.c1
-rw-r--r--arch/s390/kernel/compat_signal.c2
-rw-r--r--arch/s390/kernel/entry.S106
-rw-r--r--arch/s390/kernel/head64.S2
-rw-r--r--arch/s390/kernel/setup.c1
-rw-r--r--arch/s390/kvm/kvm-s390.c149
-rw-r--r--arch/s390/mm/extable.c8
-rw-r--r--arch/s390/pci/pci.c5
-rw-r--r--arch/sh/mm/kmap.c2
-rw-r--r--arch/sparc/kernel/sys_sparc_64.c2
-rw-r--r--arch/um/drivers/mconsole_kern.c2
-rw-r--r--arch/um/os-Linux/start_up.c2
-rw-r--r--arch/x86/Kconfig27
-rw-r--r--arch/x86/crypto/chacha20-ssse3-x86_64.S6
-rw-r--r--arch/x86/entry/common.c23
-rw-r--r--arch/x86/entry/entry_64_compat.S1
-rw-r--r--arch/x86/include/asm/apic.h2
-rw-r--r--arch/x86/include/asm/boot.h2
-rw-r--r--arch/x86/include/asm/hw_irq.h1
-rw-r--r--arch/x86/include/asm/irq.h5
-rw-r--r--arch/x86/include/asm/kvm_host.h2
-rw-r--r--arch/x86/include/asm/microcode.h26
-rw-r--r--arch/x86/include/asm/mmu_context.h34
-rw-r--r--arch/x86/include/asm/pci_x86.h2
-rw-r--r--arch/x86/include/asm/perf_event.h1
-rw-r--r--arch/x86/include/asm/pgtable_types.h6
-rw-r--r--arch/x86/include/asm/xen/hypervisor.h2
-rw-r--r--arch/x86/kernel/acpi/sleep.c7
-rw-r--r--arch/x86/kernel/apic/io_apic.c6
-rw-r--r--arch/x86/kernel/apic/vector.c273
-rw-r--r--arch/x86/kernel/cpu/microcode/intel.c38
-rw-r--r--arch/x86/kernel/cpu/perf_event.c13
-rw-r--r--arch/x86/kernel/cpu/perf_event.h3
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c27
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_ds.c24
-rw-r--r--arch/x86/kernel/cpu/perf_event_knc.c4
-rw-r--r--arch/x86/kernel/ioport.c12
-rw-r--r--arch/x86/kernel/irq.c11
-rw-r--r--arch/x86/kernel/process_64.c12
-rw-r--r--arch/x86/kernel/reboot.c8
-rw-r--r--arch/x86/kvm/emulate.c4
-rw-r--r--arch/x86/kvm/i8254.c12
-rw-r--r--arch/x86/kvm/mmu.c4
-rw-r--r--arch/x86/kvm/paging_tmpl.h2
-rw-r--r--arch/x86/kvm/trace.h2
-rw-r--r--arch/x86/kvm/vmx.c76
-rw-r--r--arch/x86/kvm/x86.c40
-rw-r--r--arch/x86/lib/copy_user_64.S142
-rw-r--r--arch/x86/mm/fault.c15
-rw-r--r--arch/x86/mm/mpx.c2
-rw-r--r--arch/x86/mm/pageattr.c18
-rw-r--r--arch/x86/mm/tlb.c41
-rw-r--r--arch/x86/pci/common.c26
-rw-r--r--arch/x86/pci/fixup.c7
-rw-r--r--arch/x86/pci/intel_mid_pci.c9
-rw-r--r--arch/x86/pci/irq.c23
-rw-r--r--arch/x86/xen/enlighten.c2
-rw-r--r--arch/x86/xen/suspend.c3
-rw-r--r--arch/xtensa/kernel/head.S2
-rw-r--r--arch/xtensa/mm/cache.c8
-rw-r--r--arch/xtensa/platforms/iss/console.c10
-rw-r--r--block/bio.c7
-rw-r--r--block/blk-core.c2
-rw-r--r--block/blk-merge.c31
-rw-r--r--block/blk-settings.c4
-rw-r--r--build.config13
-rw-r--r--crypto/af_alg.c55
-rw-r--r--crypto/ahash.c5
-rw-r--r--crypto/algif_hash.c169
-rw-r--r--crypto/algif_skcipher.c252
-rw-r--r--crypto/asymmetric_keys/pkcs7_trust.c2
-rw-r--r--crypto/asymmetric_keys/x509_cert_parser.c8
-rw-r--r--crypto/crc32c_generic.c1
-rw-r--r--crypto/crypto_user.c6
-rw-r--r--crypto/keywrap.c4
-rw-r--r--crypto/shash.c5
-rw-r--r--crypto/skcipher.c2
-rw-r--r--drivers/acpi/acpi_video.c18
-rw-r--r--drivers/acpi/nfit.c71
-rw-r--r--drivers/acpi/pci_irq.c17
-rw-r--r--drivers/acpi/resource.c14
-rw-r--r--drivers/acpi/sleep.c1
-rw-r--r--drivers/acpi/video_detect.c8
-rw-r--r--drivers/android/binder.c2
-rw-r--r--drivers/ata/ahci.c26
-rw-r--r--drivers/ata/libahci.c7
-rw-r--r--drivers/ata/libata-scsi.c11
-rw-r--r--drivers/ata/libata-sff.c32
-rw-r--r--drivers/ata/pata_rb532_cf.c11
-rw-r--r--drivers/base/platform.c13
-rw-r--r--drivers/block/brd.c2
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c267
-rw-r--r--drivers/block/mtip32xx/mtip32xx.h11
-rw-r--r--drivers/block/rbd.c6
-rw-r--r--drivers/block/zram/zcomp.c4
-rw-r--r--drivers/block/zram/zcomp_lz4.c23
-rw-r--r--drivers/block/zram/zcomp_lzo.c23
-rw-r--r--drivers/block/zram/zram_drv.c7
-rw-r--r--drivers/bluetooth/ath3k.c8
-rw-r--r--drivers/bluetooth/btusb.c8
-rw-r--r--drivers/bluetooth/btwilink.c11
-rw-r--r--drivers/char/tpm/tpm-chip.c14
-rw-r--r--drivers/char/tpm/tpm_crb.c4
-rw-r--r--drivers/char/tpm/tpm_eventlog.c14
-rw-r--r--drivers/clk/bcm/clk-bcm2835.c12
-rw-r--r--drivers/clk/hisilicon/clk-hi6220.c88
-rw-r--r--drivers/clk/rockchip/clk-rk3188.c1
-rw-r--r--drivers/clk/rockchip/clk-rk3368.c48
-rw-r--r--drivers/clk/samsung/clk-cpu.c10
-rw-r--r--drivers/clocksource/tcb_clksrc.c3
-rw-r--r--drivers/clocksource/vt8500_timer.c6
-rw-r--r--drivers/cpufreq/Kconfig.arm2
-rw-r--r--drivers/cpufreq/arm_big_little.c41
-rw-r--r--drivers/cpufreq/cpufreq-dt.c9
-rw-r--r--drivers/cpufreq/cpufreq_governor.c11
-rw-r--r--drivers/cpufreq/pxa2xx-cpufreq.c2
-rw-r--r--drivers/crypto/atmel-aes.c4
-rw-r--r--drivers/crypto/atmel-sha.c27
-rw-r--r--drivers/crypto/atmel-tdes.c4
-rw-r--r--drivers/crypto/caam/ctrl.c4
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-cmac.c36
-rw-r--r--drivers/crypto/ccp/ccp-crypto-sha.c40
-rw-r--r--drivers/crypto/ccp/ccp-crypto.h22
-rw-r--r--drivers/crypto/marvell/cesa.c4
-rw-r--r--drivers/crypto/sunxi-ss/sun4i-ss-core.c2
-rw-r--r--drivers/crypto/ux500/cryp/cryp_core.c4
-rw-r--r--drivers/crypto/ux500/hash/hash_core.c4
-rw-r--r--drivers/dma/at_xdmac.c45
-rw-r--r--drivers/dma/dw/core.c49
-rw-r--r--drivers/dma/pxa_dma.c8
-rw-r--r--drivers/edac/amd64_edac.c2
-rw-r--r--drivers/edac/edac_device.c11
-rw-r--r--drivers/edac/edac_mc.c14
-rw-r--r--drivers/edac/edac_mc_sysfs.c21
-rw-r--r--drivers/edac/edac_pci.c9
-rw-r--r--drivers/edac/sb_edac.c26
-rw-r--r--drivers/firmware/Kconfig10
-rw-r--r--drivers/firmware/Makefile1
-rw-r--r--drivers/firmware/efi/efivars.c35
-rw-r--r--drivers/firmware/efi/vars.c144
-rw-r--r--drivers/firmware/reboot_reason_sram.c107
-rw-r--r--drivers/gpu/Makefile2
-rw-r--r--drivers/gpu/arm/Kconfig1
-rw-r--r--drivers/gpu/arm/Makefile1
-rw-r--r--drivers/gpu/arm/utgard/Kbuild207
-rw-r--r--drivers/gpu/arm/utgard/Kconfig118
-rw-r--r--drivers/gpu/arm/utgard/Makefile190
-rw-r--r--drivers/gpu/arm/utgard/common/mali_broadcast.c142
-rw-r--r--drivers/gpu/arm/utgard/common/mali_broadcast.h57
-rw-r--r--drivers/gpu/arm/utgard/common/mali_control_timer.c128
-rw-r--r--drivers/gpu/arm/utgard/common/mali_control_timer.h28
-rw-r--r--drivers/gpu/arm/utgard/common/mali_dlbu.c213
-rw-r--r--drivers/gpu/arm/utgard/common/mali_dlbu.h45
-rw-r--r--drivers/gpu/arm/utgard/common/mali_dvfs_policy.c308
-rw-r--r--drivers/gpu/arm/utgard/common/mali_dvfs_policy.h34
-rw-r--r--drivers/gpu/arm/utgard/common/mali_executor.c2642
-rw-r--r--drivers/gpu/arm/utgard/common/mali_executor.h104
-rw-r--r--drivers/gpu/arm/utgard/common/mali_gp.c357
-rw-r--r--drivers/gpu/arm/utgard/common/mali_gp.h127
-rw-r--r--drivers/gpu/arm/utgard/common/mali_gp_job.c301
-rw-r--r--drivers/gpu/arm/utgard/common/mali_gp_job.h325
-rw-r--r--drivers/gpu/arm/utgard/common/mali_group.c1816
-rw-r--r--drivers/gpu/arm/utgard/common/mali_group.h467
-rw-r--r--drivers/gpu/arm/utgard/common/mali_hw_core.c47
-rw-r--r--drivers/gpu/arm/utgard/common/mali_hw_core.h111
-rw-r--r--drivers/gpu/arm/utgard/common/mali_kernel_common.h181
-rw-r--r--drivers/gpu/arm/utgard/common/mali_kernel_core.c1314
-rw-r--r--drivers/gpu/arm/utgard/common/mali_kernel_core.h57
-rw-r--r--drivers/gpu/arm/utgard/common/mali_kernel_utilization.c440
-rw-r--r--drivers/gpu/arm/utgard/common/mali_kernel_utilization.h72
-rw-r--r--drivers/gpu/arm/utgard/common/mali_kernel_vsync.c45
-rw-r--r--drivers/gpu/arm/utgard/common/mali_l2_cache.c534
-rw-r--r--drivers/gpu/arm/utgard/common/mali_l2_cache.h124
-rw-r--r--drivers/gpu/arm/utgard/common/mali_mem_validation.c65
-rw-r--r--drivers/gpu/arm/utgard/common/mali_mem_validation.h19
-rw-r--r--drivers/gpu/arm/utgard/common/mali_mmu.c433
-rw-r--r--drivers/gpu/arm/utgard/common/mali_mmu.h123
-rw-r--r--drivers/gpu/arm/utgard/common/mali_mmu_page_directory.c495
-rw-r--r--drivers/gpu/arm/utgard/common/mali_mmu_page_directory.h110
-rw-r--r--drivers/gpu/arm/utgard/common/mali_osk.h1397
-rw-r--r--drivers/gpu/arm/utgard/common/mali_osk_bitops.h162
-rw-r--r--drivers/gpu/arm/utgard/common/mali_osk_list.h273
-rw-r--r--drivers/gpu/arm/utgard/common/mali_osk_mali.h97
-rw-r--r--drivers/gpu/arm/utgard/common/mali_osk_profiling.h146
-rw-r--r--drivers/gpu/arm/utgard/common/mali_osk_types.h471
-rw-r--r--drivers/gpu/arm/utgard/common/mali_pm.c1362
-rw-r--r--drivers/gpu/arm/utgard/common/mali_pm.h91
-rw-r--r--drivers/gpu/arm/utgard/common/mali_pm_domain.c209
-rw-r--r--drivers/gpu/arm/utgard/common/mali_pm_domain.h104
-rw-r--r--drivers/gpu/arm/utgard/common/mali_pmu.c270
-rw-r--r--drivers/gpu/arm/utgard/common/mali_pmu.h123
-rw-r--r--drivers/gpu/arm/utgard/common/mali_pp.c501
-rw-r--r--drivers/gpu/arm/utgard/common/mali_pp.h137
-rw-r--r--drivers/gpu/arm/utgard/common/mali_pp_job.c308
-rw-r--r--drivers/gpu/arm/utgard/common/mali_pp_job.h574
-rw-r--r--drivers/gpu/arm/utgard/common/mali_scheduler.c1354
-rw-r--r--drivers/gpu/arm/utgard/common/mali_scheduler.h130
-rw-r--r--drivers/gpu/arm/utgard/common/mali_scheduler_types.h29
-rw-r--r--drivers/gpu/arm/utgard/common/mali_session.c144
-rw-r--r--drivers/gpu/arm/utgard/common/mali_session.h127
-rw-r--r--drivers/gpu/arm/utgard/common/mali_soft_job.c438
-rw-r--r--drivers/gpu/arm/utgard/common/mali_soft_job.h190
-rw-r--r--drivers/gpu/arm/utgard/common/mali_spinlock_reentrant.c77
-rw-r--r--drivers/gpu/arm/utgard/common/mali_spinlock_reentrant.h70
-rw-r--r--drivers/gpu/arm/utgard/common/mali_timeline.c1588
-rw-r--r--drivers/gpu/arm/utgard/common/mali_timeline.h535
-rw-r--r--drivers/gpu/arm/utgard/common/mali_timeline_fence_wait.c202
-rw-r--r--drivers/gpu/arm/utgard/common/mali_timeline_fence_wait.h67
-rw-r--r--drivers/gpu/arm/utgard/common/mali_timeline_sync_fence.c158
-rw-r--r--drivers/gpu/arm/utgard/common/mali_timeline_sync_fence.h51
-rw-r--r--drivers/gpu/arm/utgard/common/mali_ukk.h551
-rw-r--r--drivers/gpu/arm/utgard/common/mali_user_settings_db.c147
-rw-r--r--drivers/gpu/arm/utgard/common/mali_user_settings_db.h39
-rw-r--r--drivers/gpu/arm/utgard/include/linux/mali/mali_utgard.h507
-rw-r--r--drivers/gpu/arm/utgard/include/linux/mali/mali_utgard_ioctl.h100
-rw-r--r--drivers/gpu/arm/utgard/include/linux/mali/mali_utgard_profiling_events.h200
-rw-r--r--drivers/gpu/arm/utgard/include/linux/mali/mali_utgard_profiling_gator_api.h315
-rw-r--r--drivers/gpu/arm/utgard/include/linux/mali/mali_utgard_uk_types.h1106
-rw-r--r--drivers/gpu/arm/utgard/linux/license/gpl/mali_kernel_license.h30
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_device_pause_resume.c36
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_kernel_linux.c941
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_kernel_linux.h30
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_kernel_sysfs.c1410
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_kernel_sysfs.h29
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_linux_trace.h162
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory.c510
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory.h143
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_block_alloc.c362
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_block_alloc.h58
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_cow.c754
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_cow.h48
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_defer_bind.c246
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_defer_bind.h65
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_dma_buf.c369
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_dma_buf.h53
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_external.c91
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_external.h29
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_manager.c965
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_manager.h51
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_os_alloc.c805
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_os_alloc.h54
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_swap_alloc.c942
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_swap_alloc.h121
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_types.h208
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_ump.c154
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_ump.h29
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_util.c149
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_util.h20
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_virtual.c127
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_memory_virtual.h35
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_osk_atomics.c59
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_osk_bitmap.c152
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_osk_irq.c200
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_osk_locks.c287
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_osk_locks.h326
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_osk_low_level_mem.c146
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_osk_mali.c390
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_osk_math.c27
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_osk_memory.c61
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_osk_misc.c92
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_osk_notification.c182
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_osk_pm.c83
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_osk_profiling.c1272
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_osk_specific.h72
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_osk_time.c59
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_osk_timers.c76
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_osk_wait_queue.c78
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_osk_wq.c240
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_pmu_power_up_down.c23
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_profiling_events.h17
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_profiling_gator_api.h17
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_profiling_internal.c275
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_profiling_internal.h35
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_sync.c447
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_sync.h111
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_uk_types.h17
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_ukk_core.c146
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_ukk_gp.c91
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_ukk_mem.c333
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_ukk_pp.c105
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_ukk_profiling.c177
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_ukk_soft_job.c90
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_ukk_timeline.c88
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_ukk_vsync.c39
-rw-r--r--drivers/gpu/arm/utgard/linux/mali_ukk_wrappers.h75
-rw-r--r--drivers/gpu/arm/utgard/platform/arm/arm.c439
-rw-r--r--drivers/gpu/arm/utgard/platform/arm/arm_core_scaling.c122
-rw-r--r--drivers/gpu/arm/utgard/platform/arm/arm_core_scaling.h44
-rw-r--r--drivers/gpu/arm/utgard/platform/hikey/mali_hikey.c683
-rw-r--r--drivers/gpu/arm/utgard/platform/hikey/mali_hikey_hi6220_registers_gpu.h66
-rw-r--r--drivers/gpu/arm/utgard/readme.txt28
-rw-r--r--drivers/gpu/arm/utgard/regs/mali_200_regs.h131
-rw-r--r--drivers/gpu/arm/utgard/regs/mali_gp_regs.h172
-rw-r--r--drivers/gpu/arm/utgard/timestamp-arm11-cc/mali_timestamp.c13
-rw-r--r--drivers/gpu/arm/utgard/timestamp-arm11-cc/mali_timestamp.h48
-rw-r--r--drivers/gpu/arm/utgard/timestamp-default/mali_timestamp.c13
-rw-r--r--drivers/gpu/arm/utgard/timestamp-default/mali_timestamp.h26
-rw-r--r--drivers/gpu/drm/Kconfig10
-rw-r--r--drivers/gpu/drm/Makefile1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c59
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c53
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_smc.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_dpm.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c7
-rw-r--r--drivers/gpu/drm/ast/ast_main.c2
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c27
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c362
-rw-r--r--drivers/gpu/drm/drm_fb_cma_helper.c8
-rw-r--r--drivers/gpu/drm/drm_irq.c73
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c161
-rw-r--r--drivers/gpu/drm/gma500/gem.c2
-rw-r--r--drivers/gpu/drm/hisilicon/Kconfig10
-rw-r--r--drivers/gpu/drm/hisilicon/Makefile5
-rw-r--r--drivers/gpu/drm/hisilicon/hisi_ade_reg.h494
-rw-r--r--drivers/gpu/drm/hisilicon/hisi_drm_ade.c1100
-rw-r--r--drivers/gpu/drm/hisilicon/hisi_drm_ade.h16
-rw-r--r--drivers/gpu/drm/hisilicon/hisi_drm_drv.c280
-rw-r--r--drivers/gpu/drm/hisilicon/hisi_drm_drv.h19
-rw-r--r--drivers/gpu/drm/hisilicon/hisi_drm_dsi.c832
-rw-r--r--drivers/gpu/drm/hisilicon/hisi_dsi_reg.h89
-rw-r--r--drivers/gpu/drm/i2c/Makefile2
-rw-r--r--drivers/gpu/drm/i2c/adv7511.c620
-rw-r--r--drivers/gpu/drm/i2c/adv7511.h58
-rw-r--r--drivers/gpu/drm/i2c/adv7511_audio.c312
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c6
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c5
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h1
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c6
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c10
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c3
-rw-r--r--drivers/gpu/drm/i915/intel_display.c33
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_panel_vbt.c8
-rw-r--r--drivers/gpu/drm/i915/intel_hotplug.c9
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c2
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c1
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h6
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c3
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c6
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c8
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c10
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h5
-rw-r--r--drivers/gpu/drm/radeon/radeon.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c58
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c19
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_sa.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_vm.c12
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c6
-rw-r--r--drivers/gpu/drm/radeon/sid.h5
-rw-r--r--drivers/gpu/drm/radeon/vce_v1_0.c12
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c2
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c6
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c21
-rw-r--r--drivers/hid/hid-core.c8
-rw-r--r--drivers/hid/hid-multitouch.c20
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c16
-rw-r--r--drivers/hid/usbhid/hid-core.c77
-rw-r--r--drivers/hid/wacom_wac.c11
-rw-r--r--drivers/hv/channel.c18
-rw-r--r--drivers/hwmon/ads1015.c2
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c11
-rw-r--r--drivers/hwmon/gpio-fan.c7
-rw-r--r--drivers/hwmon/max1111.c6
-rw-r--r--drivers/hwspinlock/hwspinlock_core.c4
-rw-r--r--drivers/hwtracing/coresight/coresight.c2
-rw-r--r--drivers/i2c/busses/i2c-brcmstb.c3
-rw-r--r--drivers/i2c/busses/i2c-i801.c2
-rw-r--r--drivers/idle/intel_idle.c108
-rw-r--r--drivers/iio/accel/Kconfig1
-rw-r--r--drivers/iio/accel/bmc150-accel-core.c7
-rw-r--r--drivers/iio/adc/Kconfig1
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c2
-rw-r--r--drivers/iio/dac/mcp4725.c1
-rw-r--r--drivers/iio/gyro/bmg160_core.c9
-rw-r--r--drivers/iio/imu/adis_buffer.c2
-rw-r--r--drivers/iio/inkern.c2
-rw-r--r--drivers/iio/light/acpi-als.c6
-rw-r--r--drivers/iio/light/ltr501.c2
-rw-r--r--drivers/iio/magnetometer/st_magn.h1
-rw-r--r--drivers/iio/pressure/mpl115.c2
-rw-r--r--drivers/infiniband/core/cm.c4
-rw-r--r--drivers/infiniband/core/cma.c2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c4
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c2
-rw-r--r--drivers/infiniband/hw/mlx5/main.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_qp.c46
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs_mcast.c35
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c24
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c122
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.h2
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c59
-rw-r--r--drivers/input/misc/Kconfig8
-rw-r--r--drivers/input/misc/Makefile1
-rw-r--r--drivers/input/misc/ati_remote2.c36
-rw-r--r--drivers/input/misc/hisi_powerkey.c238
-rw-r--r--drivers/input/misc/ims-pcu.c4
-rw-r--r--drivers/input/misc/powermate.c3
-rw-r--r--drivers/input/mouse/elantech.c2
-rw-r--r--drivers/input/mouse/synaptics.c5
-rw-r--r--drivers/input/mouse/vmmouse.c13
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h7
-rw-r--r--drivers/iommu/amd_iommu.c2
-rw-r--r--drivers/iommu/amd_iommu_init.c63
-rw-r--r--drivers/iommu/dmar.c7
-rw-r--r--drivers/iommu/intel-iommu.c6
-rw-r--r--drivers/iommu/intel-svm.c37
-rw-r--r--drivers/iommu/intel_irq_remapping.c2
-rw-r--r--drivers/iommu/io-pgtable-arm.c11
-rw-r--r--drivers/iommu/iommu.c3
-rw-r--r--drivers/irqchip/irq-atmel-aic-common.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c7
-rw-r--r--drivers/irqchip/irq-mxs.c1
-rw-r--r--drivers/irqchip/irq-omap-intc.c27
-rw-r--r--drivers/mailbox/Kconfig8
-rw-r--r--drivers/mailbox/Makefile2
-rw-r--r--drivers/mailbox/hi6220-mailbox.c371
-rw-r--r--drivers/md/bcache/btree.c5
-rw-r--r--drivers/md/bcache/super.c62
-rw-r--r--drivers/md/bcache/writeback.c37
-rw-r--r--drivers/md/bcache/writeback.h3
-rw-r--r--drivers/md/dm-cache-metadata.c98
-rw-r--r--drivers/md/dm-cache-metadata.h4
-rw-r--r--drivers/md/dm-cache-target.c12
-rw-r--r--drivers/md/dm-exception-store.h2
-rw-r--r--drivers/md/dm-snap-persistent.c5
-rw-r--r--drivers/md/dm-snap-transient.c4
-rw-r--r--drivers/md/dm-snap.c29
-rw-r--r--drivers/md/dm-table.c36
-rw-r--r--drivers/md/dm-thin-metadata.c5
-rw-r--r--drivers/md/dm-thin.c4
-rw-r--r--drivers/md/dm.c17
-rw-r--r--drivers/md/md.c28
-rw-r--r--drivers/md/md.h2
-rw-r--r--drivers/md/multipath.c10
-rw-r--r--drivers/md/persistent-data/dm-space-map-metadata.c3
-rw-r--r--drivers/md/raid1.c13
-rw-r--r--drivers/md/raid10.c13
-rw-r--r--drivers/md/raid5.c51
-rw-r--r--drivers/md/raid5.h4
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c6
-rw-r--r--drivers/media/dvb-frontends/tda1004x.c9
-rw-r--r--drivers/media/i2c/adv7511.c21
-rw-r--r--drivers/media/i2c/adv7604.c3
-rw-r--r--drivers/media/i2c/ir-kbd-i2c.c1
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c26
-rw-r--r--drivers/media/pci/saa7134/saa7134-alsa.c5
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c18
-rw-r--r--drivers/media/platform/coda/coda-bit.c2
-rw-r--r--drivers/media/platform/coda/coda-common.c10
-rw-r--r--drivers/media/platform/vsp1/vsp1_sru.c1
-rw-r--r--drivers/media/rc/sunxi-cir.c2
-rw-r--r--drivers/media/tuners/si2157.c1
-rw-r--r--drivers/media/usb/au0828/au0828-core.c2
-rw-r--r--drivers/media/usb/au0828/au0828-input.c4
-rw-r--r--drivers/media/usb/au0828/au0828-video.c63
-rw-r--r--drivers/media/usb/au0828/au0828.h9
-rw-r--r--drivers/media/usb/gspca/ov534.c9
-rw-r--r--drivers/media/usb/gspca/topro.c6
-rw-r--r--drivers/media/usb/pwc/pwc-if.c6
-rw-r--r--drivers/media/usb/usbvision/usbvision-video.c16
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c21
-rw-r--r--drivers/media/v4l2-core/videobuf2-v4l2.c6
-rw-r--r--drivers/mfd/Kconfig10
-rw-r--r--drivers/mfd/Makefile1
-rw-r--r--drivers/mfd/hi655x-pmic.c176
-rw-r--r--drivers/misc/Kconfig8
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/cxl/pci.c2
-rw-r--r--drivers/misc/cxl/vphb.c2
-rw-r--r--drivers/misc/hi6220-sysconfig.c72
-rw-r--r--drivers/misc/mei/bus.c9
-rw-r--r--drivers/misc/mei/main.c12
-rw-r--r--drivers/misc/ti-st/Kconfig8
-rw-r--r--drivers/misc/ti-st/Makefile1
-rw-r--r--drivers/misc/ti-st/st_kim.c94
-rw-r--r--drivers/misc/ti-st/st_ll.c17
-rw-r--r--drivers/misc/ti-st/tty_hci.c542
-rw-r--r--drivers/mmc/core/mmc.c6
-rw-r--r--drivers/mmc/core/sd.c8
-rw-r--r--drivers/mmc/core/sdio.c6
-rw-r--r--drivers/mmc/host/dw_mmc-k3.c13
-rw-r--r--drivers/mmc/host/dw_mmc.c25
-rw-r--r--drivers/mmc/host/mmc_spi.c6
-rw-r--r--drivers/mmc/host/mmci.c2
-rw-r--r--drivers/mmc/host/pxamci.c2
-rw-r--r--drivers/mmc/host/sdhci-acpi.c30
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c58
-rw-r--r--drivers/mmc/host/sdhci-pci.h3
-rw-r--r--drivers/mmc/host/sdhci.c41
-rw-r--r--drivers/mmc/host/sdhci.h1
-rw-r--r--drivers/mmc/host/sh_mmcif.c84
-rw-r--r--drivers/mmc/host/usdhi6rol0.c3
-rw-r--r--drivers/mtd/nand/nand_base.c3
-rw-r--r--drivers/mtd/onenand/onenand_base.c3
-rw-r--r--drivers/mtd/ubi/upd.c2
-rw-r--r--drivers/net/bonding/bond_main.c109
-rw-r--r--drivers/net/can/usb/ems_usb.c14
-rw-r--r--drivers/net/can/usb/gs_usb.c24
-rw-r--r--drivers/net/dsa/mv88e6xxx.c2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c4
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c25
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h2
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_dev.c19
-rw-r--r--drivers/net/ethernet/jme.c3
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c12
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c24
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c11
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c2
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c10
-rw-r--r--drivers/net/ethernet/rocker/rocker.c12
-rw-r--r--drivers/net/ethernet/synopsys/dwc_eth_qos.c2
-rw-r--r--drivers/net/irda/irtty-sir.c10
-rw-r--r--drivers/net/macvtap.c9
-rw-r--r--drivers/net/phy/dp83640.c17
-rw-r--r--drivers/net/ppp/ppp_generic.c36
-rw-r--r--drivers/net/ppp/pppoe.c2
-rw-r--r--drivers/net/ppp/pptp.c34
-rw-r--r--drivers/net/rionet.c4
-rw-r--r--drivers/net/team/team.c6
-rw-r--r--drivers/net/tun.c12
-rw-r--r--drivers/net/usb/cdc_ether.c8
-rw-r--r--drivers/net/usb/cdc_ncm.c6
-rw-r--r--drivers/net/usb/qmi_wwan.c8
-rw-r--r--drivers/net/usb/usbnet.c7
-rw-r--r--drivers/net/vrf.c13
-rw-r--r--drivers/net/vxlan.c39
-rw-r--r--drivers/net/wan/farsync.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c7
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/lib.c3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw.c4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mvm.h3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/ops.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c9
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c5
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c15
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/regd.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/io.h10
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c10
-rw-r--r--drivers/nvdimm/bus.c10
-rw-r--r--drivers/nvdimm/namespace_devs.c53
-rw-r--r--drivers/nvdimm/pfn_devs.c2
-rw-r--r--drivers/nvdimm/region_devs.c56
-rw-r--r--drivers/of/irq.c9
-rw-r--r--drivers/of/of_reserved_mem.c4
-rw-r--r--drivers/pci/bus.c6
-rw-r--r--drivers/pci/host/pci-dra7xx.c3
-rw-r--r--drivers/pci/host/pci-exynos.c3
-rw-r--r--drivers/pci/host/pci-imx6.c3
-rw-r--r--drivers/pci/host/pci-keystone-dw.c11
-rw-r--r--drivers/pci/host/pci-tegra.c2
-rw-r--r--drivers/pci/host/pcie-rcar.c6
-rw-r--r--drivers/pci/host/pcie-spear13xx.c3
-rw-r--r--drivers/pci/host/pcie-xilinx.c3
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c4
-rw-r--r--drivers/pci/pci.c4
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c4
-rw-r--r--drivers/pci/pcie/aer/aerdrv.h1
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c2
-rw-r--r--drivers/pci/probe.c14
-rw-r--r--drivers/pci/xen-pcifront.c10
-rw-r--r--drivers/pcmcia/db1xxx_ss.c11
-rw-r--r--drivers/phy/Kconfig9
-rw-r--r--drivers/phy/Makefile1
-rw-r--r--drivers/phy/phy-core.c16
-rw-r--r--drivers/phy/phy-hi6220-usb.c168
-rw-r--r--drivers/phy/phy-twl4030-usb.c14
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c17
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c2
-rw-r--r--drivers/pinctrl/pinctrl-pistachio.c24
-rw-r--r--drivers/pinctrl/sh-pfc/core.c4
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c17
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.h21
-rw-r--r--drivers/platform/x86/ideapad-laptop.c28
-rw-r--r--drivers/platform/x86/intel_scu_ipcutil.c2
-rw-r--r--drivers/platform/x86/toshiba_acpi.c8
-rw-r--r--drivers/regulator/Kconfig16
-rw-r--r--drivers/regulator/Makefile2
-rw-r--r--drivers/regulator/axp20x-regulator.c4
-rw-r--r--drivers/regulator/core.c22
-rw-r--r--drivers/regulator/hi6220-mtcmos.c269
-rw-r--r--drivers/regulator/hi655x-regulator.c226
-rw-r--r--drivers/reset/Kconfig1
-rw-r--r--drivers/reset/Makefile1
-rw-r--r--drivers/reset/hisilicon/Kconfig5
-rw-r--r--drivers/reset/hisilicon/Makefile1
-rw-r--r--drivers/reset/hisilicon/hi6220_reset.c109
-rw-r--r--drivers/s390/block/dasd.c1
-rw-r--r--drivers/s390/block/dasd_alias.c23
-rw-r--r--drivers/s390/block/dasd_diag.c9
-rw-r--r--drivers/scsi/aacraid/aacraid.h2
-rw-r--r--drivers/scsi/aacraid/commsup.c37
-rw-r--r--drivers/scsi/aacraid/linit.c12
-rw-r--r--drivers/scsi/aacraid/src.c30
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c1
-rw-r--r--drivers/scsi/be2iscsi/be_main.c1
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c4
-rw-r--r--drivers/scsi/ipr.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.c16
-rw-r--r--drivers/scsi/scsi_common.c12
-rw-r--r--drivers/scsi/scsi_devinfo.c2
-rw-r--r--drivers/scsi/scsi_sysfs.c6
-rw-r--r--drivers/scsi/sd.c39
-rw-r--r--drivers/scsi/sd.h7
-rw-r--r--drivers/scsi/sg.c5
-rw-r--r--drivers/scsi/sr.c4
-rw-r--r--drivers/scsi/storvsc_drv.c5
-rw-r--r--drivers/sh/pm_runtime.c2
-rw-r--r--drivers/spi/spi-atmel.c1
-rw-r--r--drivers/spi/spi-omap2-mcspi.c19
-rw-r--r--drivers/staging/android/ion/Kconfig6
-rw-r--r--drivers/staging/android/ion/Makefile2
-rw-r--r--drivers/staging/android/ion/hisilicon/Makefile2
-rw-r--r--drivers/staging/android/ion/hisilicon/hisi_ion.c266
-rw-r--r--drivers/staging/android/ion/ion.c4
-rw-r--r--drivers/staging/android/ion/ion_test.c4
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c12
-rw-r--r--drivers/staging/comedi/drivers/ni_tiocmd.c2
-rw-r--r--drivers/staging/panel/panel.c34
-rw-r--r--drivers/staging/speakup/selection.c5
-rw-r--r--drivers/staging/speakup/serialio.c13
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c16
-rw-r--r--drivers/target/target_core_device.c43
-rw-r--r--drivers/target/target_core_file.c29
-rw-r--r--drivers/target/target_core_iblock.c56
-rw-r--r--drivers/target/target_core_tmr.c140
-rw-r--r--drivers/target/target_core_transport.c278
-rw-r--r--drivers/thermal/cpu_cooling.c14
-rw-r--r--drivers/thermal/hisi_thermal.c46
-rw-r--r--drivers/thermal/step_wise.c17
-rw-r--r--drivers/thermal/thermal_core.c88
-rw-r--r--drivers/thermal/thermal_core.h1
-rw-r--r--drivers/tty/n_tty.c7
-rw-r--r--drivers/tty/pty.c21
-rw-r--r--drivers/tty/serial/8250/8250_pci.c50
-rw-r--r--drivers/tty/serial/8250/8250_port.c18
-rw-r--r--drivers/tty/serial/omap-serial.c8
-rw-r--r--drivers/tty/tty_io.c44
-rw-r--r--drivers/tty/tty_mutex.c8
-rw-r--r--drivers/usb/chipidea/otg.c2
-rw-r--r--drivers/usb/class/cdc-acm.c17
-rw-r--r--drivers/usb/class/cdc-acm.h1
-rw-r--r--drivers/usb/core/driver.c6
-rw-r--r--drivers/usb/core/hub.c55
-rw-r--r--drivers/usb/core/sysfs.c31
-rw-r--r--drivers/usb/core/usb.c2
-rw-r--r--drivers/usb/dwc2/platform.c32
-rw-r--r--drivers/usb/dwc3/core.h1
-rw-r--r--drivers/usb/dwc3/ep0.c5
-rw-r--r--drivers/usb/dwc3/gadget.c70
-rw-r--r--drivers/usb/host/xhci-pci.c52
-rw-r--r--drivers/usb/host/xhci-ring.c10
-rw-r--r--drivers/usb/host/xhci.c8
-rw-r--r--drivers/usb/misc/iowarrior.c6
-rw-r--r--drivers/usb/phy/phy-msm-usb.c37
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c4
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c6
-rw-r--r--drivers/usb/serial/cp210x.c6
-rw-r--r--drivers/usb/serial/cypress_m8.c11
-rw-r--r--drivers/usb/serial/digi_acceleport.c19
-rw-r--r--drivers/usb/serial/ftdi_sio.c5
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h9
-rw-r--r--drivers/usb/serial/mct_u232.c9
-rw-r--r--drivers/usb/serial/option.c34
-rw-r--r--drivers/usb/serial/qcserial.c7
-rw-r--r--drivers/usb/serial/visor.c11
-rw-r--r--drivers/usb/storage/uas.c23
-rw-r--r--drivers/usb/storage/unusual_uas.h7
-rw-r--r--drivers/usb/storage/usb.c5
-rw-r--r--drivers/vfio/pci/vfio_pci.c9
-rw-r--r--drivers/vfio/platform/vfio_platform_common.c9
-rw-r--r--drivers/vfio/vfio_iommu_type1.c6
-rw-r--r--drivers/video/Kconfig1
-rw-r--r--drivers/video/console/fbcon.c2
-rw-r--r--drivers/virtio/virtio_balloon.c2
-rw-r--r--drivers/virtio/virtio_pci_common.c2
-rw-r--r--drivers/virtio/virtio_pci_modern.c11
-rw-r--r--drivers/watchdog/rc32434_wdt.c2
-rw-r--r--drivers/xen/events/events_base.c28
-rw-r--r--drivers/xen/xen-pciback/pciback_ops.c9
-rw-r--r--drivers/xen/xen-scsiback.c10
-rw-r--r--fs/btrfs/async-thread.c2
-rw-r--r--fs/btrfs/backref.c10
-rw-r--r--fs/btrfs/ctree.h2
-rw-r--r--fs/btrfs/delayed-inode.c3
-rw-r--r--fs/btrfs/delayed-inode.h2
-rw-r--r--fs/btrfs/disk-io.c34
-rw-r--r--fs/btrfs/extent-tree.c13
-rw-r--r--fs/btrfs/file.c2
-rw-r--r--fs/btrfs/inode-map.c9
-rw-r--r--fs/btrfs/inode-map.h1
-rw-r--r--fs/btrfs/inode.c47
-rw-r--r--fs/btrfs/ioctl.c123
-rw-r--r--fs/btrfs/root-tree.c10
-rw-r--r--fs/btrfs/send.c16
-rw-r--r--fs/btrfs/super.c24
-rw-r--r--fs/btrfs/tree-log.c137
-rw-r--r--fs/btrfs/volumes.c29
-rw-r--r--fs/cifs/cifs_debug.c2
-rw-r--r--fs/cifs/cifs_debug.h9
-rw-r--r--fs/cifs/cifsencrypt.c2
-rw-r--r--fs/cifs/cifsfs.h12
-rw-r--r--fs/cifs/cifssmb.c21
-rw-r--r--fs/cifs/connect.c2
-rw-r--r--fs/cifs/readdir.c1
-rw-r--r--fs/cifs/smb2pdu.c24
-rw-r--r--fs/cifs/transport.c6
-rw-r--r--fs/coredump.c30
-rw-r--r--fs/dcache.c25
-rw-r--r--fs/devpts/inode.c20
-rw-r--r--fs/direct-io.c2
-rw-r--r--fs/efivarfs/file.c70
-rw-r--r--fs/efivarfs/inode.c30
-rw-r--r--fs/efivarfs/internal.h3
-rw-r--r--fs/efivarfs/super.c16
-rw-r--r--fs/ext4/balloc.c7
-rw-r--r--fs/ext4/crypto_key.c4
-rw-r--r--fs/ext4/ext4.h23
-rw-r--r--fs/ext4/ialloc.c6
-rw-r--r--fs/ext4/inode.c32
-rw-r--r--fs/ext4/move_extent.c26
-rw-r--r--fs/ext4/resize.c2
-rw-r--r--fs/ext4/super.c47
-rw-r--r--fs/fhandle.c2
-rw-r--r--fs/fs-writeback.c76
-rw-r--r--fs/fuse/cuse.c4
-rw-r--r--fs/fuse/file.c33
-rw-r--r--fs/fuse/fuse_i.h9
-rw-r--r--fs/hostfs/hostfs_kern.c4
-rw-r--r--fs/hpfs/namei.c31
-rw-r--r--fs/hugetlbfs/inode.c19
-rw-r--r--fs/jbd2/journal.c17
-rw-r--r--fs/jffs2/README.Locking5
-rw-r--r--fs/jffs2/build.c75
-rw-r--r--fs/jffs2/dir.c11
-rw-r--r--fs/jffs2/file.c39
-rw-r--r--fs/jffs2/gc.c17
-rw-r--r--fs/jffs2/nodelist.h6
-rw-r--r--fs/locks.c51
-rw-r--r--fs/namei.c22
-rw-r--r--fs/ncpfs/dir.c2
-rw-r--r--fs/nfs/dir.c6
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c8
-rw-r--r--fs/nfs/inode.c56
-rw-r--r--fs/nfs/nfs4file.c4
-rw-r--r--fs/nfs/nfs4proc.c7
-rw-r--r--fs/nfsd/nfs4proc.c1
-rw-r--r--fs/nfsd/nfs4xdr.c13
-rw-r--r--fs/ocfs2/aops.c1
-rw-r--r--fs/ocfs2/dlm/dlmconvert.c24
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c26
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c3
-rw-r--r--fs/ocfs2/dlmglue.c6
-rw-r--r--fs/open.c6
-rw-r--r--fs/overlayfs/copy_up.c41
-rw-r--r--fs/overlayfs/dir.c10
-rw-r--r--fs/overlayfs/inode.c15
-rw-r--r--fs/overlayfs/readdir.c3
-rw-r--r--fs/overlayfs/super.c51
-rw-r--r--fs/proc/array.c2
-rw-r--r--fs/proc/base.c21
-rw-r--r--fs/proc/namespaces.c4
-rw-r--r--fs/proc/task_mmu.c7
-rw-r--r--fs/proc_namespace.c2
-rw-r--r--fs/quota/dquot.c3
-rw-r--r--fs/splice.c3
-rw-r--r--fs/super.c1
-rw-r--r--fs/timerfd.c2
-rw-r--r--fs/udf/inode.c15
-rw-r--r--fs/udf/unicode.c21
-rw-r--r--fs/userfaultfd.c6
-rw-r--r--fs/xfs/libxfs/xfs_format.h2
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.c12
-rw-r--r--fs/xfs/xfs_attr_list.c19
-rw-r--r--fs/xfs/xfs_buf.c17
-rw-r--r--fs/xfs/xfs_trans_ail.c1
-rw-r--r--include/asm-generic/bitops/lock.h14
-rw-r--r--include/asm-generic/cputime_nsecs.h5
-rw-r--r--include/crypto/hash.h6
-rw-r--r--include/crypto/if_alg.h11
-rw-r--r--include/crypto/skcipher.h7
-rw-r--r--include/drm/drm_cache.h9
-rw-r--r--include/drm/drm_dp_mst_helper.h27
-rw-r--r--include/drm/drm_fixed.h53
-rw-r--r--include/drm/drm_mipi_dsi.h9
-rw-r--r--include/dt-bindings/clock/hi6220-clock.h4
-rwxr-xr-xinclude/dt-bindings/pinctrl/hisi.h59
-rw-r--r--include/dt-bindings/reset/hisi,hi6220-resets.h67
-rw-r--r--include/linux/ata.h4
-rw-r--r--include/linux/bio.h32
-rw-r--r--include/linux/blkdev.h23
-rw-r--r--include/linux/ceph/messenger.h2
-rw-r--r--include/linux/cgroup-defs.h9
-rw-r--r--include/linux/compiler-gcc.h2
-rw-r--r--include/linux/compiler.h2
-rw-r--r--include/linux/console.h1
-rw-r--r--include/linux/cpuset.h6
-rw-r--r--include/linux/dcache.h14
-rw-r--r--include/linux/device-mapper.h2
-rw-r--r--include/linux/devpts_fs.h4
-rw-r--r--include/linux/efi.h5
-rw-r--r--include/linux/filter.h4
-rw-r--r--include/linux/fs.h12
-rw-r--r--include/linux/hisi_ion.h56
-rw-r--r--include/linux/hrtimer.h34
-rw-r--r--include/linux/hyperv.h18
-rw-r--r--include/linux/if_bridge.h4
-rw-r--r--include/linux/intel-iommu.h3
-rw-r--r--include/linux/kernel.h6
-rw-r--r--include/linux/libata.h2
-rw-r--r--include/linux/mfd/hi655x-pmic.h55
-rw-r--r--include/linux/mlx5/cq.h2
-rw-r--r--include/linux/mlx5/driver.h5
-rw-r--r--include/linux/mmc/dw_mmc.h6
-rw-r--r--include/linux/module.h17
-rw-r--r--include/linux/netdevice.h22
-rw-r--r--include/linux/nfs_fs.h4
-rw-r--r--include/linux/pci.h18
-rw-r--r--include/linux/platform_data/asoc-s3c.h4
-rw-r--r--include/linux/ptrace.h24
-rw-r--r--include/linux/radix-tree.h22
-rw-r--r--include/linux/rmap.h14
-rw-r--r--include/linux/sched.h1
-rw-r--r--include/linux/shmem_fs.h5
-rw-r--r--include/linux/skbuff.h45
-rw-r--r--include/linux/thermal.h7
-rw-r--r--include/linux/ti_wilink_st.h1
-rw-r--r--include/linux/trace_events.h2
-rw-r--r--include/linux/tracepoint.h16
-rw-r--r--include/linux/tty.h3
-rw-r--r--include/linux/ucs2_string.h4
-rw-r--r--include/linux/usb.h4
-rw-r--r--include/linux/usb_usual.h2
-rw-r--r--include/linux/writeback.h5
-rw-r--r--include/net/af_unix.h4
-rw-r--r--include/net/bonding.h1
-rw-r--r--include/net/dst_metadata.h18
-rw-r--r--include/net/inet_connection_sock.h5
-rw-r--r--include/net/inet_ecn.h19
-rw-r--r--include/net/ip6_route.h12
-rw-r--r--include/net/ip_fib.h1
-rw-r--r--include/net/iw_handler.h6
-rw-r--r--include/net/scm.h1
-rw-r--r--include/net/tcp.h2
-rw-r--r--include/sound/rawmidi.h4
-rw-r--r--include/target/target_core_backend.h3
-rw-r--r--include/target/target_core_base.h3
-rw-r--r--include/uapi/linux/Kbuild2
-rw-r--r--ipc/shm.c53
-rw-r--r--kernel/bpf/helpers.c2
-rw-r--r--kernel/bpf/verifier.c12
-rw-r--r--kernel/cgroup.c45
-rw-r--r--kernel/cpuset.c71
-rw-r--r--kernel/events/core.c22
-rw-r--r--kernel/futex.c7
-rw-r--r--kernel/futex_compat.c2
-rw-r--r--kernel/irq/handle.c5
-rw-r--r--kernel/kcmp.c4
-rw-r--r--kernel/memremap.c6
-rw-r--r--kernel/module.c120
-rw-r--r--kernel/panic.c3
-rw-r--r--kernel/power/hibernate.c1
-rw-r--r--kernel/printk/printk.c35
-rw-r--r--kernel/ptrace.c39
-rw-r--r--kernel/resource.c5
-rw-r--r--kernel/sched/core.c3
-rw-r--r--kernel/sched/cputime.c14
-rw-r--r--kernel/sched/sched.h13
-rw-r--r--kernel/seccomp.c22
-rw-r--r--kernel/sys.c20
-rw-r--r--kernel/sysctl_binary.c2
-rw-r--r--kernel/time/hrtimer.c55
-rw-r--r--kernel/time/itimer.c2
-rw-r--r--kernel/time/posix-clock.c4
-rw-r--r--kernel/time/posix-timers.c2
-rw-r--r--kernel/time/tick-sched.c4
-rw-r--r--kernel/time/timekeeping.c3
-rw-r--r--kernel/time/timer_list.c2
-rw-r--r--kernel/trace/trace.c7
-rw-r--r--kernel/trace/trace_events.c17
-rw-r--r--kernel/trace/trace_events_filter.c13
-rw-r--r--kernel/trace/trace_irqsoff.c8
-rw-r--r--kernel/trace/trace_printk.c3
-rw-r--r--kernel/trace/trace_stack.c7
-rw-r--r--kernel/watchdog.c9
-rw-r--r--kernel/workqueue.c18
-rw-r--r--lib/Kconfig2
-rw-r--r--lib/dma-debug.c2
-rw-r--r--lib/dump_stack.c7
-rw-r--r--lib/klist.c6
-rw-r--r--lib/libcrc32c.c1
-rw-r--r--lib/radix-tree.c12
-rw-r--r--lib/string_helpers.c63
-rw-r--r--lib/ucs2_string.c62
-rw-r--r--mm/backing-dev.c2
-rw-r--r--mm/balloon_compaction.c4
-rw-r--r--mm/memcontrol.c44
-rw-r--r--mm/memory-failure.c2
-rw-r--r--mm/memory.c14
-rw-r--r--mm/migrate.c2
-rw-r--r--mm/mlock.c2
-rw-r--r--mm/mmap.c89
-rw-r--r--mm/page_alloc.c46
-rw-r--r--mm/page_isolation.c8
-rw-r--r--mm/pgtable-generic.c4
-rw-r--r--mm/process_vm_access.c2
-rw-r--r--mm/shmem.c9
-rw-r--r--mm/zsmalloc.c14
-rw-r--r--net/ax25/ax25_ip.c15
-rw-r--r--net/batman-adv/bridge_loop_avoidance.c10
-rw-r--r--net/batman-adv/hard-interface.h12
-rw-r--r--net/batman-adv/network-coding.c19
-rw-r--r--net/batman-adv/originator.c149
-rw-r--r--net/batman-adv/originator.h1
-rw-r--r--net/batman-adv/translation-table.c28
-rw-r--r--net/bluetooth/6lowpan.c7
-rw-r--r--net/bluetooth/hci_conn.c6
-rw-r--r--net/bluetooth/hci_request.c28
-rw-r--r--net/bluetooth/mgmt.c4
-rw-r--r--net/bluetooth/smp.c16
-rw-r--r--net/bridge/br.c3
-rw-r--r--net/bridge/br_device.c8
-rw-r--r--net/bridge/br_stp.c13
-rw-r--r--net/ceph/messenger.c91
-rw-r--r--net/ceph/osd_client.c4
-rw-r--r--net/core/dev.c16
-rw-r--r--net/core/filter.c38
-rw-r--r--net/core/flow_dissector.c16
-rw-r--r--net/core/pktgen.c4
-rw-r--r--net/core/rtnetlink.c1
-rw-r--r--net/core/scm.c7
-rw-r--r--net/core/skbuff.c24
-rw-r--r--net/core/sysctl_net_core.c10
-rw-r--r--net/dccp/ipv4.c16
-rw-r--r--net/dccp/ipv6.c14
-rw-r--r--net/ipv4/devinet.c6
-rw-r--r--net/ipv4/fib_frontend.c20
-rw-r--r--net/ipv4/igmp.c3
-rw-r--r--net/ipv4/inet_connection_sock.c14
-rw-r--r--net/ipv4/ip_fragment.c1
-rw-r--r--net/ipv4/ip_output.c8
-rw-r--r--net/ipv4/ip_sockglue.c2
-rw-r--r--net/ipv4/ip_tunnel.c3
-rw-r--r--net/ipv4/netfilter/nf_defrag_ipv4.c2
-rw-r--r--net/ipv4/netfilter/nf_nat_masquerade_ipv4.c12
-rw-r--r--net/ipv4/ping.c4
-rw-r--r--net/ipv4/raw.c4
-rw-r--r--net/ipv4/route.c77
-rw-r--r--net/ipv4/tcp.c16
-rw-r--r--net/ipv4/tcp_ipv4.c46
-rw-r--r--net/ipv4/tcp_metrics.c2
-rw-r--r--net/ipv4/tcp_minisocks.c3
-rw-r--r--net/ipv4/tcp_yeah.c2
-rw-r--r--net/ipv4/udp.c16
-rw-r--r--net/ipv4/udp_tunnel.c2
-rw-r--r--net/ipv6/addrconf.c7
-rw-r--r--net/ipv6/datagram.c3
-rw-r--r--net/ipv6/exthdrs_core.c6
-rw-r--r--net/ipv6/ip6_flowlabel.c5
-rw-r--r--net/ipv6/ip6_gre.c2
-rw-r--r--net/ipv6/ip6_output.c16
-rw-r--r--net/ipv6/ip6_tunnel.c4
-rw-r--r--net/ipv6/mcast.c3
-rw-r--r--net/ipv6/route.c7
-rw-r--r--net/ipv6/tcp_ipv6.c21
-rw-r--r--net/ipv6/udp.c10
-rw-r--r--net/ipv6/xfrm6_mode_tunnel.c2
-rw-r--r--net/iucv/af_iucv.c3
-rw-r--r--net/l2tp/l2tp_ip.c8
-rw-r--r--net/l2tp/l2tp_ip6.c8
-rw-r--r--net/l2tp/l2tp_netlink.c18
-rw-r--r--net/mac80211/agg-rx.c2
-rw-r--r--net/mac80211/ibss.c23
-rw-r--r--net/mac80211/ieee80211_i.h2
-rw-r--r--net/mac80211/iface.c3
-rw-r--r--net/mac80211/mesh.c11
-rw-r--r--net/mac80211/mesh.h4
-rw-r--r--net/mac80211/mlme.c2
-rw-r--r--net/mac80211/rc80211_minstrel.c2
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c7
-rw-r--r--net/mac80211/rx.c42
-rw-r--r--net/mac80211/scan.c12
-rw-r--r--net/mac80211/sta_info.c37
-rw-r--r--net/mpls/af_mpls.c3
-rw-r--r--net/openvswitch/datapath.c5
-rw-r--r--net/openvswitch/vport-vxlan.c2
-rw-r--r--net/packet/af_packet.c37
-rw-r--r--net/phonet/af_phonet.c4
-rw-r--r--net/rfkill/core.c16
-rw-r--r--net/sched/cls_flower.c10
-rw-r--r--net/sched/sch_api.c1
-rw-r--r--net/sctp/ipv6.c2
-rw-r--r--net/sctp/protocol.c43
-rw-r--r--net/sctp/socket.c11
-rw-r--r--net/sctp/sysctl.c2
-rw-r--r--net/socket.c38
-rw-r--r--net/sunrpc/cache.c2
-rw-r--r--net/sunrpc/xprtsock.c49
-rw-r--r--net/switchdev/switchdev.c15
-rw-r--r--net/tipc/bcast.c4
-rw-r--r--net/tipc/node.c12
-rw-r--r--net/tipc/socket.c33
-rw-r--r--net/tipc/subscr.c11
-rw-r--r--net/unix/af_unix.c50
-rw-r--r--net/unix/diag.c2
-rw-r--r--net/unix/garbage.c17
-rw-r--r--net/wireless/core.c2
-rw-r--r--net/wireless/wext-core.c52
-rw-r--r--net/xfrm/xfrm_input.c3
-rw-r--r--net/xfrm/xfrm_output.c2
-rwxr-xr-xscripts/bloat-o-meter8
-rw-r--r--scripts/coccinelle/iterators/use_after_iter.cocci2
-rw-r--r--scripts/kconfig/Makefile4
-rwxr-xr-xscripts/ld-version.sh2
-rwxr-xr-xscripts/link-vmlinux.sh2
-rwxr-xr-xscripts/package/mkspec8
-rwxr-xr-xscripts/recordmcount.pl3
-rw-r--r--security/commoncap.c7
-rw-r--r--security/integrity/evm/evm_main.c3
-rw-r--r--security/smack/smack_lsm.c8
-rw-r--r--security/yama/yama_lsm.c4
-rw-r--r--sound/core/compress_offload.c11
-rw-r--r--sound/core/control.c2
-rw-r--r--sound/core/control_compat.c90
-rw-r--r--sound/core/hrtimer.c3
-rw-r--r--sound/core/oss/pcm_oss.c21
-rw-r--r--sound/core/pcm_compat.c190
-rw-r--r--sound/core/pcm_lib.c2
-rw-r--r--sound/core/pcm_native.c16
-rw-r--r--sound/core/rawmidi.c134
-rw-r--r--sound/core/rawmidi_compat.c53
-rw-r--r--sound/core/seq/oss/seq_oss.c2
-rw-r--r--sound/core/seq/oss/seq_oss_device.h1
-rw-r--r--sound/core/seq/oss/seq_oss_init.c18
-rw-r--r--sound/core/seq/oss/seq_oss_synth.c2
-rw-r--r--sound/core/seq/seq_clientmgr.c5
-rw-r--r--sound/core/seq/seq_compat.c9
-rw-r--r--sound/core/seq/seq_memory.c13
-rw-r--r--sound/core/seq/seq_ports.c236
-rw-r--r--sound/core/seq/seq_queue.c2
-rw-r--r--sound/core/seq/seq_timer.c87
-rw-r--r--sound/core/seq/seq_virmidi.c23
-rw-r--r--sound/core/timer.c180
-rw-r--r--sound/core/timer_compat.c18
-rw-r--r--sound/drivers/dummy.c35
-rw-r--r--sound/firewire/bebob/bebob_stream.c14
-rw-r--r--sound/isa/Kconfig4
-rw-r--r--sound/pci/Kconfig3
-rw-r--r--sound/pci/hda/hda_bind.c42
-rw-r--r--sound/pci/hda/hda_generic.c91
-rw-r--r--sound/pci/hda/hda_intel.c35
-rw-r--r--sound/pci/hda/hda_jack.c2
-rw-r--r--sound/pci/hda/hda_jack.h2
-rw-r--r--sound/pci/hda/patch_ca0132.c5
-rw-r--r--sound/pci/hda/patch_cirrus.c35
-rw-r--r--sound/pci/hda/patch_conexant.c7
-rw-r--r--sound/pci/hda/patch_hdmi.c9
-rw-r--r--sound/pci/hda/patch_realtek.c166
-rw-r--r--sound/pci/hda/patch_sigmatel.c6
-rw-r--r--sound/pci/intel8x0.c1
-rw-r--r--sound/pci/rme9652/hdsp.c4
-rw-r--r--sound/pci/rme9652/hdspm.c16
-rw-r--r--sound/soc/codecs/rt5645.c2
-rw-r--r--sound/soc/codecs/wm5110.c8
-rw-r--r--sound/soc/codecs/wm8958-dsp2.c8
-rw-r--r--sound/soc/codecs/wm8994.c4
-rw-r--r--sound/soc/samsung/ac97.c26
-rw-r--r--sound/soc/samsung/dma.h2
-rw-r--r--sound/soc/samsung/dmaengine.c4
-rw-r--r--sound/soc/samsung/i2s.c47
-rw-r--r--sound/soc/samsung/pcm.c20
-rw-r--r--sound/soc/samsung/s3c2412-i2s.c4
-rw-r--r--sound/soc/samsung/s3c24xx-i2s.c4
-rw-r--r--sound/soc/samsung/spdif.c10
-rw-r--r--sound/soc/soc-compress.c23
-rw-r--r--sound/soc/soc-dapm.c8
-rw-r--r--sound/soc/soc-pcm.c3
-rw-r--r--sound/sparc/Kconfig1
-rw-r--r--sound/usb/card.c2
-rw-r--r--sound/usb/clock.c2
-rw-r--r--sound/usb/endpoint.c3
-rw-r--r--sound/usb/midi.c1
-rw-r--r--sound/usb/mixer_maps.c14
-rw-r--r--sound/usb/mixer_quirks.c6
-rw-r--r--sound/usb/pcm.c2
-rw-r--r--sound/usb/quirks.c47
-rw-r--r--sound/usb/stream.c6
-rw-r--r--tools/hv/Makefile2
-rw-r--r--tools/hv/hv_vss_daemon.c2
-rw-r--r--tools/lib/traceevent/event-parse.c5
-rw-r--r--tools/perf/util/parse-events.c9
-rw-r--r--tools/perf/util/pmu.c15
-rw-r--r--tools/perf/util/session.c2
-rw-r--r--tools/perf/util/setup.py4
-rw-r--r--tools/perf/util/stat.c1
-rwxr-xr-xtools/testing/selftests/efivarfs/efivarfs.sh19
-rw-r--r--tools/testing/selftests/efivarfs/open-unlink.c72
-rw-r--r--virt/kvm/arm/arch_timer.c9
-rw-r--r--virt/kvm/arm/vgic.c4
-rw-r--r--virt/kvm/async_pf.c2
-rw-r--r--virt/kvm/kvm_main.c24
1289 files changed, 65232 insertions, 5397 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-usb b/Documentation/ABI/testing/sysfs-bus-usb
index 3a4abfc44f5e..136ba17d2da0 100644
--- a/Documentation/ABI/testing/sysfs-bus-usb
+++ b/Documentation/ABI/testing/sysfs-bus-usb
@@ -134,19 +134,21 @@ Description:
enabled for the device. Developer can write y/Y/1 or n/N/0 to
the file to enable/disable the feature.
-What: /sys/bus/usb/devices/.../power/usb3_hardware_lpm
-Date: June 2015
+What: /sys/bus/usb/devices/.../power/usb3_hardware_lpm_u1
+ /sys/bus/usb/devices/.../power/usb3_hardware_lpm_u2
+Date: November 2015
Contact: Kevin Strasser <kevin.strasser@linux.intel.com>
+ Lu Baolu <baolu.lu@linux.intel.com>
Description:
If CONFIG_PM is set and a USB 3.0 lpm-capable device is plugged
in to a xHCI host which supports link PM, it will check if U1
and U2 exit latencies have been set in the BOS descriptor; if
- the check is is passed and the host supports USB3 hardware LPM,
+ the check is passed and the host supports USB3 hardware LPM,
USB3 hardware LPM will be enabled for the device and the USB
- device directory will contain a file named
- power/usb3_hardware_lpm. The file holds a string value (enable
- or disable) indicating whether or not USB3 hardware LPM is
- enabled for the device.
+ device directory will contain two files named
+ power/usb3_hardware_lpm_u1 and power/usb3_hardware_lpm_u2. These
+ files hold a string value (enable or disable) indicating whether
+ or not USB3 hardware LPM U1 or U2 is enabled for the device.
What: /sys/bus/usb/devices/.../removable
Date: February 2012
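
Note on the two new attributes described above: they are plain-text sysfs files, so their state can be checked from userspace without any tooling. Below is a minimal C sketch that reads the U1 state; the device path "3-3.1" is a hypothetical example — substitute a real entry from /sys/bus/usb/devices/.

    #include <stdio.h>

    /* Hypothetical device path; pick a real one from /sys/bus/usb/devices/. */
    #define LPM_U1 "/sys/bus/usb/devices/3-3.1/power/usb3_hardware_lpm_u1"

    int main(void)
    {
            char state[16] = "";
            FILE *f = fopen(LPM_U1, "r");

            if (!f) {
                    perror(LPM_U1);
                    return 1;
            }
            if (fscanf(f, "%15s", state) == 1)
                    printf("U1 hardware LPM: %s\n", state); /* "enable" or "disable" */
            fclose(f);
            return 0;
    }
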
diff --git a/Documentation/devicetree/bindings/arm/cpus.txt b/Documentation/devicetree/bindings/arm/cpus.txt
index 3a07a87fef20..6aca64f289b6 100644
--- a/Documentation/devicetree/bindings/arm/cpus.txt
+++ b/Documentation/devicetree/bindings/arm/cpus.txt
@@ -242,6 +242,23 @@ nodes to be present and contain the properties described below.
Definition: Specifies the syscon node controlling the cpu core
power domains.
+ - dynamic-power-coefficient
+ Usage: optional
+ Value type: <prop-encoded-array>
+ Definition: A u32 value that represents the running time dynamic
+ power coefficient in units of mW/MHz/uVolt^2. The
+ coefficient can either be calculated from power
+ measurements or derived by analysis.
+
+ The dynamic power consumption of the CPU is
+ proportional to the square of the Voltage (V) and
+ the clock frequency (f). The coefficient is used to
+ calculate the dynamic power as below -
+
+ Pdyn = dynamic-power-coefficient * V^2 * f
+
+ where voltage is in uV, frequency is in MHz.
+
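+		  As an illustrative note (not a measured value): at a
+		  fixed coefficient, doubling the supply voltage
+		  quadruples Pdyn, while doubling the clock frequency
+		  only doubles it.
+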
Example 1 (dual-cluster big.LITTLE system 32-bit):
cpus {
diff --git a/Documentation/devicetree/bindings/arm/hisilicon/hisilicon.txt b/Documentation/devicetree/bindings/arm/hisilicon/hisilicon.txt
index 6ac7c000af22..58495ea35c4b 100644
--- a/Documentation/devicetree/bindings/arm/hisilicon/hisilicon.txt
+++ b/Documentation/devicetree/bindings/arm/hisilicon/hisilicon.txt
@@ -229,3 +229,9 @@ Required Properties:
[1]: bootwrapper size
[2]: relocation physical address
[3]: relocation size
+
+-----------------------------------------------------------------------
+Hisilicon ion
+
+Required properties:
+- compatible : "hisilicon,ion";
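+
+An illustrative example (only the documented property is shown; any
+additional properties are beyond the scope of this binding text):
+
+	ion {
+		compatible = "hisilicon,ion";
+	};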
diff --git a/Documentation/devicetree/bindings/arm/omap/omap.txt b/Documentation/devicetree/bindings/arm/omap/omap.txt
index 9f4e5136e568..12af302bca6a 100644
--- a/Documentation/devicetree/bindings/arm/omap/omap.txt
+++ b/Documentation/devicetree/bindings/arm/omap/omap.txt
@@ -23,6 +23,7 @@ Optional properties:
during suspend.
- ti,no-reset-on-init: When present, the module should not be reset at init
- ti,no-idle-on-init: When present, the module should not be idled at init
+- ti,no-idle: When present, the module is never allowed to idle.
Example:
diff --git a/Documentation/devicetree/bindings/display/hisilicon/hisi-ade.txt b/Documentation/devicetree/bindings/display/hisilicon/hisi-ade.txt
new file mode 100644
index 000000000000..2777a2cad7cd
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/hisilicon/hisi-ade.txt
@@ -0,0 +1,42 @@
+Device-Tree bindings for the Hisilicon ADE display controller driver
+
+ADE (Advanced Display Engine) is the display controller; it grabs image
+data from memory, does composition and post image processing, and
+generates the RGB timing stream that is transferred to the DSI.
+
+Required properties:
+- compatible: value should be one of the following
+ "hisilicon,hi6220-ade".
+- reg: physical base address and length of the controller's registers.
+- reg-names: names of the register regions.
+- interrupts: the interrupt number.
+- clocks: the clocks needed.
+- clock-names: the names of the clocks.
+- ade_core_clk_rate: ADE core clock rate.
+- media_noc_clk_rate: media NOC module clock rate.
+
+Example (hi6220 SoC specific DT entry for the HiKey board):
+
+ ade: ade@f4100000 {
+ compatible = "hisilicon,hi6220-ade";
+ reg = <0x0 0xf4100000 0x0 0x7800>,
+ <0x0 0xf4410000 0x0 0x1000>;
+ reg-names = "ade_base",
+ "media_base";
+ interrupts = <0 115 4>;
+
+ clocks = <&media_ctrl HI6220_ADE_CORE>,
+ <&media_ctrl HI6220_CODEC_JPEG>,
+ <&media_ctrl HI6220_ADE_PIX_SRC>,
+ <&media_ctrl HI6220_PLL_SYS>,
+ <&media_ctrl HI6220_PLL_SYS_MEDIA>;
+ clock-names = "clk_ade_core",
+ "aclk_codec_jpeg_src",
+ "clk_ade_pix",
+ "clk_syspll_src",
+ "clk_medpll_src";
+ ade_core_clk_rate = <360000000>;
+ media_noc_clk_rate = <288000000>;
+ };
diff --git a/Documentation/devicetree/bindings/display/hisilicon/hisi-drm.txt b/Documentation/devicetree/bindings/display/hisilicon/hisi-drm.txt
new file mode 100644
index 000000000000..fd930260744f
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/hisilicon/hisi-drm.txt
@@ -0,0 +1,66 @@
+Hisilicon DRM master device
+
+The Hisilicon DRM master device is a virtual device needed to list all
+the other display-related nodes that comprise the display subsystem.
+
+
+Required properties:
+- compatible: Should be "hisilicon,<chip>-dss"
+- #address-cells: should be set to 2.
+- #size-cells: should be set to 2.
+- ranges: to allow probing of subdevices.
+
+Optional properties:
+- dma-coherent: Present if dma operations are coherent.
+
+Required sub nodes:
+All device nodes of the SoC's display subsystem, such as the display
+controller node and the DSI node, should be sub nodes.
+
+Example (hi6220 SoC specific DT entry for the HiKey board):
+
+ display-subsystem {
+ compatible = "hisilicon,hi6220-dss";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ dma-coherent;
+
+ ade: ade@f4100000 {
+ compatible = "hisilicon,hi6220-ade";
+ reg = <0x0 0xf4100000 0x0 0x7800>,
+ <0x0 0xf4410000 0x0 0x1000>;
+ reg-names = "ade_base",
+ "media_base";
+ interrupts = <0 115 4>; /* ldi interrupt */
+
+ clocks = <&media_ctrl HI6220_ADE_CORE>,
+ <&media_ctrl HI6220_CODEC_JPEG>,
+ <&media_ctrl HI6220_ADE_PIX_SRC>,
+ <&media_ctrl HI6220_PLL_SYS>,
+ <&media_ctrl HI6220_PLL_SYS_MEDIA>;
+		/* clock names */
+ clock-names = "clk_ade_core",
+ "aclk_codec_jpeg_src",
+ "clk_ade_pix",
+ "clk_syspll_src",
+ "clk_medpll_src";
+ ade_core_clk_rate = <360000000>;
+ media_noc_clk_rate = <288000000>;
+ };
+
+		dsi: dsi@f4107800 {
+ compatible = "hisilicon,hi6220-dsi";
+ reg = <0x0 0xf4107800 0x0 0x100>;
+ clocks = <&media_ctrl HI6220_DSI_PCLK>;
+ clock-names = "pclk_dsi";
+
+ port {
+ dsi_out: endpoint {
+ remote-endpoint = <&adv_in>;
+ };
+ };
+
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/hisilicon/hisi-dsi.txt b/Documentation/devicetree/bindings/display/hisilicon/hisi-dsi.txt
new file mode 100644
index 000000000000..30abaa853b00
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/hisilicon/hisi-dsi.txt
@@ -0,0 +1,53 @@
+Device-Tree bindings for the Hisilicon DSI controller driver
+
+The DSI controller sits between the display controller and the external
+HDMI converter.
+
+Required properties:
+- compatible: value should be one of the following
+ "hisilicon,hi6220-dsi".
+- reg: physical base address and length of the controller's registers.
+- clocks: the clocks needed.
+- clock-names: the names of the clocks.
+- port: DSI controller output port. This contains one endpoint subnode, with its
+ remote-endpoint set to the phandle of the connected external HDMI endpoint.
+ See Documentation/devicetree/bindings/graph.txt for device graph info.
+
+Example (hi6220 SoC and board specific DT entries for the HiKey board):
+
+SoC specific:
+	dsi: dsi@f4107800 {
+ compatible = "hisilicon,hi6220-dsi";
+ reg = <0x0 0xf4107800 0x0 0x100>;
+ clocks = <&media_ctrl HI6220_DSI_PCLK>;
+ clock-names = "pclk_dsi";
+
+ port {
+ dsi_out: endpoint {
+ remote-endpoint = <&adv_in>;
+ };
+ };
+
+ };
+
+Board specific:
+ i2c2: i2c@f7102000 {
+ status = "ok";
+
+ adv7533: adv7533@39 {
+ compatible = "adi,adv7533";
+ reg = <0x39>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <1 2>;
+ pd-gpio = <&gpio0 4 0>;
+ adi,dsi-lanes = <4>;
+
+ port {
+ adv_in: endpoint {
+ remote-endpoint = <&dsi_out>;
+ };
+ };
+ };
+ };
+
diff --git a/Documentation/devicetree/bindings/mailbox/hisilicon,hi6220-mailbox.txt b/Documentation/devicetree/bindings/mailbox/hisilicon,hi6220-mailbox.txt
new file mode 100644
index 000000000000..3dfb0b0ecd33
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/hisilicon,hi6220-mailbox.txt
@@ -0,0 +1,57 @@
+Hisilicon Hi6220 Mailbox Driver
+===============================
+
+Hisilicon Hi6220 mailbox supports up to 32 channels. Each channel
+is unidirectional with a maximum message size of 8 words. I/O is
+performed using register access (there is no DMA) and the cell
+raises an interrupt when messages are received.
+
+Mailbox Device Node:
+====================
+
+Required properties:
+--------------------
+- compatible: Shall be "hisilicon,hi6220-mbox"
+- reg: Contains the mailbox register address range (base
+ address and length); the first item is for IPC
+ registers, the second item is shared buffer for
+ slots.
+- #mbox-cells:		Common mailbox binding property to identify the number
+ of cells required for the mailbox specifier. Should be 1.
+- interrupts: Contains the interrupt information for the mailbox
+ device. The format is dependent on which interrupt
+ controller the SoCs use.
+
+Example:
+--------
+
+ mailbox: mailbox@F7510000 {
+ #mbox-cells = <1>;
+ compatible = "hisilicon,hi6220-mbox";
+ reg = <0x0 0xF7510000 0x0 0x1000>, /* IPC_S */
+ <0x0 0x06DFF800 0x0 0x0800>; /* Mailbox */
+ interrupt-parent = <&gic>;
+ interrupts = <0 94 4>;
+ };
+
+
+Mailbox client
+===============
+
+"mboxes" and the optional "mbox-names" (please see
+Documentation/devicetree/bindings/mailbox/mailbox.txt for details). Each value
+of the mboxes property should contain a phandle to the mailbox controller
+device node and second argument is the channel index. It must be 0 (hardware
+support only one channel). The equivalent "mbox-names" property value can be
+used to give a name to the communication channel to be used by the client user.
+
+Example:
+--------
+
+ stub_clock: stub_clock {
+ compatible = "hisilicon,hi6220-stub-clk";
+ hisilicon,hi6220-clk-sram = <&sram>;
+ #clock-cells = <1>;
+ mbox-names = "mbox-tx";
+ mboxes = <&mailbox 1>;
+ };
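+
+Kernel client sketch
+====================
+
+The following is an illustrative sketch only, not part of the binding:
+a kernel driver could request and use the "mbox-tx" channel named above
+through the generic mailbox client API, roughly as below. The helper
+send_to_mcu() and the msg buffer are hypothetical; only the mailbox
+client calls are real kernel API.
+
+	#include <linux/err.h>
+	#include <linux/mailbox_client.h>
+
+	static int send_to_mcu(struct device *dev, void *msg)
+	{
+		struct mbox_client cl = {
+			.dev		= dev,
+			.tx_block	= true,
+		};
+		struct mbox_chan *chan;
+		int ret;
+
+		/* look the channel up by its "mbox-names" entry */
+		chan = mbox_request_channel_byname(&cl, "mbox-tx");
+		if (IS_ERR(chan))
+			return PTR_ERR(chan);
+
+		/* msg points to at most 8 words, the channel's size */
+		ret = mbox_send_message(chan, msg);
+		mbox_free_channel(chan);
+		return ret < 0 ? ret : 0;
+	}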
diff --git a/Documentation/devicetree/bindings/mfd/hisilicon,hi655x.txt b/Documentation/devicetree/bindings/mfd/hisilicon,hi655x.txt
new file mode 100644
index 000000000000..5edc310470b6
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/hisilicon,hi655x.txt
@@ -0,0 +1,27 @@
+Hisilicon hi655x Power Management Integrated Circuit (PMIC)
+
+The layout below shows how the PMIC Hi655x is accessed from the AP SoC
+Hi6220: the physical signal channel between them is SSI, and the
+communication uses memory-mapped I/O (regmap MMIO).
+
++----------------+ +-------------+
+| | | |
+| Hi6220 | SSI bus | Hi655x |
+| |-------------| |
+| |(REGMAP_MMIO)| |
++----------------+ +-------------+
+
+Required properties:
+- compatible: Should be "hisilicon,hi655x-pmic"
+- reg: Base address of the PMIC on the hi6220 SoC
+- interrupt-controller: Hi655x has internal IRQs (has its own IRQ domain).
+- pmic-gpios: The GPIO used by the PMIC IRQ.
+
+Example:
+ pmic: pmic@f8000000 {
+ compatible = "hisilicon,hi655x-pmic";
+ reg = <0x0 0xf8000000 0x0 0x1000>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ pmic-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+	};
diff --git a/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
index 8636f5ae97e5..9b4896c11716 100644
--- a/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
@@ -39,6 +39,10 @@ Required Properties:
Optional properties:
+* resets: phandle + reset specifier pair, intended to represent hardware
+ reset signal present internally in some host controller IC designs.
+ See Documentation/devicetree/bindings/reset/reset.txt for details.
+
* clocks: from common clock binding: handle to biu and ciu clocks for the
bus interface unit clock and the card interface unit clock.
@@ -48,7 +52,7 @@ Optional properties:
clock-frequency. It is an error to omit both the ciu clock and the
clock-frequency.
-* clock-frequency: should be the frequency (in Hz) of the ciu clock. If this
+* clock-frequency: should be the frequency (in Hz) of the ciu clock. If this
is specified and the ciu clock is specified then we'll try to set the ciu
clock to this at probe time.
diff --git a/Documentation/devicetree/bindings/phy/phy-hi6220-usb.txt b/Documentation/devicetree/bindings/phy/phy-hi6220-usb.txt
new file mode 100644
index 000000000000..f17a56e2152f
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/phy-hi6220-usb.txt
@@ -0,0 +1,16 @@
+Hisilicon hi6220 usb PHY
+------------------------
+
+Required properties:
+- compatible: should be "hisilicon,hi6220-usb-phy"
+- #phy-cells: must be 0
+- hisilicon,peripheral-syscon: phandle of the syscon used to control the PHY.
+Refer to phy/phy-bindings.txt for the generic PHY binding properties.
+
+Example:
+ usb_phy: usbphy {
+ compatible = "hisilicon,hi6220-usb-phy";
+ #phy-cells = <0>;
+ phy-supply = <&fixed_5v_hub>;
+ hisilicon,peripheral-syscon = <&sys_ctrl>;
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt
index 08a4a32c8eb0..0326154c7925 100644
--- a/Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt
@@ -134,12 +134,12 @@ mfio80 ddr_debug, mips_trace_data, mips_debug
mfio81 dreq0, mips_trace_data, eth_debug
mfio82 dreq1, mips_trace_data, eth_debug
mfio83 mips_pll_lock, mips_trace_data, usb_debug
-mfio84 sys_pll_lock, mips_trace_data, usb_debug
-mfio85 wifi_pll_lock, mips_trace_data, sdhost_debug
-mfio86 bt_pll_lock, mips_trace_data, sdhost_debug
-mfio87 rpu_v_pll_lock, dreq2, socif_debug
-mfio88 rpu_l_pll_lock, dreq3, socif_debug
-mfio89 audio_pll_lock, dreq4, dreq5
+mfio84 audio_pll_lock, mips_trace_data, usb_debug
+mfio85 rpu_v_pll_lock, mips_trace_data, sdhost_debug
+mfio86 rpu_l_pll_lock, mips_trace_data, sdhost_debug
+mfio87 sys_pll_lock, dreq2, socif_debug
+mfio88 wifi_pll_lock, dreq3, socif_debug
+mfio89 bt_pll_lock, dreq4, dreq5
tck
trstn
tdi
diff --git a/Documentation/devicetree/bindings/regulator/hisilicon,hi655x-regulator.txt b/Documentation/devicetree/bindings/regulator/hisilicon,hi655x-regulator.txt
new file mode 100644
index 000000000000..09d3884e7cc2
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/hisilicon,hi655x-regulator.txt
@@ -0,0 +1,28 @@
+Hisilicon Hi655x Voltage regulators
+
+Note:
+The hi655x regulators are controlled by the hi655x power IC, so the
+regulator nodes must be child nodes of the hi655x PMIC node.
+
+The driver uses the regulator core framework; please also refer to
+the bindings in regulator.txt.
+
+The valid names for regulators are:
+
+LDO2 LDO7 LDO10 LDO13 LDO14 LDO15 LDO17 LDO19 LDO21 LDO22
+
+Example:
+ pmic: pmic@f8000000 {
+ compatible = "hisilicon,hi655x-pmic";
+ ...
+ regulators {
+ ldo2: LDO2@a21 {
+ regulator-compatible = "LDO2_2V8";
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <3200000>;
+ regulator-enable-ramp-delay = <120>;
+ };
+ ...
+		};
+	};
diff --git a/Documentation/devicetree/bindings/reset/hisilicon,hi6220-reset.txt b/Documentation/devicetree/bindings/reset/hisilicon,hi6220-reset.txt
new file mode 100644
index 000000000000..e0b185a944ba
--- /dev/null
+++ b/Documentation/devicetree/bindings/reset/hisilicon,hi6220-reset.txt
@@ -0,0 +1,34 @@
+Hisilicon System Reset Controller
+=================================
+
+Please also refer to reset.txt in this directory for common reset
+controller binding usage.
+
+The reset controller registers are part of the system-ctl block on
+hi6220 SoC.
+
+Required properties:
+- compatible: may be "hisilicon,hi6220-sysctrl"
+- reg: should be register base and length as documented in the
+ datasheet
+- #reset-cells: 1, see below
+
+Example:
+sys_ctrl: sys_ctrl@f7030000 {
+ compatible = "hisilicon,hi6220-sysctrl", "syscon";
+ reg = <0x0 0xf7030000 0x0 0x2000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+};
+
+Specifying reset lines connected to IP modules
+==============================================
+Example:
+
+ uart1: serial@..... {
+ ...
+ resets = <&sys_ctrl PERIPH_RSTEN3_UART1>;
+ ...
+ };
+
+The reset indices can be found in <dt-bindings/reset/hisi,hi6220-resets.h>.
diff --git a/Documentation/devicetree/bindings/usb/dwc2.txt b/Documentation/devicetree/bindings/usb/dwc2.txt
index fd132cbee70e..221368207ca4 100644
--- a/Documentation/devicetree/bindings/usb/dwc2.txt
+++ b/Documentation/devicetree/bindings/usb/dwc2.txt
@@ -4,6 +4,7 @@ Platform DesignWare HS OTG USB 2.0 controller
Required properties:
- compatible : One of:
- brcm,bcm2835-usb: The DWC2 USB controller instance in the BCM2835 SoC.
+ - hisilicon,hi6220-usb: The DWC2 USB controller instance in the hi6220 SoC.
- rockchip,rk3066-usb: The DWC2 USB controller instance in the rk3066 Soc;
- "rockchip,rk3188-usb", "rockchip,rk3066-usb", "snps,dwc2": for rk3188 Soc;
- "rockchip,rk3288-usb", "rockchip,rk3066-usb", "snps,dwc2": for rk3288 Soc;
diff --git a/Documentation/filesystems/efivarfs.txt b/Documentation/filesystems/efivarfs.txt
index c477af086e65..686a64bba775 100644
--- a/Documentation/filesystems/efivarfs.txt
+++ b/Documentation/filesystems/efivarfs.txt
@@ -14,3 +14,10 @@ filesystem.
efivarfs is typically mounted like this,
mount -t efivarfs none /sys/firmware/efi/efivars
+
+Due to the presence of numerous firmware bugs where removing non-standard
+UEFI variables causes the system firmware to fail to POST, efivarfs
+files that are not well-known standardized variables are created
+as immutable files. This doesn't prevent removal - "chattr -i" will work -
+but it does prevent this kind of failure from being triggered
+accidentally.
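+
+For example, such a variable can still be removed deliberately (the
+variable name below is purely illustrative):
+
+	chattr -i /sys/firmware/efi/efivars/Example-12345678-1234-1234-1234-123456789abc
+	rm /sys/firmware/efi/efivars/Example-12345678-1234-1234-1234-123456789abc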
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 742f69d18fc8..0e4102ae1a61 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -3928,6 +3928,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
sector if the number is odd);
i = IGNORE_DEVICE (don't bind to this
device);
+ j = NO_REPORT_LUNS (don't use report luns
+ command, uas only);
l = NOT_LOCKABLE (don't try to lock and
unlock ejectable media);
m = MAX_SECTORS_64 (don't transfer more
diff --git a/Documentation/usb/power-management.txt b/Documentation/usb/power-management.txt
index 4a15c90bc11d..0a94ffe17ab6 100644
--- a/Documentation/usb/power-management.txt
+++ b/Documentation/usb/power-management.txt
@@ -537,17 +537,18 @@ relevant attribute files are usb2_hardware_lpm and usb3_hardware_lpm.
can write y/Y/1 or n/N/0 to the file to enable/disable
USB2 hardware LPM manually. This is for test purpose mainly.
- power/usb3_hardware_lpm
+ power/usb3_hardware_lpm_u1
+ power/usb3_hardware_lpm_u2
		When a USB 3.0 lpm-capable device is plugged in to an
xHCI host which supports link PM, it will check if U1
and U2 exit latencies have been set in the BOS
		descriptor; if the check is passed and the host
supports USB3 hardware LPM, USB3 hardware LPM will be
- enabled for the device and this file will be created.
- The file holds a string value (enable or disable)
- indicating whether or not USB3 hardware LPM is
- enabled for the device.
+ enabled for the device and these files will be created.
+ The files hold a string value (enable or disable)
+ indicating whether or not USB3 hardware LPM U1 or U2
+ is enabled for the device.
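+
+			The current setting can be read back with, for
+			example (the device path here is illustrative):
+
+			  cat /sys/bus/usb/devices/2-1/power/usb3_hardware_lpm_u1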
USB Port Power Control
----------------------
diff --git a/Documentation/virtual/kvm/mmu.txt b/Documentation/virtual/kvm/mmu.txt
index 3a4d681c3e98..b653641d4261 100644
--- a/Documentation/virtual/kvm/mmu.txt
+++ b/Documentation/virtual/kvm/mmu.txt
@@ -358,7 +358,8 @@ In the first case there are two additional complications:
- if CR4.SMEP is enabled: since we've turned the page into a kernel page,
the kernel may now execute it. We handle this by also setting spte.nx.
If we get a user fetch or read fault, we'll change spte.u=1 and
- spte.nx=gpte.nx back.
+ spte.nx=gpte.nx back. For this to work, KVM forces EFER.NX to 1 when
+ shadow paging is in use.
- if CR4.SMAP is disabled: since the page has been changed to a kernel
page, it can not be reused when CR4.SMAP is enabled. We set
CR4.SMAP && !CR0.WP into shadow page's role to avoid this case. Note,
diff --git a/MAINTAINERS b/MAINTAINERS
index 233f83464814..4c3e1d2ac31b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -230,13 +230,13 @@ F: kernel/sys_ni.c
ABIT UGURU 1,2 HARDWARE MONITOR DRIVER
M: Hans de Goede <hdegoede@redhat.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: drivers/hwmon/abituguru.c
ABIT UGURU 3 HARDWARE MONITOR DRIVER
M: Alistair John Strachan <alistair@devzero.co.uk>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: drivers/hwmon/abituguru3.c
@@ -373,14 +373,14 @@ S: Maintained
ADM1025 HARDWARE MONITOR DRIVER
M: Jean Delvare <jdelvare@suse.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/adm1025
F: drivers/hwmon/adm1025.c
ADM1029 HARDWARE MONITOR DRIVER
M: Corentin Labbe <clabbe.montjoie@gmail.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: drivers/hwmon/adm1029.c
@@ -425,7 +425,7 @@ F: drivers/video/backlight/adp8860_bl.c
ADS1015 HARDWARE MONITOR DRIVER
M: Dirk Eibach <eibach@gdsys.de>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/ads1015
F: drivers/hwmon/ads1015.c
@@ -438,7 +438,7 @@ F: drivers/macintosh/therm_adt746x.c
ADT7475 HARDWARE MONITOR DRIVER
M: Jean Delvare <jdelvare@suse.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/adt7475
F: drivers/hwmon/adt7475.c
@@ -615,7 +615,7 @@ F: include/linux/ccp.h
AMD FAM15H PROCESSOR POWER MONITORING DRIVER
M: Andreas Herrmann <herrmann.der.user@googlemail.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/fam15h_power
F: drivers/hwmon/fam15h_power.c
@@ -779,7 +779,7 @@ F: drivers/input/mouse/bcm5974.c
APPLE SMC DRIVER
M: Henrik Rydberg <rydberg@bitmath.org>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Odd fixes
F: drivers/hwmon/applesmc.c
@@ -1777,7 +1777,7 @@ F: include/media/as3645a.h
ASC7621 HARDWARE MONITOR DRIVER
M: George Joseph <george.joseph@fairview5.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/asc7621
F: drivers/hwmon/asc7621.c
@@ -1864,7 +1864,7 @@ F: drivers/net/wireless/ath/carl9170/
ATK0110 HWMON DRIVER
M: Luca Tettamanti <kronos.it@gmail.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: drivers/hwmon/asus_atk0110.c
@@ -2984,7 +2984,7 @@ F: mm/swap_cgroup.c
CORETEMP HARDWARE MONITORING DRIVER
M: Fenghua Yu <fenghua.yu@intel.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/coretemp
F: drivers/hwmon/coretemp.c
@@ -3549,7 +3549,7 @@ T: git git://git.infradead.org/users/vkoul/slave-dma.git
DME1737 HARDWARE MONITOR DRIVER
M: Juerg Haefliger <juergh@gmail.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/dme1737
F: drivers/hwmon/dme1737.c
@@ -4262,7 +4262,7 @@ F: include/video/exynos_mipi*
F71805F HARDWARE MONITORING DRIVER
M: Jean Delvare <jdelvare@suse.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/f71805f
F: drivers/hwmon/f71805f.c
@@ -4341,7 +4341,7 @@ F: fs/*
FINTEK F75375S HARDWARE MONITOR AND FAN CONTROLLER DRIVER
M: Riku Voipio <riku.voipio@iki.fi>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: drivers/hwmon/f75375s.c
F: include/linux/f75375s.h
@@ -4883,8 +4883,8 @@ F: drivers/media/usb/hackrf/
HARDWARE MONITORING
M: Jean Delvare <jdelvare@suse.com>
M: Guenter Roeck <linux@roeck-us.net>
-L: lm-sensors@lm-sensors.org
-W: http://www.lm-sensors.org/
+L: linux-hwmon@vger.kernel.org
+W: http://hwmon.wiki.kernel.org/
T: quilt http://jdelvare.nerim.net/devel/linux/jdelvare-hwmon/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
S: Maintained
@@ -5393,7 +5393,7 @@ F: drivers/usb/atm/ueagle-atm.c
INA209 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/ina209
F: Documentation/devicetree/bindings/i2c/ina209.txt
@@ -5401,7 +5401,7 @@ F: drivers/hwmon/ina209.c
INA2XX HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/ina2xx
F: drivers/hwmon/ina2xx.c
@@ -5884,7 +5884,7 @@ F: drivers/isdn/hardware/eicon/
IT87 HARDWARE MONITORING DRIVER
M: Jean Delvare <jdelvare@suse.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/it87
F: drivers/hwmon/it87.c
@@ -5920,7 +5920,7 @@ F: drivers/media/dvb-frontends/ix2505v*
JC42.4 TEMPERATURE SENSOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: drivers/hwmon/jc42.c
F: Documentation/hwmon/jc42
@@ -5970,14 +5970,14 @@ F: drivers/tty/serial/jsm/
K10TEMP HARDWARE MONITORING DRIVER
M: Clemens Ladisch <clemens@ladisch.de>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/k10temp
F: drivers/hwmon/k10temp.c
K8TEMP HARDWARE MONITORING DRIVER
M: Rudolf Marek <r.marek@assembler.cz>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/k8temp
F: drivers/hwmon/k8temp.c
@@ -6485,27 +6485,27 @@ F: net/llc/
LM73 HARDWARE MONITOR DRIVER
M: Guillaume Ligneul <guillaume.ligneul@gmail.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: drivers/hwmon/lm73.c
LM78 HARDWARE MONITOR DRIVER
M: Jean Delvare <jdelvare@suse.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/lm78
F: drivers/hwmon/lm78.c
LM83 HARDWARE MONITOR DRIVER
M: Jean Delvare <jdelvare@suse.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/lm83
F: drivers/hwmon/lm83.c
LM90 HARDWARE MONITOR DRIVER
M: Jean Delvare <jdelvare@suse.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/lm90
F: Documentation/devicetree/bindings/hwmon/lm90.txt
@@ -6513,7 +6513,7 @@ F: drivers/hwmon/lm90.c
LM95234 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/lm95234
F: drivers/hwmon/lm95234.c
@@ -6580,7 +6580,7 @@ F: drivers/scsi/sym53c8xx_2/
LTC4261 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/ltc4261
F: drivers/hwmon/ltc4261.c
@@ -6749,28 +6749,28 @@ F: include/uapi/linux/matroxfb.h
MAX16065 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/max16065
F: drivers/hwmon/max16065.c
MAX20751 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/max20751
F: drivers/hwmon/max20751.c
MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER
M: "Hans J. Koch" <hjk@hansjkoch.de>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/max6650
F: drivers/hwmon/max6650.c
MAX6697 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/max6697
F: Documentation/devicetree/bindings/i2c/max6697.txt
@@ -7303,7 +7303,7 @@ F: drivers/scsi/NCR_D700.*
NCT6775 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/nct6775
F: drivers/hwmon/nct6775.c
@@ -8064,7 +8064,7 @@ F: drivers/video/logo/logo_parisc*
PC87360 HARDWARE MONITORING DRIVER
M: Jim Cromie <jim.cromie@gmail.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/pc87360
F: drivers/hwmon/pc87360.c
@@ -8076,7 +8076,7 @@ F: drivers/char/pc8736x_gpio.c
PC87427 HARDWARE MONITORING DRIVER
M: Jean Delvare <jdelvare@suse.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/pc87427
F: drivers/hwmon/pc87427.c
@@ -8415,8 +8415,8 @@ F: drivers/rtc/rtc-puv3.c
PMBUS HARDWARE MONITORING DRIVERS
M: Guenter Roeck <linux@roeck-us.net>
-L: lm-sensors@lm-sensors.org
-W: http://www.lm-sensors.org/
+L: linux-hwmon@vger.kernel.org
+W: http://hwmon.wiki.kernel.org/
W: http://www.roeck-us.net/linux/drivers/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
S: Maintained
@@ -8610,7 +8610,7 @@ F: drivers/media/usb/pwc/*
PWM FAN DRIVER
M: Kamil Debski <k.debski@samsung.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/hwmon/pwm-fan.txt
F: Documentation/hwmon/pwm-fan
@@ -9882,28 +9882,28 @@ F: Documentation/devicetree/bindings/media/i2c/nokia,smia.txt
SMM665 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/smm665
F: drivers/hwmon/smm665.c
SMSC EMC2103 HARDWARE MONITOR DRIVER
M: Steve Glendinning <steve.glendinning@shawell.net>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/emc2103
F: drivers/hwmon/emc2103.c
SMSC SCH5627 HARDWARE MONITOR DRIVER
M: Hans de Goede <hdegoede@redhat.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Supported
F: Documentation/hwmon/sch5627
F: drivers/hwmon/sch5627.c
SMSC47B397 HARDWARE MONITOR DRIVER
M: Jean Delvare <jdelvare@suse.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/smsc47b397
F: drivers/hwmon/smsc47b397.c
@@ -10289,9 +10289,11 @@ S: Maintained
F: drivers/net/ethernet/dlink/sundance.c
SUPERH
+M: Yoshinori Sato <ysato@users.sourceforge.jp>
+M: Rich Felker <dalias@libc.org>
L: linux-sh@vger.kernel.org
Q: http://patchwork.kernel.org/project/linux-sh/list/
-S: Orphan
+S: Maintained
F: Documentation/sh/
F: arch/sh/
F: drivers/sh/
@@ -10828,7 +10830,7 @@ F: include/linux/mmc/sh_mobile_sdhi.h
TMP401 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/tmp401
F: drivers/hwmon/tmp401.c
@@ -11562,14 +11564,14 @@ F: Documentation/networking/vrf.txt
VT1211 HARDWARE MONITOR DRIVER
M: Juerg Haefliger <juergh@gmail.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/vt1211
F: drivers/hwmon/vt1211.c
VT8231 HARDWARE MONITOR DRIVER
M: Roger Lucas <vt8231@hiddenengine.co.uk>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: drivers/hwmon/vt8231.c
@@ -11588,21 +11590,21 @@ F: drivers/w1/
W83791D HARDWARE MONITORING DRIVER
M: Marc Hulsman <m.hulsman@tudelft.nl>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/w83791d
F: drivers/hwmon/w83791d.c
W83793 HARDWARE MONITORING DRIVER
M: Rudolf Marek <r.marek@assembler.cz>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/w83793
F: drivers/hwmon/w83793.c
W83795 HARDWARE MONITORING DRIVER
M: Jean Delvare <jdelvare@suse.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: drivers/hwmon/w83795.c
diff --git a/Makefile b/Makefile
index 70dea02f1346..1928fcd539cc 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
-SUBLEVEL = 0
+SUBLEVEL = 8
EXTRAVERSION =
NAME = Blurry Fish Butt
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
index 57c1f33844d4..0352fb8d21b9 100644
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -35,21 +35,6 @@ static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
\
m += nr >> 5; \
\
- /* \
- * ARC ISA micro-optimization: \
- * \
- * Instructions dealing with bitpos only consider lower 5 bits \
- * e.g (x << 33) is handled like (x << 1) by ASL instruction \
- * (mem pointer still needs adjustment to point to next word) \
- * \
- * Hence the masking to clamp @nr arg can be elided in general. \
- * \
- * However if @nr is a constant (above assumed in a register), \
- * and greater than 31, gcc can optimize away (x << 33) to 0, \
- * as overflow, given the 32-bit ISA. Thus masking needs to be \
- * done for const @nr, but no code is generated due to gcc \
- * const prop. \
- */ \
nr &= 0x1f; \
\
__asm__ __volatile__( \
diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
index 694ece8a0243..27b17adea50d 100644
--- a/arch/arc/include/asm/io.h
+++ b/arch/arc/include/asm/io.h
@@ -129,15 +129,23 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
#define writel(v,c) ({ __iowmb(); writel_relaxed(v,c); })
/*
- * Relaxed API for drivers which can handle any ordering themselves
+ * Relaxed API for drivers which can handle barrier ordering themselves
+ *
+ * Also these are defined to perform little endian accesses.
+ * To provide the typical device register semantics of fixed endian,
+ * swap the byte order for Big Endian
+ *
+ * http://lkml.kernel.org/r/201603100845.30602.arnd@arndb.de
*/
#define readb_relaxed(c) __raw_readb(c)
-#define readw_relaxed(c) __raw_readw(c)
-#define readl_relaxed(c) __raw_readl(c)
+#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \
+ __raw_readw(c)); __r; })
+#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
+ __raw_readl(c)); __r; })
#define writeb_relaxed(v,c) __raw_writeb(v,c)
-#define writew_relaxed(v,c) __raw_writew(v,c)
-#define writel_relaxed(v,c) __raw_writel(v,c)
+#define writew_relaxed(v,c) __raw_writew((__force u16) cpu_to_le16(v),c)
+#define writel_relaxed(v,c) __raw_writel((__force u32) cpu_to_le32(v),c)
#include <asm-generic/io.h>
diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h
index 258b0e5ad332..68b6092349d7 100644
--- a/arch/arc/include/asm/irqflags-arcv2.h
+++ b/arch/arc/include/asm/irqflags-arcv2.h
@@ -22,6 +22,7 @@
#define AUX_IRQ_CTRL 0x00E
#define AUX_IRQ_ACT 0x043 /* Active Intr across all levels */
#define AUX_IRQ_LVL_PEND 0x200 /* Pending Intr across all levels */
+#define AUX_IRQ_HINT 0x201 /* For generating Soft Interrupts */
#define AUX_IRQ_PRIORITY 0x206
#define ICAUSE 0x40a
#define AUX_IRQ_SELECT 0x40b
@@ -112,6 +113,16 @@ static inline int arch_irqs_disabled(void)
return arch_irqs_disabled_flags(arch_local_save_flags());
}
+static inline void arc_softirq_trigger(int irq)
+{
+ write_aux_reg(AUX_IRQ_HINT, irq);
+}
+
+static inline void arc_softirq_clear(int irq)
+{
+ write_aux_reg(AUX_IRQ_HINT, 0);
+}
+
#else
.macro IRQ_DISABLE scratch
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
index cbfec79137bf..c1264607bbff 100644
--- a/arch/arc/kernel/entry-arcv2.S
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -45,11 +45,12 @@ VECTOR reserved ; Reserved slots
VECTOR handle_interrupt ; (16) Timer0
VECTOR handle_interrupt ; unused (Timer1)
VECTOR handle_interrupt ; unused (WDT)
-VECTOR handle_interrupt ; (19) ICI (inter core interrupt)
-VECTOR handle_interrupt
-VECTOR handle_interrupt
-VECTOR handle_interrupt
-VECTOR handle_interrupt ; (23) End of fixed IRQs
+VECTOR handle_interrupt ; (19) Inter core Interrupt (IPI)
+VECTOR handle_interrupt ; (20) perf Interrupt
+VECTOR handle_interrupt ; (21) Software Triggered Intr (Self IPI)
+VECTOR handle_interrupt ; unused
+VECTOR handle_interrupt ; (23) unused
+# End of fixed IRQs
.rept CONFIG_ARC_NUMBER_OF_INTERRUPTS - 8
VECTOR handle_interrupt
@@ -211,7 +212,11 @@ debug_marker_syscall:
; (since IRQ NOT allowed in DS in ARCv2, this can only happen if orig
; entry was via Exception in DS which got preempted in kernel).
;
-; IRQ RTIE won't reliably restore DE bit and/or BTA, needs handling
+; IRQ RTIE won't reliably restore DE bit and/or BTA, needs workaround
+;
+; The solution is to return from the interrupt w/o any delay slot quirks into a
+; kernel trampoline, and from pure kernel mode return to the delay slot, which
+; handles the DS bit/BTA correctly.
+
.Lintr_ret_to_delay_slot:
debug_marker_ds:
@@ -222,18 +227,23 @@ debug_marker_ds:
ld r2, [sp, PT_ret]
ld r3, [sp, PT_status32]
+ ; STAT32 for Int return created from scratch
+	; (No delay slot, disable further intr in trampoline)
+
bic r0, r3, STATUS_U_MASK|STATUS_DE_MASK|STATUS_IE_MASK|STATUS_L_MASK
st r0, [sp, PT_status32]
mov r1, .Lintr_ret_to_delay_slot_2
st r1, [sp, PT_ret]
+ ; Orig exception PC/STAT32 safekept @orig_r0 and @event stack slots
st r2, [sp, 0]
st r3, [sp, 4]
b .Lisr_ret_fast_path
.Lintr_ret_to_delay_slot_2:
+ ; Trampoline to restore orig exception PC/STAT32/BTA/AUX_USER_SP
sub sp, sp, SZ_PT_REGS
st r9, [sp, -4]
@@ -243,11 +253,19 @@ debug_marker_ds:
ld r9, [sp, 4]
sr r9, [erstatus]
+ ; restore AUX_USER_SP if returning to U mode
+ bbit0 r9, STATUS_U_BIT, 1f
+ ld r9, [sp, PT_sp]
+ sr r9, [AUX_USER_SP]
+
+1:
ld r9, [sp, 8]
sr r9, [erbta]
ld r9, [sp, -4]
add sp, sp, SZ_PT_REGS
+
+ ; return from pure kernel mode to delay slot
rtie
END(ret_from_exception)
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index bd237acdf4f2..30d806ce0c78 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -11,9 +11,12 @@
#include <linux/smp.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
+#include <asm/irqflags-arcv2.h>
#include <asm/mcip.h>
#include <asm/setup.h>
+#define SOFTIRQ_IRQ 21
+
static char smp_cpuinfo_buf[128];
static int idu_detected;
@@ -22,6 +25,7 @@ static DEFINE_RAW_SPINLOCK(mcip_lock);
static void mcip_setup_per_cpu(int cpu)
{
smp_ipi_irq_setup(cpu, IPI_IRQ);
+ smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);
}
static void mcip_ipi_send(int cpu)
@@ -29,6 +33,12 @@ static void mcip_ipi_send(int cpu)
unsigned long flags;
int ipi_was_pending;
+ /* ARConnect can only send IPI to others */
+ if (unlikely(cpu == raw_smp_processor_id())) {
+ arc_softirq_trigger(SOFTIRQ_IRQ);
+ return;
+ }
+
/*
* NOTE: We must spin here if the other cpu hasn't yet
* serviced a previous message. This can burn lots
@@ -63,6 +73,11 @@ static void mcip_ipi_clear(int irq)
unsigned long flags;
unsigned int __maybe_unused copy;
+ if (unlikely(irq == SOFTIRQ_IRQ)) {
+ arc_softirq_clear(irq);
+ return;
+ }
+
raw_spin_lock_irqsave(&mcip_lock, flags);
/* Who sent the IPI */
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 0901c6bd458f..f08e4fd600a3 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -162,10 +162,9 @@ choice
mobile SoCs in the Kona family of chips (e.g. bcm28155,
bcm11351, etc...)
- config DEBUG_BCM63XX
+ config DEBUG_BCM63XX_UART
bool "Kernel low-level debugging on BCM63XX UART"
depends on ARCH_BCM_63XX
- select DEBUG_UART_BCM63XX
config DEBUG_BERLIN_UART
bool "Marvell Berlin SoC Debug UART"
@@ -1348,7 +1347,7 @@ config DEBUG_LL_INCLUDE
default "debug/vf.S" if DEBUG_VF_UART
default "debug/vt8500.S" if DEBUG_VT8500_UART0
default "debug/zynq.S" if DEBUG_ZYNQ_UART0 || DEBUG_ZYNQ_UART1
- default "debug/bcm63xx.S" if DEBUG_UART_BCM63XX
+ default "debug/bcm63xx.S" if DEBUG_BCM63XX_UART
default "debug/digicolor.S" if DEBUG_DIGICOLOR_UA0
default "mach/debug-macro.S"
@@ -1364,10 +1363,6 @@ config DEBUG_UART_8250
ARCH_IOP33X || ARCH_IXP4XX || \
ARCH_LPC32XX || ARCH_MV78XX0 || ARCH_ORION5X || ARCH_RPC
-# Compatibility options for BCM63xx
-config DEBUG_UART_BCM63XX
- def_bool ARCH_BCM_63XX
-
config DEBUG_UART_PHYS
hex "Physical base address of debug UART"
default 0x00100a00 if DEBUG_NETX_UART
@@ -1462,7 +1457,7 @@ config DEBUG_UART_PHYS
default 0xfffb0000 if DEBUG_OMAP1UART1 || DEBUG_OMAP7XXUART1
default 0xfffb0800 if DEBUG_OMAP1UART2 || DEBUG_OMAP7XXUART2
default 0xfffb9800 if DEBUG_OMAP1UART3 || DEBUG_OMAP7XXUART3
- default 0xfffe8600 if DEBUG_UART_BCM63XX
+ default 0xfffe8600 if DEBUG_BCM63XX_UART
default 0xfffff700 if ARCH_IOP33X
depends on ARCH_EP93XX || \
DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \
@@ -1474,7 +1469,7 @@ config DEBUG_UART_PHYS
DEBUG_RCAR_GEN2_SCIF0 || DEBUG_RCAR_GEN2_SCIF2 || \
DEBUG_RMOBILE_SCIFA0 || DEBUG_RMOBILE_SCIFA1 || \
DEBUG_RMOBILE_SCIFA4 || DEBUG_S3C24XX_UART || \
- DEBUG_UART_BCM63XX || DEBUG_ASM9260_UART || \
+ DEBUG_BCM63XX_UART || DEBUG_ASM9260_UART || \
DEBUG_SIRFSOC_UART || DEBUG_DIGICOLOR_UA0 || \
DEBUG_AT91_UART
@@ -1515,7 +1510,7 @@ config DEBUG_UART_VIRT
default 0xfb10c000 if DEBUG_REALVIEW_PB1176_PORT
default 0xfc40ab00 if DEBUG_BRCMSTB_UART
default 0xfc705000 if DEBUG_ZTE_ZX
- default 0xfcfe8600 if DEBUG_UART_BCM63XX
+ default 0xfcfe8600 if DEBUG_BCM63XX_UART
default 0xfd000000 if ARCH_SPEAR3XX || ARCH_SPEAR6XX
default 0xfd000000 if ARCH_SPEAR13XX
default 0xfd012000 if ARCH_MV78XX0
@@ -1566,7 +1561,7 @@ config DEBUG_UART_VIRT
DEBUG_UART_8250 || DEBUG_UART_PL01X || DEBUG_MESON_UARTAO || \
DEBUG_NETX_UART || \
DEBUG_QCOM_UARTDM || DEBUG_S3C24XX_UART || \
- DEBUG_UART_BCM63XX || DEBUG_ASM9260_UART || \
+ DEBUG_BCM63XX_UART || DEBUG_ASM9260_UART || \
DEBUG_SIRFSOC_UART || DEBUG_DIGICOLOR_UA0
config DEBUG_UART_8250_SHIFT
diff --git a/arch/arm/boot/dts/armada-388-gp.dts b/arch/arm/boot/dts/armada-388-gp.dts
index a633be3defda..cd316021d6ce 100644
--- a/arch/arm/boot/dts/armada-388-gp.dts
+++ b/arch/arm/boot/dts/armada-388-gp.dts
@@ -303,16 +303,6 @@
gpio = <&expander0 4 GPIO_ACTIVE_HIGH>;
};
- reg_usb2_1_vbus: v5-vbus1 {
- compatible = "regulator-fixed";
- regulator-name = "v5.0-vbus1";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- enable-active-high;
- regulator-always-on;
- gpio = <&expander0 4 GPIO_ACTIVE_HIGH>;
- };
-
reg_sata0: pwr-sata0 {
compatible = "regulator-fixed";
regulator-name = "pwr_en_sata0";
diff --git a/arch/arm/boot/dts/armada-xp-axpwifiap.dts b/arch/arm/boot/dts/armada-xp-axpwifiap.dts
index 23fc670c0427..5c21b236721f 100644
--- a/arch/arm/boot/dts/armada-xp-axpwifiap.dts
+++ b/arch/arm/boot/dts/armada-xp-axpwifiap.dts
@@ -70,8 +70,8 @@
soc {
ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xf1000000 0x100000
MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
- MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
- MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
+ MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
+ MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
pcie-controller {
status = "okay";
diff --git a/arch/arm/boot/dts/armada-xp-db.dts b/arch/arm/boot/dts/armada-xp-db.dts
index f774101416a5..ebe1d267406d 100644
--- a/arch/arm/boot/dts/armada-xp-db.dts
+++ b/arch/arm/boot/dts/armada-xp-db.dts
@@ -76,8 +76,8 @@
ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xf1000000 0x100000
MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
MBUS_ID(0x01, 0x2f) 0 0 0xf0000000 0x1000000
- MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
- MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
+ MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
+ MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
devbus-bootcs {
status = "okay";
diff --git a/arch/arm/boot/dts/armada-xp-gp.dts b/arch/arm/boot/dts/armada-xp-gp.dts
index 4878d7353069..5730b875c4f5 100644
--- a/arch/arm/boot/dts/armada-xp-gp.dts
+++ b/arch/arm/boot/dts/armada-xp-gp.dts
@@ -95,8 +95,8 @@
ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xf1000000 0x100000
MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
MBUS_ID(0x01, 0x2f) 0 0 0xf0000000 0x1000000
- MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
- MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
+ MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
+ MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
devbus-bootcs {
status = "okay";
diff --git a/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts b/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts
index 58b500873bfd..d960fef77ca1 100644
--- a/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts
+++ b/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts
@@ -65,8 +65,8 @@
soc {
ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xd0000000 0x100000
MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
- MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
- MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
+ MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
+ MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
pcie-controller {
status = "okay";
diff --git a/arch/arm/boot/dts/armada-xp-linksys-mamba.dts b/arch/arm/boot/dts/armada-xp-linksys-mamba.dts
index 6e9820e141f8..b89e6cf1271a 100644
--- a/arch/arm/boot/dts/armada-xp-linksys-mamba.dts
+++ b/arch/arm/boot/dts/armada-xp-linksys-mamba.dts
@@ -70,8 +70,8 @@
soc {
ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xf1000000 0x100000
MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
- MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
- MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
+ MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
+ MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
pcie-controller {
status = "okay";
diff --git a/arch/arm/boot/dts/armada-xp-matrix.dts b/arch/arm/boot/dts/armada-xp-matrix.dts
index 6ab33837a2b6..6522b04f4a8e 100644
--- a/arch/arm/boot/dts/armada-xp-matrix.dts
+++ b/arch/arm/boot/dts/armada-xp-matrix.dts
@@ -68,8 +68,8 @@
soc {
ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xf1000000 0x100000
MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
- MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
- MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
+ MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
+ MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
internal-regs {
serial@12000 {
diff --git a/arch/arm/boot/dts/armada-xp-netgear-rn2120.dts b/arch/arm/boot/dts/armada-xp-netgear-rn2120.dts
index 6fe8972de0a2..db54c7158a36 100644
--- a/arch/arm/boot/dts/armada-xp-netgear-rn2120.dts
+++ b/arch/arm/boot/dts/armada-xp-netgear-rn2120.dts
@@ -64,8 +64,8 @@
soc {
ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xd0000000 0x100000
MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
- MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
- MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
+ MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
+ MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
pcie-controller {
status = "okay";
diff --git a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
index a5db17782e08..853bd392a4fe 100644
--- a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
+++ b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
@@ -65,9 +65,9 @@
soc {
ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xd0000000 0x100000
MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
- MBUS_ID(0x01, 0x2f) 0 0 0xf0000000 0x8000000
- MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
- MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
+ MBUS_ID(0x01, 0x2f) 0 0 0xe8000000 0x8000000
+ MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
+ MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
devbus-bootcs {
status = "okay";
diff --git a/arch/arm/boot/dts/armada-xp-synology-ds414.dts b/arch/arm/boot/dts/armada-xp-synology-ds414.dts
index 2391b11dc546..d17dab0a6f51 100644
--- a/arch/arm/boot/dts/armada-xp-synology-ds414.dts
+++ b/arch/arm/boot/dts/armada-xp-synology-ds414.dts
@@ -78,8 +78,8 @@
soc {
ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xf1000000 0x100000
MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
- MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
- MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
+ MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
+ MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
pcie-controller {
status = "okay";
diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
index ff888d21c786..f3e2b96c06a3 100644
--- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
@@ -303,6 +303,7 @@
regulator-name = "mmc0-card-supply";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
+ regulator-always-on;
};
gpio_keys {
diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
index 131614f28e75..da84e65b56ef 100644
--- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
@@ -86,10 +86,12 @@
macb0: ethernet@f8020000 {
phy-mode = "rmii";
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_macb0_rmii &pinctrl_macb0_phy_irq>;
phy0: ethernet-phy@1 {
interrupt-parent = <&pioE>;
- interrupts = <1 IRQ_TYPE_EDGE_FALLING>;
+ interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
reg = <1>;
};
};
@@ -152,6 +154,10 @@
atmel,pins =
<AT91_PIOE 8 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
};
+ pinctrl_macb0_phy_irq: macb0_phy_irq_0 {
+ atmel,pins =
+ <AT91_PIOE 1 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
+ };
};
};
};
@@ -262,5 +268,6 @@
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
vin-supply = <&vcc_3v3_reg>;
+ regulator-always-on;
};
};
diff --git a/arch/arm/boot/dts/at91-sama5d4ek.dts b/arch/arm/boot/dts/at91-sama5d4ek.dts
index 2d4a33100af6..4e98cda97403 100644
--- a/arch/arm/boot/dts/at91-sama5d4ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d4ek.dts
@@ -160,8 +160,15 @@
};
macb0: ethernet@f8020000 {
+ pinctrl-0 = <&pinctrl_macb0_rmii &pinctrl_macb0_phy_irq>;
phy-mode = "rmii";
status = "okay";
+
+ ethernet-phy@1 {
+ reg = <0x1>;
+ interrupt-parent = <&pioE>;
+ interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
+ };
};
mmc1: mmc@fc000000 {
@@ -193,6 +200,10 @@
pinctrl@fc06a000 {
board {
+ pinctrl_macb0_phy_irq: macb0_phy_irq {
+ atmel,pins =
+ <AT91_PIOE 1 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
+ };
pinctrl_mmc0_cd: mmc0_cd {
atmel,pins =
<AT91_PIOE 5 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index fe99231cbde5..c2a03c740e79 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -1497,6 +1497,16 @@
0x48485200 0x2E00>;
#address-cells = <1>;
#size-cells = <1>;
+
+ /*
+ * Do not allow gating of cpsw clock as workaround
+ * for errata i877. Keeping internal clock disabled
+ * causes the device switching characteristics
+ * to degrade over time and eventually fail to meet
+ * the data manual delay time/skew specs.
+ */
+ ti,no-idle;
+
/*
* rx_thresh_pend
* rx_pend
diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
index 36387b11451d..80f6c786a37e 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
+++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
@@ -122,6 +122,7 @@
interrupt-parent = <&gpio5>;
interrupts = <24 IRQ_TYPE_LEVEL_HIGH>; /* gpio 152 */
ref-clock-frequency = <26000000>;
+ tcxo-clock-frequency = <26000000>;
};
};
diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi
index 5cf76a1c5c75..41e80e7f20be 100644
--- a/arch/arm/boot/dts/omap5-board-common.dtsi
+++ b/arch/arm/boot/dts/omap5-board-common.dtsi
@@ -130,6 +130,16 @@
};
};
+&gpio8 {
+ /* TI trees use GPIO instead of msecure, see also muxing */
+ p234 {
+ gpio-hog;
+ gpios = <10 GPIO_ACTIVE_HIGH>;
+ output-high;
+ line-name = "gpio8_234/msecure";
+ };
+};
+
&omap5_pmx_core {
pinctrl-names = "default";
pinctrl-0 = <
@@ -213,6 +223,13 @@
>;
};
+ /* TI trees use GPIO mode; msecure mode does not work reliably? */
+ palmas_msecure_pins: palmas_msecure_pins {
+ pinctrl-single,pins = <
+ OMAP5_IOPAD(0x180, PIN_OUTPUT | MUX_MODE6) /* gpio8_234 */
+ >;
+ };
+
usbhost_pins: pinmux_usbhost_pins {
pinctrl-single,pins = <
0x84 (PIN_INPUT | MUX_MODE0) /* usbb2_hsic_strobe */
@@ -278,6 +295,12 @@
&usbhost_wkup_pins
>;
+ palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins {
+ pinctrl-single,pins = <
+ OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) /* sys_nirq1 */
+ >;
+ };
+
usbhost_wkup_pins: pinmux_usbhost_wkup_pins {
pinctrl-single,pins = <
0x1A (PIN_OUTPUT | MUX_MODE0) /* fref_clk1_out, USB hub clk */
@@ -345,6 +368,8 @@
interrupt-controller;
#interrupt-cells = <2>;
ti,system-power-controller;
+ pinctrl-names = "default";
+ pinctrl-0 = <&palmas_sys_nirq_pins &palmas_msecure_pins>;
extcon_usb3: palmas_usb {
compatible = "ti,palmas-usb-vid";
@@ -358,6 +383,14 @@
#clock-cells = <0>;
};
+ rtc {
+ compatible = "ti,palmas-rtc";
+ interrupt-parent = <&palmas>;
+ interrupts = <8 IRQ_TYPE_NONE>;
+ ti,backup-battery-chargeable;
+ ti,backup-battery-charge-high-current;
+ };
+
palmas_pmic {
compatible = "ti,palmas-pmic";
interrupt-parent = <&palmas>;
diff --git a/arch/arm/boot/dts/sama5d2-pinfunc.h b/arch/arm/boot/dts/sama5d2-pinfunc.h
index 1afe24629d1f..b0c912feaa2f 100644
--- a/arch/arm/boot/dts/sama5d2-pinfunc.h
+++ b/arch/arm/boot/dts/sama5d2-pinfunc.h
@@ -90,7 +90,7 @@
#define PIN_PA14__I2SC1_MCK PINMUX_PIN(PIN_PA14, 4, 2)
#define PIN_PA14__FLEXCOM3_IO2 PINMUX_PIN(PIN_PA14, 5, 1)
#define PIN_PA14__D9 PINMUX_PIN(PIN_PA14, 6, 2)
-#define PIN_PA15 14
+#define PIN_PA15 15
#define PIN_PA15__GPIO PINMUX_PIN(PIN_PA15, 0, 0)
#define PIN_PA15__SPI0_MOSI PINMUX_PIN(PIN_PA15, 1, 1)
#define PIN_PA15__TF1 PINMUX_PIN(PIN_PA15, 2, 1)
diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
index 2193637b9cd2..3daf8d5d7878 100644
--- a/arch/arm/boot/dts/sama5d4.dtsi
+++ b/arch/arm/boot/dts/sama5d4.dtsi
@@ -1342,7 +1342,7 @@
dbgu: serial@fc069000 {
compatible = "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart";
reg = <0xfc069000 0x200>;
- interrupts = <2 IRQ_TYPE_LEVEL_HIGH 7>;
+ interrupts = <45 IRQ_TYPE_LEVEL_HIGH 7>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_dbgu>;
clocks = <&dbgu_clk>;
diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
index d0c743853318..27a333eb8987 100644
--- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
+++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
@@ -127,22 +127,14 @@
};
mmcsd_default_mode: mmcsd_default {
mmcsd_default_cfg1 {
- /* MCCLK */
- pins = "GPIO8_B10";
- ste,output = <0>;
- };
- mmcsd_default_cfg2 {
- /* MCCMDDIR, MCDAT0DIR, MCDAT31DIR, MCDATDIR2 */
- pins = "GPIO10_C11", "GPIO15_A12",
- "GPIO16_C13", "GPIO23_D15";
- ste,output = <1>;
- };
- mmcsd_default_cfg3 {
- /* MCCMD, MCDAT3-0, MCMSFBCLK */
- pins = "GPIO9_A10", "GPIO11_B11",
- "GPIO12_A11", "GPIO13_C12",
- "GPIO14_B12", "GPIO24_C15";
- ste,input = <1>;
+ /*
+ * MCCLK, MCCMDDIR, MCDAT0DIR, MCDAT31DIR, MCDATDIR2
+ * MCCMD, MCDAT3-0, MCMSFBCLK
+ */
+ pins = "GPIO8_B10", "GPIO9_A10", "GPIO10_C11", "GPIO11_B11",
+ "GPIO12_A11", "GPIO13_C12", "GPIO14_B12", "GPIO15_A12",
+ "GPIO16_C13", "GPIO23_D15", "GPIO24_C15";
+ ste,output = <2>;
};
};
};
@@ -802,10 +794,21 @@
clock-names = "mclk", "apb_pclk";
interrupt-parent = <&vica>;
interrupts = <22>;
- max-frequency = <48000000>;
+ max-frequency = <400000>;
bus-width = <4>;
cap-mmc-highspeed;
cap-sd-highspeed;
+ full-pwr-cycle;
+ /*
+ * The STw4811 circuit used with the Nomadik strictly
+ * requires that all of these signal direction pins be
+ * routed and used for its 4-bit levelshifter.
+ */
+ st,sig-dir-dat0;
+ st,sig-dir-dat2;
+ st,sig-dir-dat31;
+ st,sig-dir-cmd;
+ st,sig-pin-fbclk;
pinctrl-names = "default";
pinctrl-0 = <&mmcsd_default_mux>, <&mmcsd_default_mode>;
vmmc-supply = <&vmmc_regulator>;
diff --git a/arch/arm/common/icst.c b/arch/arm/common/icst.c
index 2dc6da70ae59..d7ed252708c5 100644
--- a/arch/arm/common/icst.c
+++ b/arch/arm/common/icst.c
@@ -16,7 +16,7 @@
*/
#include <linux/module.h>
#include <linux/kernel.h>
-
+#include <asm/div64.h>
#include <asm/hardware/icst.h>
/*
@@ -29,7 +29,11 @@ EXPORT_SYMBOL(icst525_s2div);
unsigned long icst_hz(const struct icst_params *p, struct icst_vco vco)
{
- return p->ref * 2 * (vco.v + 8) / ((vco.r + 2) * p->s2div[vco.s]);
+ u64 dividend = p->ref * 2 * (u64)(vco.v + 8);
+ u32 divisor = (vco.r + 2) * p->s2div[vco.s];
+
+ do_div(dividend, divisor);
+ return (unsigned long)dividend;
}
EXPORT_SYMBOL(icst_hz);
@@ -58,6 +62,7 @@ icst_hz_to_vco(const struct icst_params *p, unsigned long freq)
if (f > p->vco_min && f <= p->vco_max)
break;
+ i++;
} while (i < 8);
if (i >= 8)
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
index 0375c8caa061..9408a994cc91 100644
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -35,14 +35,21 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
dma_addr_t dev_addr, unsigned long offset, size_t size,
enum dma_data_direction dir, struct dma_attrs *attrs)
{
- bool local = XEN_PFN_DOWN(dev_addr) == page_to_xen_pfn(page);
+ unsigned long page_pfn = page_to_xen_pfn(page);
+ unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
+ unsigned long compound_pages =
+ (1<<compound_order(page)) * XEN_PFN_PER_PAGE;
+ bool local = (page_pfn <= dev_pfn) &&
+ (dev_pfn - page_pfn < compound_pages);
+
/*
- * Dom0 is mapped 1:1, while the Linux page can be spanned accross
- * multiple Xen page, it's not possible to have a mix of local and
- * foreign Xen page. So if the first xen_pfn == mfn the page is local
- * otherwise it's a foreign page grant-mapped in dom0. If the page is
- * local we can safely call the native dma_ops function, otherwise we
- * call the xen specific function.
+ * Dom0 is mapped 1:1, while the Linux page can span across
+ * multiple Xen pages, it's not possible for it to contain a
+ * mix of local and foreign Xen pages. So if the first xen_pfn
+ * == mfn the page is local otherwise it's a foreign page
+ * grant-mapped in dom0. If the page is local we can safely
+ * call the native dma_ops function, otherwise we call the xen
+ * specific function.
*/
if (local)
__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index 96e935bbc38c..3705fc2921c2 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -155,7 +155,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
u64 val;
val = kvm_arm_timer_get_reg(vcpu, reg->id);
- return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id));
+ return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}
static unsigned long num_core_regs(void)
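The get_timer_reg() fix hinges on copy_to_user() returning the number of bytes it failed to copy rather than an errno; passing that count straight back would hand userspace a positive value where the KVM ioctl expects 0 or -EFAULT. A sketch of the idiom (put_user_val() is a hypothetical helper, not a kernel API):

	#include <linux/uaccess.h>

	static int put_user_val(void __user *uaddr, const void *val,
				size_t size)
	{
		/* copy_to_user() returns bytes NOT copied, never an errno */
		return copy_to_user(uaddr, val, size) ? -EFAULT : 0;
	}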
diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c
index 7b76ce01c21d..8633c703546a 100644
--- a/arch/arm/mach-omap2/gpmc-onenand.c
+++ b/arch/arm/mach-omap2/gpmc-onenand.c
@@ -101,10 +101,8 @@ static void omap2_onenand_set_async_mode(void __iomem *onenand_base)
static void set_onenand_cfg(void __iomem *onenand_base)
{
- u32 reg;
+ u32 reg = ONENAND_SYS_CFG1_RDY | ONENAND_SYS_CFG1_INT;
- reg = readw(onenand_base + ONENAND_REG_SYS_CFG1);
- reg &= ~((0x7 << ONENAND_SYS_CFG1_BRL_SHIFT) | (0x7 << 9));
reg |= (latency << ONENAND_SYS_CFG1_BRL_SHIFT) |
ONENAND_SYS_CFG1_BL_16;
if (onenand_flags & ONENAND_FLAG_SYNCREAD)
@@ -123,6 +121,7 @@ static void set_onenand_cfg(void __iomem *onenand_base)
reg |= ONENAND_SYS_CFG1_VHF;
else
reg &= ~ONENAND_SYS_CFG1_VHF;
+
writew(reg, onenand_base + ONENAND_REG_SYS_CFG1);
}
@@ -289,6 +288,7 @@ static int omap2_onenand_setup_async(void __iomem *onenand_base)
}
}
+ onenand_async.sync_write = true;
omap2_onenand_calc_async_timings(&t);
ret = gpmc_cs_program_settings(gpmc_onenand_data->cs, &onenand_async);
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 48495ad82aba..8e0bd5939e5a 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -2200,6 +2200,11 @@ static int _enable(struct omap_hwmod *oh)
*/
static int _idle(struct omap_hwmod *oh)
{
+ if (oh->flags & HWMOD_NO_IDLE) {
+ oh->_int_flags |= _HWMOD_SKIP_ENABLE;
+ return 0;
+ }
+
pr_debug("omap_hwmod: %s: idling\n", oh->name);
if (oh->_state != _HWMOD_STATE_ENABLED) {
@@ -2504,6 +2509,8 @@ static int __init _init(struct omap_hwmod *oh, void *data)
oh->flags |= HWMOD_INIT_NO_RESET;
if (of_find_property(np, "ti,no-idle-on-init", NULL))
oh->flags |= HWMOD_INIT_NO_IDLE;
+ if (of_find_property(np, "ti,no-idle", NULL))
+ oh->flags |= HWMOD_NO_IDLE;
}
oh->_state = _HWMOD_STATE_INITIALIZED;
@@ -2630,7 +2637,7 @@ static void __init _setup_postsetup(struct omap_hwmod *oh)
* XXX HWMOD_INIT_NO_IDLE does not belong in hwmod data -
* it should be set by the core code as a runtime flag during startup
*/
- if ((oh->flags & HWMOD_INIT_NO_IDLE) &&
+ if ((oh->flags & (HWMOD_INIT_NO_IDLE | HWMOD_NO_IDLE)) &&
(postsetup_state == _HWMOD_STATE_IDLE)) {
oh->_int_flags |= _HWMOD_SKIP_ENABLE;
postsetup_state = _HWMOD_STATE_ENABLED;
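HWMOD_NO_IDLE makes _idle() bail out early and record _HWMOD_SKIP_ENABLE, so the framework never gates the module's clocks; this is what a "ti,no-idle" DT property requests for IPs such as CPSW on DRA7. A stripped-down sketch of the flag handling (stub types; the bit value of _HWMOD_SKIP_ENABLE is assumed, and the real struct omap_hwmod carries far more state):

	#define HWMOD_NO_IDLE		(1 << 15)
	#define _HWMOD_SKIP_ENABLE	(1 << 0)	/* assumed value */

	struct hwmod_stub {
		unsigned int flags;		/* from DT / hwmod data */
		unsigned int _int_flags;	/* runtime-internal */
	};

	static int idle_stub(struct hwmod_stub *oh)
	{
		if (oh->flags & HWMOD_NO_IDLE) {
			/* skipped idling; later enables become no-ops */
			oh->_int_flags |= _HWMOD_SKIP_ENABLE;
			return 0;
		}
		/* ... normal idle sequence would run here ... */
		return 0;
	}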
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h
index 76bce11c85a4..7c7a31169475 100644
--- a/arch/arm/mach-omap2/omap_hwmod.h
+++ b/arch/arm/mach-omap2/omap_hwmod.h
@@ -525,6 +525,8 @@ struct omap_hwmod_omap4_prcm {
* or idled.
* HWMOD_OPT_CLKS_NEEDED: The optional clocks are needed for the module to
* operate and they need to be handled at the same time as the main_clk.
+ * HWMOD_NO_IDLE: Do not idle the hwmod at all. Useful to handle certain
+ * IPs like CPSW on DRA7, where clocks to this module cannot be disabled.
*/
#define HWMOD_SWSUP_SIDLE (1 << 0)
#define HWMOD_SWSUP_MSTANDBY (1 << 1)
@@ -541,6 +543,7 @@ struct omap_hwmod_omap4_prcm {
#define HWMOD_SWSUP_SIDLE_ACT (1 << 12)
#define HWMOD_RECONFIG_IO_CHAIN (1 << 13)
#define HWMOD_OPT_CLKS_NEEDED (1 << 14)
+#define HWMOD_NO_IDLE (1 << 15)
/*
* omap_hwmod._int_flags definitions
diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S
index eafd120b53f1..1b9f0520dea9 100644
--- a/arch/arm/mach-omap2/sleep34xx.S
+++ b/arch/arm/mach-omap2/sleep34xx.S
@@ -86,13 +86,18 @@ ENTRY(enable_omap3630_toggle_l2_on_restore)
stmfd sp!, {lr} @ save registers on stack
/* Setup so that we will disable and enable l2 */
mov r1, #0x1
- adrl r2, l2dis_3630 @ may be too distant for plain adr
- str r1, [r2]
+ adrl r3, l2dis_3630_offset @ may be too distant for plain adr
+ ldr r2, [r3] @ value for offset
+ str r1, [r2, r3] @ write to l2dis_3630
ldmfd sp!, {pc} @ restore regs and return
ENDPROC(enable_omap3630_toggle_l2_on_restore)
- .text
-/* Function to call rom code to save secure ram context */
+/*
+ * Function to call rom code to save secure ram context. This gets
+ * relocated to SRAM, so it can all live in the .data section.
+ * Otherwise we would need to initialize api_params separately.
+ */
+ .data
.align 3
ENTRY(save_secure_ram_context)
stmfd sp!, {r4 - r11, lr} @ save registers on stack
@@ -126,6 +131,8 @@ ENDPROC(save_secure_ram_context)
ENTRY(save_secure_ram_context_sz)
.word . - save_secure_ram_context
+ .text
+
/*
* ======================
* == Idle entry point ==
@@ -289,12 +296,6 @@ wait_sdrc_ready:
bic r5, r5, #0x40
str r5, [r4]
-/*
- * PC-relative stores lead to undefined behaviour in Thumb-2: use a r7 as a
- * base instead.
- * Be careful not to clobber r7 when maintaing this code.
- */
-
is_dll_in_lock_mode:
/* Is dll in lock mode? */
ldr r4, sdrc_dlla_ctrl
@@ -302,11 +303,7 @@ is_dll_in_lock_mode:
tst r5, #0x4
bne exit_nonoff_modes @ Return if locked
/* wait till dll locks */
- adr r7, kick_counter
wait_dll_lock_timed:
- ldr r4, wait_dll_lock_counter
- add r4, r4, #1
- str r4, [r7, #wait_dll_lock_counter - kick_counter]
ldr r4, sdrc_dlla_status
/* Wait 20uS for lock */
mov r6, #8
@@ -330,9 +327,6 @@ kick_dll:
orr r6, r6, #(1<<3) @ enable dll
str r6, [r4]
dsb
- ldr r4, kick_counter
- add r4, r4, #1
- str r4, [r7] @ kick_counter
b wait_dll_lock_timed
exit_nonoff_modes:
@@ -360,15 +354,6 @@ sdrc_dlla_status:
.word SDRC_DLLA_STATUS_V
sdrc_dlla_ctrl:
.word SDRC_DLLA_CTRL_V
- /*
- * When exporting to userspace while the counters are in SRAM,
- * these 2 words need to be at the end to facilitate retrival!
- */
-kick_counter:
- .word 0
-wait_dll_lock_counter:
- .word 0
-
ENTRY(omap3_do_wfi_sz)
.word . - omap3_do_wfi
@@ -437,7 +422,9 @@ ENTRY(omap3_restore)
cmp r2, #0x0 @ Check if target power state was OFF or RET
bne logic_l1_restore
- ldr r0, l2dis_3630
+ adr r1, l2dis_3630_offset @ address for offset
+ ldr r0, [r1] @ value for offset
+ ldr r0, [r1, r0] @ value at l2dis_3630
cmp r0, #0x1 @ should we disable L2 on 3630?
bne skipl2dis
mrc p15, 0, r0, c1, c0, 1
@@ -449,12 +436,14 @@ skipl2dis:
and r1, #0x700
cmp r1, #0x300
beq l2_inv_gp
+ adr r0, l2_inv_api_params_offset
+ ldr r3, [r0]
+ add r3, r3, r0 @ r3 points to dummy parameters
mov r0, #40 @ set service ID for PPA
mov r12, r0 @ copy secure Service ID in r12
mov r1, #0 @ set task id for ROM code in r1
mov r2, #4 @ set some flags in r2, r6
mov r6, #0xff
- adr r3, l2_inv_api_params @ r3 points to dummy parameters
dsb @ data write barrier
dmb @ data memory barrier
smc #1 @ call SMI monitor (smi #1)
@@ -488,8 +477,8 @@ skipl2dis:
b logic_l1_restore
.align
-l2_inv_api_params:
- .word 0x1, 0x00
+l2_inv_api_params_offset:
+ .long l2_inv_api_params - .
l2_inv_gp:
/* Execute smi to invalidate L2 cache */
mov r12, #0x1 @ set up to invalidate L2
@@ -506,7 +495,9 @@ l2_inv_gp:
mov r12, #0x2
smc #0 @ Call SMI monitor (smieq)
logic_l1_restore:
- ldr r1, l2dis_3630
+	adr	r0, l2dis_3630_offset	@ address for offset
+ ldr r1, [r0] @ value for offset
+ ldr r1, [r0, r1] @ value at l2dis_3630
cmp r1, #0x1 @ Test if L2 re-enable needed on 3630
bne skipl2reen
mrc p15, 0, r1, c1, c0, 1
@@ -535,9 +526,17 @@ control_stat:
.word CONTROL_STAT
control_mem_rta:
.word CONTROL_MEM_RTA_CTRL
+l2dis_3630_offset:
+ .long l2dis_3630 - .
+
+ .data
l2dis_3630:
.word 0
+ .data
+l2_inv_api_params:
+ .word 0x1, 0x00
+
/*
* Internal functions
*/
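The sleep34xx.S rework replaces direct literal references to .data symbols with self-relative offsets (".long label - ."), so the code keeps finding its variables after being copied into SRAM, and the PC-relative store workaround with its SRAM-exported counters can be dropped. The C analogue of the trick, as a sketch:

	#include <stdint.h>

	/* in assembly: l2dis_3630_offset: .long l2dis_3630 - . */
	extern int32_t l2dis_3630_offset;

	/* target = address of the offset word + the offset it holds,
	 * which stays correct wherever the code+word pair is copied */
	static void *resolve(const int32_t *offset_slot)
	{
		return (char *)offset_slot + *offset_slot;
	}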
diff --git a/arch/arm/mach-omap2/sleep44xx.S b/arch/arm/mach-omap2/sleep44xx.S
index 9b09d85d811a..c7a3b4aab4b5 100644
--- a/arch/arm/mach-omap2/sleep44xx.S
+++ b/arch/arm/mach-omap2/sleep44xx.S
@@ -29,12 +29,6 @@
dsb
.endm
-ppa_zero_params:
- .word 0x0
-
-ppa_por_params:
- .word 1, 0
-
#ifdef CONFIG_ARCH_OMAP4
/*
@@ -266,7 +260,9 @@ ENTRY(omap4_cpu_resume)
beq skip_ns_smp_enable
ppa_actrl_retry:
mov r0, #OMAP4_PPA_CPU_ACTRL_SMP_INDEX
- adr r3, ppa_zero_params @ Pointer to parameters
+ adr r1, ppa_zero_params_offset
+ ldr r3, [r1]
+ add r3, r3, r1 @ Pointer to ppa_zero_params
mov r1, #0x0 @ Process ID
mov r2, #0x4 @ Flag
mov r6, #0xff
@@ -303,7 +299,9 @@ skip_ns_smp_enable:
ldr r0, =OMAP4_PPA_L2_POR_INDEX
ldr r1, =OMAP44XX_SAR_RAM_BASE
ldr r4, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
- adr r3, ppa_por_params
+ adr r1, ppa_por_params_offset
+ ldr r3, [r1]
+ add r3, r3, r1 @ Pointer to ppa_por_params
str r4, [r3, #0x04]
mov r1, #0x0 @ Process ID
mov r2, #0x4 @ Flag
@@ -328,6 +326,8 @@ skip_l2en:
#endif
b cpu_resume @ Jump to generic resume
+ppa_por_params_offset:
+ .long ppa_por_params - .
ENDPROC(omap4_cpu_resume)
#endif /* CONFIG_ARCH_OMAP4 */
@@ -380,4 +380,13 @@ ENTRY(omap_do_wfi)
nop
ldmfd sp!, {pc}
+ppa_zero_params_offset:
+ .long ppa_zero_params - .
ENDPROC(omap_do_wfi)
+
+ .data
+ppa_zero_params:
+ .word 0
+
+ppa_por_params:
+ .word 1, 0
diff --git a/arch/arm/mach-s3c64xx/dev-audio.c b/arch/arm/mach-s3c64xx/dev-audio.c
index ff780a8d8366..9a42736ef4ac 100644
--- a/arch/arm/mach-s3c64xx/dev-audio.c
+++ b/arch/arm/mach-s3c64xx/dev-audio.c
@@ -54,12 +54,12 @@ static int s3c64xx_i2s_cfg_gpio(struct platform_device *pdev)
static struct resource s3c64xx_iis0_resource[] = {
[0] = DEFINE_RES_MEM(S3C64XX_PA_IIS0, SZ_256),
- [1] = DEFINE_RES_DMA(DMACH_I2S0_OUT),
- [2] = DEFINE_RES_DMA(DMACH_I2S0_IN),
};
-static struct s3c_audio_pdata i2sv3_pdata = {
+static struct s3c_audio_pdata i2s0_pdata = {
.cfg_gpio = s3c64xx_i2s_cfg_gpio,
+ .dma_playback = DMACH_I2S0_OUT,
+ .dma_capture = DMACH_I2S0_IN,
};
struct platform_device s3c64xx_device_iis0 = {
@@ -68,15 +68,19 @@ struct platform_device s3c64xx_device_iis0 = {
.num_resources = ARRAY_SIZE(s3c64xx_iis0_resource),
.resource = s3c64xx_iis0_resource,
.dev = {
- .platform_data = &i2sv3_pdata,
+ .platform_data = &i2s0_pdata,
},
};
EXPORT_SYMBOL(s3c64xx_device_iis0);
static struct resource s3c64xx_iis1_resource[] = {
[0] = DEFINE_RES_MEM(S3C64XX_PA_IIS1, SZ_256),
- [1] = DEFINE_RES_DMA(DMACH_I2S1_OUT),
- [2] = DEFINE_RES_DMA(DMACH_I2S1_IN),
+};
+
+static struct s3c_audio_pdata i2s1_pdata = {
+ .cfg_gpio = s3c64xx_i2s_cfg_gpio,
+ .dma_playback = DMACH_I2S1_OUT,
+ .dma_capture = DMACH_I2S1_IN,
};
struct platform_device s3c64xx_device_iis1 = {
@@ -85,19 +89,19 @@ struct platform_device s3c64xx_device_iis1 = {
.num_resources = ARRAY_SIZE(s3c64xx_iis1_resource),
.resource = s3c64xx_iis1_resource,
.dev = {
- .platform_data = &i2sv3_pdata,
+ .platform_data = &i2s1_pdata,
},
};
EXPORT_SYMBOL(s3c64xx_device_iis1);
static struct resource s3c64xx_iisv4_resource[] = {
[0] = DEFINE_RES_MEM(S3C64XX_PA_IISV4, SZ_256),
- [1] = DEFINE_RES_DMA(DMACH_HSI_I2SV40_TX),
- [2] = DEFINE_RES_DMA(DMACH_HSI_I2SV40_RX),
};
static struct s3c_audio_pdata i2sv4_pdata = {
.cfg_gpio = s3c64xx_i2s_cfg_gpio,
+ .dma_playback = DMACH_HSI_I2SV40_TX,
+ .dma_capture = DMACH_HSI_I2SV40_RX,
.type = {
.i2s = {
.quirks = QUIRK_PRI_6CHAN,
@@ -142,12 +146,12 @@ static int s3c64xx_pcm_cfg_gpio(struct platform_device *pdev)
static struct resource s3c64xx_pcm0_resource[] = {
[0] = DEFINE_RES_MEM(S3C64XX_PA_PCM0, SZ_256),
- [1] = DEFINE_RES_DMA(DMACH_PCM0_TX),
- [2] = DEFINE_RES_DMA(DMACH_PCM0_RX),
};
static struct s3c_audio_pdata s3c_pcm0_pdata = {
.cfg_gpio = s3c64xx_pcm_cfg_gpio,
+ .dma_capture = DMACH_PCM0_RX,
+ .dma_playback = DMACH_PCM0_TX,
};
struct platform_device s3c64xx_device_pcm0 = {
@@ -163,12 +167,12 @@ EXPORT_SYMBOL(s3c64xx_device_pcm0);
static struct resource s3c64xx_pcm1_resource[] = {
[0] = DEFINE_RES_MEM(S3C64XX_PA_PCM1, SZ_256),
- [1] = DEFINE_RES_DMA(DMACH_PCM1_TX),
- [2] = DEFINE_RES_DMA(DMACH_PCM1_RX),
};
static struct s3c_audio_pdata s3c_pcm1_pdata = {
.cfg_gpio = s3c64xx_pcm_cfg_gpio,
+ .dma_playback = DMACH_PCM1_TX,
+ .dma_capture = DMACH_PCM1_RX,
};
struct platform_device s3c64xx_device_pcm1 = {
@@ -196,13 +200,14 @@ static int s3c64xx_ac97_cfg_gpe(struct platform_device *pdev)
static struct resource s3c64xx_ac97_resource[] = {
[0] = DEFINE_RES_MEM(S3C64XX_PA_AC97, SZ_256),
- [1] = DEFINE_RES_DMA(DMACH_AC97_PCMOUT),
- [2] = DEFINE_RES_DMA(DMACH_AC97_PCMIN),
- [3] = DEFINE_RES_DMA(DMACH_AC97_MICIN),
- [4] = DEFINE_RES_IRQ(IRQ_AC97),
+ [1] = DEFINE_RES_IRQ(IRQ_AC97),
};
-static struct s3c_audio_pdata s3c_ac97_pdata;
+static struct s3c_audio_pdata s3c_ac97_pdata = {
+ .dma_playback = DMACH_AC97_PCMOUT,
+ .dma_capture = DMACH_AC97_PCMIN,
+ .dma_capture_mic = DMACH_AC97_MICIN,
+};
static u64 s3c64xx_ac97_dmamask = DMA_BIT_MASK(32);
diff --git a/arch/arm/mach-s3c64xx/include/mach/dma.h b/arch/arm/mach-s3c64xx/include/mach/dma.h
index 096e14073bd9..9c739eafe95c 100644
--- a/arch/arm/mach-s3c64xx/include/mach/dma.h
+++ b/arch/arm/mach-s3c64xx/include/mach/dma.h
@@ -14,38 +14,38 @@
#define S3C64XX_DMA_CHAN(name) ((unsigned long)(name))
/* DMA0/SDMA0 */
-#define DMACH_UART0 S3C64XX_DMA_CHAN("uart0_tx")
-#define DMACH_UART0_SRC2 S3C64XX_DMA_CHAN("uart0_rx")
-#define DMACH_UART1 S3C64XX_DMA_CHAN("uart1_tx")
-#define DMACH_UART1_SRC2 S3C64XX_DMA_CHAN("uart1_rx")
-#define DMACH_UART2 S3C64XX_DMA_CHAN("uart2_tx")
-#define DMACH_UART2_SRC2 S3C64XX_DMA_CHAN("uart2_rx")
-#define DMACH_UART3 S3C64XX_DMA_CHAN("uart3_tx")
-#define DMACH_UART3_SRC2 S3C64XX_DMA_CHAN("uart3_rx")
-#define DMACH_PCM0_TX S3C64XX_DMA_CHAN("pcm0_tx")
-#define DMACH_PCM0_RX S3C64XX_DMA_CHAN("pcm0_rx")
-#define DMACH_I2S0_OUT S3C64XX_DMA_CHAN("i2s0_tx")
-#define DMACH_I2S0_IN S3C64XX_DMA_CHAN("i2s0_rx")
+#define DMACH_UART0 "uart0_tx"
+#define DMACH_UART0_SRC2 "uart0_rx"
+#define DMACH_UART1 "uart1_tx"
+#define DMACH_UART1_SRC2 "uart1_rx"
+#define DMACH_UART2 "uart2_tx"
+#define DMACH_UART2_SRC2 "uart2_rx"
+#define DMACH_UART3 "uart3_tx"
+#define DMACH_UART3_SRC2 "uart3_rx"
+#define DMACH_PCM0_TX "pcm0_tx"
+#define DMACH_PCM0_RX "pcm0_rx"
+#define DMACH_I2S0_OUT "i2s0_tx"
+#define DMACH_I2S0_IN "i2s0_rx"
#define DMACH_SPI0_TX S3C64XX_DMA_CHAN("spi0_tx")
#define DMACH_SPI0_RX S3C64XX_DMA_CHAN("spi0_rx")
-#define DMACH_HSI_I2SV40_TX S3C64XX_DMA_CHAN("i2s2_tx")
-#define DMACH_HSI_I2SV40_RX S3C64XX_DMA_CHAN("i2s2_rx")
+#define DMACH_HSI_I2SV40_TX "i2s2_tx"
+#define DMACH_HSI_I2SV40_RX "i2s2_rx"
/* DMA1/SDMA1 */
-#define DMACH_PCM1_TX S3C64XX_DMA_CHAN("pcm1_tx")
-#define DMACH_PCM1_RX S3C64XX_DMA_CHAN("pcm1_rx")
-#define DMACH_I2S1_OUT S3C64XX_DMA_CHAN("i2s1_tx")
-#define DMACH_I2S1_IN S3C64XX_DMA_CHAN("i2s1_rx")
+#define DMACH_PCM1_TX "pcm1_tx"
+#define DMACH_PCM1_RX "pcm1_rx"
+#define DMACH_I2S1_OUT "i2s1_tx"
+#define DMACH_I2S1_IN "i2s1_rx"
#define DMACH_SPI1_TX S3C64XX_DMA_CHAN("spi1_tx")
#define DMACH_SPI1_RX S3C64XX_DMA_CHAN("spi1_rx")
-#define DMACH_AC97_PCMOUT S3C64XX_DMA_CHAN("ac97_out")
-#define DMACH_AC97_PCMIN S3C64XX_DMA_CHAN("ac97_in")
-#define DMACH_AC97_MICIN S3C64XX_DMA_CHAN("ac97_mic")
-#define DMACH_PWM S3C64XX_DMA_CHAN("pwm")
-#define DMACH_IRDA S3C64XX_DMA_CHAN("irda")
-#define DMACH_EXTERNAL S3C64XX_DMA_CHAN("external")
-#define DMACH_SECURITY_RX S3C64XX_DMA_CHAN("sec_rx")
-#define DMACH_SECURITY_TX S3C64XX_DMA_CHAN("sec_tx")
+#define DMACH_AC97_PCMOUT "ac97_out"
+#define DMACH_AC97_PCMIN "ac97_in"
+#define DMACH_AC97_MICIN "ac97_mic"
+#define DMACH_PWM "pwm"
+#define DMACH_IRDA "irda"
+#define DMACH_EXTERNAL "external"
+#define DMACH_SECURITY_RX "sec_rx"
+#define DMACH_SECURITY_TX "sec_tx"
enum dma_ch {
DMACH_MAX = 32
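With the DMACH_* identifiers reduced to channel-name strings carried in s3c_audio_pdata, a driver requests its channel through the dmaengine filter interface instead of a DEFINE_RES_DMA resource. A hedged sketch of the consuming side, assuming the PL080 filter (pl08x_filter_id) used by the S3C64xx DMA controller:

	#include <linux/dmaengine.h>
	#include <linux/amba/pl08x.h>

	static struct dma_chan *request_audio_chan(const char *ch_name)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		/* the filter matches on the channel-name string */
		return dma_request_channel(mask, pl08x_filter_id,
					   (void *)ch_name);
	}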
diff --git a/arch/arm/plat-samsung/devs.c b/arch/arm/plat-samsung/devs.c
index 82074625de5c..e212f9d804bd 100644
--- a/arch/arm/plat-samsung/devs.c
+++ b/arch/arm/plat-samsung/devs.c
@@ -65,6 +65,7 @@
#include <linux/platform_data/usb-ohci-s3c2410.h>
#include <plat/usb-phy.h>
#include <plat/regs-spi.h>
+#include <linux/platform_data/asoc-s3c.h>
#include <linux/platform_data/spi-s3c64xx.h>
static u64 samsung_device_dma_mask = DMA_BIT_MASK(32);
@@ -74,9 +75,12 @@ static u64 samsung_device_dma_mask = DMA_BIT_MASK(32);
static struct resource s3c_ac97_resource[] = {
[0] = DEFINE_RES_MEM(S3C2440_PA_AC97, S3C2440_SZ_AC97),
[1] = DEFINE_RES_IRQ(IRQ_S3C244X_AC97),
- [2] = DEFINE_RES_DMA_NAMED(DMACH_PCM_OUT, "PCM out"),
- [3] = DEFINE_RES_DMA_NAMED(DMACH_PCM_IN, "PCM in"),
- [4] = DEFINE_RES_DMA_NAMED(DMACH_MIC_IN, "Mic in"),
+};
+
+static struct s3c_audio_pdata s3c_ac97_pdata = {
+ .dma_playback = (void *)DMACH_PCM_OUT,
+ .dma_capture = (void *)DMACH_PCM_IN,
+ .dma_capture_mic = (void *)DMACH_MIC_IN,
};
struct platform_device s3c_device_ac97 = {
@@ -87,6 +91,7 @@ struct platform_device s3c_device_ac97 = {
.dev = {
.dma_mask = &samsung_device_dma_mask,
.coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &s3c_ac97_pdata,
}
};
#endif /* CONFIG_CPU_S3C2440 */
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 4043c35962cc..4d2c201ddb93 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -36,6 +36,8 @@ config ARCH_LAYERSCAPE
config ARCH_HISI
bool "Hisilicon SoC Family"
+ select ARM_TIMER_SP804
+ select PINCTRL
help
This enables support for Hisilicon ARMv8 SoC family
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index a5c84594f6ad..285b32fa41c1 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -28,6 +28,7 @@ endif
KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr)
KBUILD_CFLAGS += -fno-pic
+KBUILD_CFLAGS += $(call cc-option, -mpc-relative-literal-loads)
KBUILD_AFLAGS += $(lseinstr)
ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
index 8d43a0fce522..691ddf0d79ce 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
+++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
@@ -6,11 +6,11 @@
*/
/dts-v1/;
-
-/*Reserved 1MB memory for MCU*/
-/memreserve/ 0x05e00000 0x00100000;
+#include <dt-bindings/gpio/gpio.h>
#include "hi6220.dtsi"
+#include "hikey-gpio.dtsi"
+#include "hikey-pinctrl.dtsi"
/ {
model = "HiKey Development Board";
@@ -21,14 +21,302 @@
serial1 = &uart1; /* BT UART */
serial2 = &uart2; /* LS Expansion UART0 */
serial3 = &uart3; /* LS Expansion UART1 */
+ spi0 = &spi_0;
};
chosen {
stdout-path = "serial3:115200n8";
};
+ /*
+ * Reserve below regions from memory node:
+ *
+	 * 0x05e0,0000 - 0x05ef,ffff: MCU firmware runtime area
+ * 0x05f0,1000 - 0x05f0,1fff: Reboot reason
+ * 0x06df,f000 - 0x06df,ffff: Mailbox message data
+ * 0x0740,f000 - 0x0740,ffff: MCU firmware section
+ * 0x21f0,0000 - 0x21ff,ffff: pstore/ramoops buffer
+ * 0x3e00,0000 - 0x3fff,ffff: OP-TEE
+ */
memory@0 {
device_type = "memory";
- reg = <0x0 0x0 0x0 0x40000000>;
+ reg = <0x00000000 0x00000000 0x00000000 0x05e00000>,
+ <0x00000000 0x05f00000 0x00000000 0x00001000>,
+ <0x00000000 0x05f02000 0x00000000 0x00efd000>,
+ <0x00000000 0x06e00000 0x00000000 0x0060f000>,
+ <0x00000000 0x07410000 0x00000000 0x1aaf0000>,
+ <0x00000000 0x22000000 0x00000000 0x1c000000>;
+ };
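The six reg entries carve exactly the holes listed in the comment; a quick standalone check of the arithmetic (addresses copied from the node above, 1 GiB total DRAM assumed):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		const uint64_t reg[][2] = {
			{ 0x00000000, 0x05e00000 }, { 0x05f00000, 0x00001000 },
			{ 0x05f02000, 0x00efd000 }, { 0x06e00000, 0x0060f000 },
			{ 0x07410000, 0x1aaf0000 }, { 0x22000000, 0x1c000000 },
		};
		int n = sizeof(reg) / sizeof(reg[0]);

		for (int i = 1; i < n; i++) {
			uint64_t prev_end = reg[i - 1][0] + reg[i - 1][1];
			printf("hole: 0x%08llx - 0x%08llx\n",
			       (unsigned long long)prev_end,
			       (unsigned long long)(reg[i][0] - 1));
		}
		/* last hole, up to the 0x40000000 end of DRAM: OP-TEE */
		printf("hole: 0x%08llx - 0x3fffffff\n",
		       (unsigned long long)(reg[n - 1][0] + reg[n - 1][1]));
		return 0;
	}

The output lines up with the comment, in order: MCU runtime, reboot reason, mailbox data, MCU firmware section, pstore/ramoops, OP-TEE.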
+
+ pstore: pstore@0x21f00000 {
+ no-map;
+ reg = <0x0 0x21f00000 0x0 0x00100000>; /* pstore/ramoops buffer */
+ };
+
+ ramoops {
+ compatible = "ramoops";
+ memory-region = <&pstore>;
+ record-size = <0x0 0x00020000>;
+ console-size = <0x0 0x00020000>;
+ ftrace-size = <0x0 0x00020000>;
+ };
+
+ reboot_reason: reboot-reason@05f01000 {
+ compatible = "linux,reboot-reason-sram";
+ reg = <0x0 0x05F01000 0x0 0x4>;
+ reason,none = <0x77665501>;
+ reason,bootloader = <0x77665500>;
+ reason,recovery = <0x77665502>;
+ reason,oem = <0x6f656d00>;
+ };
+
+ soc {
+ i2c0: i2c@f7100000 {
+ status = "ok";
+ };
+
+ i2c1: i2c@f7101000 {
+ status = "ok";
+ };
+
+ i2c2: i2c@f7102000 {
+ status = "ok";
+
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ adv7533: adv7533@39 {
+ compatible = "adi,adv7533";
+ reg = <0x39>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <1 2>;
+ pd-gpio = <&gpio0 4 0>;
+ adi,dsi-lanes = <4>;
+
+ port {
+ adv_in: endpoint {
+ remote-endpoint = <&dsi_out>;
+ };
+ };
+ };
+ };
+
+ uart1: uart@f7111000 {
+ status = "ok";
+ };
+
+ uart2: uart@f7112000 {
+ status = "ok";
+ };
+
+ uart3: uart@f7113000 {
+ status = "ok";
+ };
+
+ dwmmc_2: dwmmc2@f723f000 {
+ ti,non-removable;
+ non-removable;
+ /* WL_EN */
+ vmmc-supply = <&wlan_en_reg>;
+
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ wlcore: wlcore@2 {
+ compatible = "ti,wl1835";
+ reg = <2>; /* sdio func num */
+ /* WL_IRQ, WL_HOST_WAKE_GPIO1_3 */
+ interrupt-parent = <&gpio1>;
+ interrupts = <3 IRQ_TYPE_EDGE_RISING>;
+ };
+ };
+
+ wlan_en_reg: fixedregulator@1 {
+ compatible = "regulator-fixed";
+ regulator-name = "wlan-en-regulator";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ /* WLAN_EN GPIO */
+ gpio = <&gpio0 5 0>;
+ /* WLAN card specific delay */
+ startup-delay-us = <70000>;
+ enable-active-high;
+ };
+
+ hisi-ion@0 {
+ compatible = "hisilicon,ion";
+
+ heap_sys_user@0 {
+ heap-name = "sys_user";
+ heap-range = <0x0 0x0>;
+ heap-type = "ion_system";
+ };
+
+ heap_sys_contig@0 {
+ heap-name = "sys_contig";
+ heap-range = <0x0 0x0>;
+ heap-type = "ion_system_contig";
+ };
+
+ heap_cma@0 {
+ heap-name = "cma";
+ heap-range = <0x0 0x0>;
+ heap-type = "ion_cma";
+ };
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ user_led1 {
+ label = "user_led4";
+ gpios = <&gpio4 0 0>; /* <&gpio_user_led_1>; */
+ linux,default-trigger = "heartbeat";
+ };
+
+ user_led2 {
+ label = "user_led3";
+ gpios = <&gpio4 1 0>; /* <&gpio_user_led_2>; */
+ linux,default-trigger = "mmc0";
+ };
+
+ user_led3 {
+ label = "user_led2";
+ gpios = <&gpio4 2 0>; /* <&gpio_user_led_3>; */
+ linux,default-trigger = "mmc1";
+ };
+
+ user_led4 {
+ label = "user_led1";
+ gpios = <&gpio4 3 0>; /* <&gpio_user_led_4>; */
+ linux,default-trigger = "cpu0";
+ };
+
+ wlan_active_led {
+ label = "wifi_active";
+ gpios = <&gpio3 5 0>; /* <&gpio_wlan_active_led>; */
+ linux,default-trigger = "phy0tx";
+ default-state = "off";
+ };
+
+ bt_active_led {
+ label = "bt_active";
+ gpios = <&gpio4 7 0>; /* <&gpio_bt_active_led>; */
+ linux,default-trigger = "hci0rx";
+ default-state = "off";
+ };
+ };
+
+ kim {
+ compatible = "kim";
+ pinctrl-names = "default";
+ pinctrl-0 = <>; /* FIXME: add BT PCM pinctrl here */
+ /*
+ * FIXME: The following is complete CRAP since
+ * the vendor driver doesn't follow the gpio
+ * binding. Passing in a magic Linux gpio number
+ * here until we fix the vendor driver.
+ */
+ /* BT_EN: BT_REG_ON_GPIO1_7 */
+ nshutdown_gpio = <503>;
+ dev_name = "/dev/ttyAMA1";
+ flow_cntrl = <1>;
+ baud_rate = <3000000>;
+ };
+
+ btwilink {
+ compatible = "btwilink";
+ };
+
+ pmic: pmic@f8000000 {
+ compatible = "hisilicon,hi655x-pmic";
+ reg = <0x0 0xf8000000 0x0 0x1000>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ pmic-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+
+		ponkey: ponkey@b1 {
+ compatible = "hisilicon,hi6552-powerkey";
+ interrupt-parent = <&pmic>;
+ interrupts = <6 0>, <5 0>, <4 0>;
+ interrupt-names = "down", "up", "hold 4s";
+ };
+
+ regulators {
+			ldo2: LDO2@a21 {
+				regulator-name = "LDO2_2V8";
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <3200000>;
+ regulator-enable-ramp-delay = <120>;
+ };
+
+ ldo7: LDO7@a26 {
+ regulator-name = "LDO7_SDIO";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <120>;
+ };
+
+ ldo10: LDO10@a29 {
+ regulator-name = "LDO10_2V85";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-enable-ramp-delay = <360>;
+ };
+
+ ldo13: LDO13@a32 {
+ regulator-name = "LDO13_1V8";
+ regulator-min-microvolt = <1600000>;
+ regulator-max-microvolt = <1950000>;
+ regulator-enable-ramp-delay = <120>;
+ };
+
+ ldo14: LDO14@a33 {
+ regulator-name = "LDO14_2V8";
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <3200000>;
+ regulator-enable-ramp-delay = <120>;
+ };
+
+ ldo15: LDO15@a34 {
+ regulator-name = "LDO15_1V8";
+ regulator-min-microvolt = <1600000>;
+ regulator-max-microvolt = <1950000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-enable-ramp-delay = <120>;
+ };
+
+ ldo17: LDO17@a36 {
+ regulator-name = "LDO17_2V5";
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <3200000>;
+ regulator-enable-ramp-delay = <120>;
+ };
+
+ ldo19: LDO19@a38 {
+ regulator-name = "LDO19_3V0";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-enable-ramp-delay = <360>;
+ };
+
+ ldo21: LDO21@a40 {
+ regulator-name = "LDO21_1V8";
+ regulator-min-microvolt = <1650000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-always-on;
+ regulator-enable-ramp-delay = <120>;
+ };
+
+ ldo22: LDO22@a41 {
+ regulator-name = "LDO22_1V2";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-enable-ramp-delay = <120>;
+ };
+ };
};
};
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
index 82d2488a0e86..466985a8510d 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
@@ -6,6 +6,9 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/hi6220-clock.h>
+#include <dt-bindings/pinctrl/hisi.h>
+#include <dt-bindings/thermal/thermal.h>
+#include <dt-bindings/reset/hisi,hi6220-resets.h>
/ {
compatible = "hisilicon,hi6220";
@@ -53,11 +56,42 @@
};
};
+ idle-states {
+ entry-method = "psci";
+
+ CPU_SLEEP: cpu-sleep {
+ compatible = "arm,idle-state";
+ local-timer-stop;
+ arm,psci-suspend-param = <0x0010000>;
+ entry-latency-us = <700>;
+ exit-latency-us = <250>;
+ min-residency-us = <1000>;
+ };
+
+ CLUSTER_SLEEP: cluster-sleep {
+ compatible = "arm,idle-state";
+ local-timer-stop;
+ arm,psci-suspend-param = <0x1010000>;
+ entry-latency-us = <1000>;
+ exit-latency-us = <700>;
+ min-residency-us = <2700>;
+ wakeup-latency-us = <1500>;
+ };
+ };
+
cpu0: cpu@0 {
compatible = "arm,cortex-a53", "arm,armv8";
device_type = "cpu";
reg = <0x0 0x0>;
enable-method = "psci";
+ next-level-cache = <&CLUSTER0_L2>;
+ clocks = <&stub_clock 0>;
+ operating-points-v2 = <&cpu_opp_table>;
+ cooling-min-level = <4>;
+ cooling-max-level = <0>;
+ #cooling-cells = <2>; /* min followed by max */
+ cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
+ dynamic-power-coefficient = <311>;
};
cpu1: cpu@1 {
@@ -65,6 +99,9 @@
device_type = "cpu";
reg = <0x0 0x1>;
enable-method = "psci";
+ next-level-cache = <&CLUSTER0_L2>;
+ operating-points-v2 = <&cpu_opp_table>;
+ cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
};
cpu2: cpu@2 {
@@ -72,6 +109,9 @@
device_type = "cpu";
reg = <0x0 0x2>;
enable-method = "psci";
+ next-level-cache = <&CLUSTER0_L2>;
+ operating-points-v2 = <&cpu_opp_table>;
+ cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
};
cpu3: cpu@3 {
@@ -79,6 +119,9 @@
device_type = "cpu";
reg = <0x0 0x3>;
enable-method = "psci";
+ next-level-cache = <&CLUSTER0_L2>;
+ operating-points-v2 = <&cpu_opp_table>;
+ cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
};
cpu4: cpu@100 {
@@ -86,6 +129,9 @@
device_type = "cpu";
reg = <0x0 0x100>;
enable-method = "psci";
+ next-level-cache = <&CLUSTER1_L2>;
+ operating-points-v2 = <&cpu_opp_table>;
+ cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
};
cpu5: cpu@101 {
@@ -93,6 +139,9 @@
device_type = "cpu";
reg = <0x0 0x101>;
enable-method = "psci";
+ next-level-cache = <&CLUSTER1_L2>;
+ operating-points-v2 = <&cpu_opp_table>;
+ cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
};
cpu6: cpu@102 {
@@ -100,6 +149,9 @@
device_type = "cpu";
reg = <0x0 0x102>;
enable-method = "psci";
+ next-level-cache = <&CLUSTER1_L2>;
+ operating-points-v2 = <&cpu_opp_table>;
+ cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
};
cpu7: cpu@103 {
@@ -107,6 +159,48 @@
device_type = "cpu";
reg = <0x0 0x103>;
enable-method = "psci";
+ next-level-cache = <&CLUSTER1_L2>;
+ operating-points-v2 = <&cpu_opp_table>;
+ cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
+ };
+
+ CLUSTER0_L2: l2-cache0 {
+ compatible = "cache";
+ };
+
+ CLUSTER1_L2: l2-cache1 {
+ compatible = "cache";
+ };
+ };
+
+ cpu_opp_table: cpu_opp_table {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp00 {
+ opp-hz = /bits/ 64 <208000000>;
+ opp-microvolt = <1040000>;
+ clock-latency-ns = <500000>;
+ };
+ opp01 {
+ opp-hz = /bits/ 64 <432000000>;
+ opp-microvolt = <1040000>;
+ clock-latency-ns = <500000>;
+ };
+ opp02 {
+ opp-hz = /bits/ 64 <729000000>;
+ opp-microvolt = <1090000>;
+ clock-latency-ns = <500000>;
+ };
+ opp03 {
+ opp-hz = /bits/ 64 <960000000>;
+ opp-microvolt = <1180000>;
+ clock-latency-ns = <500000>;
+ };
+ opp04 {
+ opp-hz = /bits/ 64 <1200000000>;
+ opp-microvolt = <1330000>;
+ clock-latency-ns = <500000>;
};
};
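The dynamic-power-coefficient on cpu0 feeds the kernel's simple dynamic power model, Pdyn = C * V^2 * f, with C in uW/MHz/V^2, V in volts and f in MHz. Plugging in the opp04 entry from the table above gives a rough idea of the per-core budget at the top OPP (an illustration only):

	#include <stdio.h>

	int main(void)
	{
		const double C = 311.0;		/* dynamic-power-coefficient */
		const double V = 1.33;		/* opp04: 1330000 uV */
		const double f = 1200.0;	/* opp04: 1.2 GHz in MHz */

		printf("Pdyn ~= %.0f mW\n", C * V * V * f / 1000.0);
		return 0;
	}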
@@ -137,6 +231,11 @@
#size-cells = <2>;
ranges;
+ sram: sram@fff80000 {
+ compatible = "hisilicon,hi6220-sramctrl", "syscon";
+ reg = <0x0 0xfff80000 0x0 0x12000>;
+ };
+
ao_ctrl: ao_ctrl@f7800000 {
compatible = "hisilicon,hi6220-aoctrl", "syscon";
reg = <0x0 0xf7800000 0x0 0x2000>;
@@ -147,6 +246,7 @@
compatible = "hisilicon,hi6220-sysctrl", "syscon";
reg = <0x0 0xf7030000 0x0 0x2000>;
#clock-cells = <1>;
+ #reset-cells = <1>;
};
media_ctrl: media_ctrl@f4410000 {
@@ -161,6 +261,14 @@
#clock-cells = <1>;
};
+ stub_clock: stub_clock {
+ compatible = "hisilicon,hi6220-stub-clk";
+ hisilicon,hi6220-clk-sram = <&sram>;
+ #clock-cells = <1>;
+ mbox-names = "mbox-tx";
+ mboxes = <&mailbox 1 0 11>;
+ };
+
uart0: uart@f8015000 { /* console */
compatible = "arm,pl011", "arm,primecell";
reg = <0x0 0xf8015000 0x0 0x1000>;
@@ -177,6 +285,8 @@
clocks = <&sys_ctrl HI6220_UART1_PCLK>,
<&sys_ctrl HI6220_UART1_PCLK>;
clock-names = "uartclk", "apb_pclk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1_pmx_func &uart1_cfg_func1 &uart1_cfg_func2>;
status = "disabled";
};
@@ -187,6 +297,8 @@
clocks = <&sys_ctrl HI6220_UART2_PCLK>,
<&sys_ctrl HI6220_UART2_PCLK>;
clock-names = "uartclk", "apb_pclk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart2_pmx_func &uart2_cfg_func>;
status = "disabled";
};
@@ -197,6 +309,9 @@
clocks = <&sys_ctrl HI6220_UART3_PCLK>,
<&sys_ctrl HI6220_UART3_PCLK>;
clock-names = "uartclk", "apb_pclk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart3_pmx_func &uart3_cfg_func>;
+ status = "disabled";
};
uart4: uart@f7114000 {
@@ -206,7 +321,656 @@
clocks = <&sys_ctrl HI6220_UART4_PCLK>,
<&sys_ctrl HI6220_UART4_PCLK>;
clock-names = "uartclk", "apb_pclk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart4_pmx_func &uart4_cfg_func>;
+ status = "disabled";
+ };
+
+ dual_timer0: dual_timer@f8008000 {
+ compatible = "arm,sp804", "arm,primecell";
+ reg = <0x0 0xf8008000 0x0 0x1000>;
+ interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ao_ctrl 27>;
+ clock-names = "apb_pclk";
+ };
+
+ rtc0: rtc@170000 {
+ compatible = "arm,pl031", "arm,primecell";
+ reg = <0x0 0xf8003000 0x0 0x1000>;
+ interrupts = <0 12 4>;
+ clocks = <&ao_ctrl HI6220_RTC0_PCLK>;
+ clock-names = "apb_pclk";
+ };
+
+ pmx0: pinmux@f7010000 {
+ compatible = "pinctrl-single";
+ reg = <0x0 0xf7010000 0x0 0x27c>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ #gpio-range-cells = <3>;
+ pinctrl-single,register-width = <32>;
+ pinctrl-single,function-mask = <7>;
+ pinctrl-single,gpio-range = <
+ &range 80 8 MUX_M0 /* gpio 3: [0..7] */
+ &range 88 8 MUX_M0 /* gpio 4: [0..7] */
+ &range 96 8 MUX_M0 /* gpio 5: [0..7] */
+ &range 104 8 MUX_M0 /* gpio 6: [0..7] */
+ &range 112 8 MUX_M0 /* gpio 7: [0..7] */
+ &range 120 2 MUX_M0 /* gpio 8: [0..1] */
+ &range 2 6 MUX_M1 /* gpio 8: [2..7] */
+ &range 8 8 MUX_M1 /* gpio 9: [0..7] */
+ &range 0 1 MUX_M1 /* gpio 10: [0] */
+ &range 16 7 MUX_M1 /* gpio 10: [1..7] */
+ &range 23 3 MUX_M1 /* gpio 11: [0..2] */
+ &range 28 5 MUX_M1 /* gpio 11: [3..7] */
+ &range 33 3 MUX_M1 /* gpio 12: [0..2] */
+ &range 43 5 MUX_M1 /* gpio 12: [3..7] */
+ &range 48 8 MUX_M1 /* gpio 13: [0..7] */
+ &range 56 8 MUX_M1 /* gpio 14: [0..7] */
+ &range 74 6 MUX_M1 /* gpio 15: [0..5] */
+ &range 122 1 MUX_M1 /* gpio 15: [6] */
+ &range 126 1 MUX_M1 /* gpio 15: [7] */
+ &range 127 8 MUX_M1 /* gpio 16: [0..7] */
+ &range 135 8 MUX_M1 /* gpio 17: [0..7] */
+ &range 143 8 MUX_M1 /* gpio 18: [0..7] */
+ &range 151 8 MUX_M1 /* gpio 19: [0..7] */
+ >;
+ range: gpio-range {
+ #pinctrl-single,gpio-range-cells = <3>;
+ };
+ };
+
+ pmx1: pinmux@f7010800 {
+ compatible = "pinconf-single";
+ reg = <0x0 0xf7010800 0x0 0x28c>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ pinctrl-single,register-width = <32>;
+ };
+
+ pmx2: pinmux@f8001800 {
+ compatible = "pinconf-single";
+ reg = <0x0 0xf8001800 0x0 0x78>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ pinctrl-single,register-width = <32>;
+ };
+
+ gpio0: gpio@f8011000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0x0 0xf8011000 0x0 0x1000>;
+ interrupts = <0 52 0x4>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&ao_ctrl 2>;
+ clock-names = "apb_pclk";
+ status = "ok";
+ };
+
+ gpio1: gpio@f8012000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0x0 0xf8012000 0x0 0x1000>;
+ interrupts = <0 53 0x4>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&ao_ctrl 2>;
+ clock-names = "apb_pclk";
+ status = "ok";
+ };
+
+ gpio2: gpio@f8013000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0x0 0xf8013000 0x0 0x1000>;
+ interrupts = <0 54 0x4>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&ao_ctrl 2>;
+ clock-names = "apb_pclk";
+ status = "ok";
+ };
+
+ gpio3: gpio@f8014000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0x0 0xf8014000 0x0 0x1000>;
+ interrupts = <0 55 0x4>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pmx0 0 80 8>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&ao_ctrl 2>;
+ clock-names = "apb_pclk";
+ status = "ok";
+ };
+
+ gpio4: gpio@f7020000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0x0 0xf7020000 0x0 0x1000>;
+ interrupts = <0 56 0x4>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pmx0 0 88 8>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&ao_ctrl 2>;
+ clock-names = "apb_pclk";
+ status = "ok";
+ };
+
+ gpio5: gpio@f7021000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0x0 0xf7021000 0x0 0x1000>;
+ interrupts = <0 57 0x4>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pmx0 0 96 8>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&ao_ctrl 2>;
+ clock-names = "apb_pclk";
+ status = "ok";
+ };
+
+ gpio6: gpio@f7022000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0x0 0xf7022000 0x0 0x1000>;
+ interrupts = <0 58 0x4>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pmx0 0 104 8>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&ao_ctrl 2>;
+ clock-names = "apb_pclk";
+ status = "ok";
+ };
+
+ gpio7: gpio@f7023000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0x0 0xf7023000 0x0 0x1000>;
+ interrupts = <0 59 0x4>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pmx0 0 112 8>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&ao_ctrl 2>;
+ clock-names = "apb_pclk";
+ status = "ok";
+ };
+
+ gpio8: gpio@f7024000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0x0 0xf7024000 0x0 0x1000>;
+ interrupts = <0 60 0x4>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pmx0 0 120 2 &pmx0 2 2 6>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&ao_ctrl 2>;
+ clock-names = "apb_pclk";
+ status = "ok";
+ };
+
+ gpio9: gpio@f7025000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0x0 0xf7025000 0x0 0x1000>;
+ interrupts = <0 61 0x4>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pmx0 0 8 8>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&ao_ctrl 2>;
+ clock-names = "apb_pclk";
+ status = "ok";
+ };
+
+ gpio10: gpio@f7026000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0x0 0xf7026000 0x0 0x1000>;
+ interrupts = <0 62 0x4>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pmx0 0 0 1 &pmx0 1 16 7>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&ao_ctrl 2>;
+ clock-names = "apb_pclk";
+ status = "ok";
+ };
+
+ gpio11: gpio@f7027000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0x0 0xf7027000 0x0 0x1000>;
+ interrupts = <0 63 0x4>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pmx0 0 23 3 &pmx0 3 28 5>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&ao_ctrl 2>;
+ clock-names = "apb_pclk";
+ status = "ok";
+ };
+
+ gpio12: gpio@f7028000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0x0 0xf7028000 0x0 0x1000>;
+ interrupts = <0 64 0x4>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pmx0 0 33 3 &pmx0 3 43 5>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&ao_ctrl 2>;
+ clock-names = "apb_pclk";
+ status = "ok";
+ };
+
+ gpio13: gpio@f7029000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0x0 0xf7029000 0x0 0x1000>;
+ interrupts = <0 65 0x4>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pmx0 0 48 8>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&ao_ctrl 2>;
+ clock-names = "apb_pclk";
+ status = "ok";
+ };
+
+ gpio14: gpio@f702a000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0x0 0xf702a000 0x0 0x1000>;
+ interrupts = <0 66 0x4>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pmx0 0 56 8>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&ao_ctrl 2>;
+ clock-names = "apb_pclk";
+ status = "ok";
+ };
+
+ gpio15: gpio@f702b000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0x0 0xf702b000 0x0 0x1000>;
+ interrupts = <0 67 0x4>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <
+ &pmx0 0 74 6
+ &pmx0 6 122 1
+ &pmx0 7 126 1
+ >;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&ao_ctrl 2>;
+ clock-names = "apb_pclk";
+ status = "ok";
+ };
+
+ gpio16: gpio@f702c000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0x0 0xf702c000 0x0 0x1000>;
+ interrupts = <0 68 0x4>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pmx0 0 127 8>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&ao_ctrl 2>;
+ clock-names = "apb_pclk";
+ status = "ok";
+ };
+
+ gpio17: gpio@f702d000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0x0 0xf702d000 0x0 0x1000>;
+ interrupts = <0 69 0x4>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pmx0 0 135 8>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&ao_ctrl 2>;
+ clock-names = "apb_pclk";
+ status = "ok";
+ };
+
+ gpio18: gpio@f702e000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0x0 0xf702e000 0x0 0x1000>;
+ interrupts = <0 70 0x4>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pmx0 0 143 8>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&ao_ctrl 2>;
+ clock-names = "apb_pclk";
+ status = "ok";
+ };
+
+ gpio19: gpio@f702f000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0x0 0xf702f000 0x0 0x1000>;
+ interrupts = <0 71 0x4>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pmx0 0 151 8>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&ao_ctrl 2>;
+ clock-names = "apb_pclk";
+ status = "ok";
+ };
+
+ spi_0: spi@f7106000 {
+ compatible = "arm,pl022", "arm,primecell";
+ reg = <0x0 0xf7106000 0x0 0x1000>;
+ interrupts = <0 50 4>;
+ bus-id = <0>;
+ enable-dma = <0>;
+ clocks = <&sys_ctrl HI6220_SPI_CLK>;
+ clock-names = "apb_pclk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi0_pmx_func &spi0_cfg_func>;
+ num-cs = <1>;
+ cs-gpios = <&gpio6 2 0>;
+ status = "ok";
+ };
+
+ i2c0: i2c@f7100000 {
+ compatible = "snps,designware-i2c";
+ reg = <0x0 0xf7100000 0x0 0x1000>;
+ interrupts = <0 44 4>;
+ clocks = <&sys_ctrl HI6220_I2C0_CLK>;
+ i2c-sda-hold-time-ns = <300>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pmx_func &i2c0_cfg_func>;
status = "disabled";
};
+
+ i2c1: i2c@f7101000 {
+ compatible = "snps,designware-i2c";
+ reg = <0x0 0xf7101000 0x0 0x1000>;
+ clocks = <&sys_ctrl HI6220_I2C1_CLK>;
+ interrupts = <0 45 4>;
+ i2c-sda-hold-time-ns = <300>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pmx_func &i2c1_cfg_func>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@f7102000 {
+ compatible = "snps,designware-i2c";
+ reg = <0x0 0xf7102000 0x0 0x1000>;
+ clocks = <&sys_ctrl HI6220_I2C2_CLK>;
+ interrupts = <0 46 4>;
+ i2c-sda-hold-time-ns = <300>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_pmx_func &i2c2_cfg_func>;
+ status = "disabled";
+ };
+
+ fixed_5v_hub: regulator@0 {
+ compatible = "regulator-fixed";
+ regulator-name = "fixed_5v_hub";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-boot-on;
+ gpio = <&gpio0 7 0>;
+ regulator-always-on;
+ };
+
+ usb_phy: usbphy {
+ compatible = "hisilicon,hi6220-usb-phy";
+ #phy-cells = <0>;
+ phy-supply = <&fixed_5v_hub>;
+ hisilicon,peripheral-syscon = <&sys_ctrl>;
+ };
+
+ usb: usb@f72c0000 {
+ compatible = "hisilicon,hi6220-usb";
+ reg = <0x0 0xf72c0000 0x0 0x40000>;
+ phys = <&usb_phy>;
+ phy-names = "usb2-phy";
+ clocks = <&sys_ctrl HI6220_USBOTG_HCLK>;
+ clock-names = "otg";
+ dr_mode = "otg";
+ g-use-dma;
+ g-rx-fifo-size = <512>;
+ g-np-tx-fifo-size = <128>;
+ g-tx-fifo-size = <128 128 128 128 128 128>;
+ interrupts = <0 77 0x4>;
+ };
+
+ mailbox: mailbox@f7510000 {
+ compatible = "hisilicon,hi6220-mbox";
+ reg = <0x0 0xf7510000 0x0 0x1000>, /* IPC_S */
+ <0x0 0x06dff800 0x0 0x0800>; /* Mailbox buffer */
+ interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <3>;
+ };
+
+ dwmmc_0: dwmmc0@f723d000 {
+ compatible = "hisilicon,hi6220-dw-mshc";
+ num-slots = <0x1>;
+ cap-mmc-highspeed;
+ sd-uhs-sdr12;
+ sd-uhs-sdr25;
+ sd-uhs-sdr50;
+ non-removable;
+ reg = <0x0 0xf723d000 0x0 0x1000>;
+ interrupts = <0x0 0x48 0x4>;
+ clocks = <&sys_ctrl 2>, <&sys_ctrl 1>;
+ clock-names = "ciu", "biu";
+ resets = <&sys_ctrl PERIPH_RSTDIS0_MMC0>;
+ bus-width = <0x8>;
+ vmmc-supply = <&ldo19>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&emmc_pmx_func &emmc_clk_cfg_func
+ &emmc_cfg_func &emmc_rst_cfg_func>;
+ };
+
+ dwmmc_1: dwmmc1@f723e000 {
+ compatible = "hisilicon,hi6220-dw-mshc";
+ num-slots = <0x1>;
+ card-detect-delay = <200>;
+ hisilicon,peripheral-syscon = <&ao_ctrl>;
+ cap-sd-highspeed;
+ reg = <0x0 0xf723e000 0x0 0x1000>;
+ interrupts = <0x0 0x49 0x4>;
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ clocks = <&sys_ctrl 4>, <&sys_ctrl 3>;
+ clock-names = "ciu", "biu";
+ resets = <&sys_ctrl PERIPH_RSTDIS0_MMC1>;
+ vqmmc-supply = <&ldo7>;
+ vmmc-supply = <&ldo10>;
+ bus-width = <0x4>;
+ disable-wp;
+ cd-gpios = <&gpio1 0 1>;
+ pinctrl-names = "default", "idle";
+ pinctrl-0 = <&sd_pmx_func &sd_clk_cfg_func &sd_cfg_func>;
+ pinctrl-1 = <&sd_pmx_idle &sd_clk_cfg_idle &sd_cfg_idle>;
+ };
+
+ dwmmc_2: dwmmc2@f723f000 {
+ compatible = "hisilicon,hi6220-dw-mshc";
+ status = "okay";
+ num-slots = <0x1>;
+ reg = <0x0 0xf723f000 0x0 0x1000>;
+ interrupts = <0x0 0x4a 0x4>;
+ clocks = <&sys_ctrl HI6220_MMC2_CIUCLK>, <&sys_ctrl HI6220_MMC2_CLK>;
+ clock-names = "ciu", "biu";
+ resets = <&sys_ctrl PERIPH_RSTDIS0_MMC2>;
+ bus-width = <0x4>;
+ broken-cd;
+ pinctrl-names = "default", "idle";
+ pinctrl-0 = <&sdio_pmx_func &sdio_clk_cfg_func &sdio_cfg_func>;
+ pinctrl-1 = <&sdio_pmx_idle &sdio_clk_cfg_idle &sdio_cfg_idle>;
+ };
+
+ tsensor: tsensor@0,f7030700 {
+ compatible = "hisilicon,tsensor";
+ reg = <0x0 0xf7030700 0x0 0x1000>;
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&sys_ctrl 22>;
+ clock-names = "thermal_clk";
+ #thermal-sensor-cells = <1>;
+ };
+
+ thermal-zones {
+
+ cls0: cls0 {
+ polling-delay = <1000>;
+ polling-delay-passive = <100>;
+ sustainable-power = <3326>;
+
+ /* sensor ID */
+ thermal-sensors = <&tsensor 2>;
+
+ trips {
+ threshold: trip-point@0 {
+ temperature = <65000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+
+ target: trip-point@1 {
+ temperature = <75000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&target>;
+ contribution = <1024>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+ };
+
+ mtcmos {
+ compatible = "hisilicon,hi6220-mtcmos-driver";
+ hisilicon,mtcmos-steady-us = <10>;
+ hisilicon,mtcmos-sc-on-base = <0xf7800000>;
+ hisilicon,mtcmos-acpu-on-base = <0xf65a0000>;
+
+ g3d_vdd: regulator@a1{
+ regulator-name = "G3D_PD_VDD";
+ regulator-compatible = "mtcmos1";
+ hisilicon,ctrl-regs = <0x830 0x834 0x83c>;
+ hisilicon,ctrl-data = <1 0x1>;
+ };
+
+ soc_med: regulator@a2{
+ regulator-name = "SOC_MED";
+ regulator-compatible = "mtcmos2";
+ hisilicon,ctrl-regs = <0x830 0x834 0x83c>;
+ hisilicon,ctrl-data = <2 0x1>;
+ };
+ };
+
+ display-subsystem {
+ compatible = "hisilicon,hi6220-dss";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ ade: ade@f4100000 {
+ compatible = "hisilicon,hi6220-ade";
+ reg = <0x0 0xf4100000 0x0 0x7800>,
+ <0x0 0xf4410000 0x0 0x1000>,
+ <0x0 0xf4520000 0x0 0x1000>;
+ reg-names = "ade_base",
+ "media_base",
+ "media_noc_base";
+ interrupts = <0 115 4>; /* ldi interrupt */
+
+ clocks = <&media_ctrl HI6220_ADE_CORE>,
+ <&media_ctrl HI6220_CODEC_JPEG>,
+ <&media_ctrl HI6220_ADE_PIX_SRC>,
+ <&media_ctrl HI6220_PLL_SYS>,
+ <&media_ctrl HI6220_PLL_SYS_MEDIA>;
+			/* clock names */
+ clock-names = "clk_ade_core",
+ "aclk_codec_jpeg_src",
+ "clk_ade_pix",
+ "clk_syspll_src",
+ "clk_medpll_src";
+ ade_core_clk_rate = <360000000>;
+ media_noc_clk_rate = <288000000>;
+ };
+
+ dsi: dsi@0xf4107800 {
+ compatible = "hisilicon,hi6220-dsi";
+ reg = <0x0 0xf4107800 0x0 0x100>;
+ clocks = <&media_ctrl HI6220_DSI_PCLK>;
+ clock-names = "pclk_dsi";
+
+ port {
+ dsi_out: endpoint {
+ remote-endpoint = <&adv_in>;
+ };
+ };
+ };
+ };
+
+		mali: mali@f4080000 {
+ compatible = "arm,mali-450", "arm,mali-utgard";
+ reg = <0x0 0x3f100000 0x0 0x00708000>;
+ clocks = <&media_ctrl HI6220_G3D_CLK>,
+ <&media_ctrl HI6220_G3D_PCLK>;
+ clock-names = "clk_g3d", "pclk_g3d";
+ G3D_PD_VDD-supply = <&g3d_vdd>;
+ mali_def_freq = <500>;
+ pclk_freq = <144>;
+ dfs_steps = <2>;
+ dfs_lockprf = <1>;
+ dfs_limit_max_prf = <1>;
+ dfs_profile_num = <2>;
+ dfs_profiles = <250 3 0>, <500 1 0>;
+ mali_type = <2>;
+
+ interrupt-parent = <&gic>;
+ interrupts = <1 126 4>, /*gp*/
+ <1 126 4>, /*gp mmu*/
+ <1 126 4>, /*pp bc*/
+ <1 126 4>, /*pmu*/
+ <1 126 4>, /*pp0*/
+ <1 126 4>,
+ <1 126 4>, /*pp1*/
+ <1 126 4>,
+ <1 126 4>, /*pp2*/
+ <1 126 4>,
+ <1 126 4>, /*pp4*/
+ <1 126 4>,
+ <1 126 4>, /*pp5*/
+ <1 126 4>,
+ <1 126 4>, /*pp6*/
+ <1 126 4>;
+ interrupt-names = "IRQGP", "IRQGPMMU", "IRQPP", "IRQPMU",
+ "IRQPP0", "IRQPPMMU0", "IRQPP1", "IRQPPMMU1",
+ "IRQPP2", "IRQPPMMU2","IRQPP4", "IRQPPMMU4",
+ "IRQPP5", "IRQPPMMU5", "IRQPP6", "IRQPPMMU6";
+ };
};
};
diff --git a/arch/arm64/boot/dts/hisilicon/hikey-gpio.dtsi b/arch/arm64/boot/dts/hisilicon/hikey-gpio.dtsi
new file mode 100644
index 000000000000..09242f0ccd35
--- /dev/null
+++ b/arch/arm64/boot/dts/hisilicon/hikey-gpio.dtsi
@@ -0,0 +1,607 @@
+/ {
+ gpio_rstout_n:gpio_rstout_n {
+ gpios;
+ };
+ gpio_pmu_peri_en:gpio_pmu_peri_en {
+ gpios;
+ };
+ gpio_sysclk0_en:gpio_sysclk0_en {
+ gpios;
+ };
+ gpio_jtag_tdo:gpio_jtag_tdo {
+ gpios;
+ };
+ /* LCB: PWR_HOLD_GPIO0_0 */
+ gpio_pwr_hold:gpio_pwr_hold {
+ gpios = <&gpio0 0 0>;
+ };
+ /* LCB: DSI_SEL_GPIO0_1 */
+ gpio_dsi_sel:gpio_dsi_sel {
+ gpios = <&gpio0 1 0>;
+ };
+ /* LCB: USB_HUB_RESET_N_GPIO0_2 */
+ gpio_usb_hub_reset_n:gpio_usb_hub_reset_n {
+ gpios = <&gpio0 2 0>;
+ };
+ /* LCB: USB_SEL_GPIO0_3 */
+ gpio_usb_sel:gpio_usb_sel {
+ gpios = <&gpio0 3 0>;
+ };
+ /* LCB: HDMI_PD_GPIO0_4 */
+ gpio_hdmi_pd:gpio_hdmi_pd {
+ gpios = <&gpio0 4 0>;
+ };
+ /* LCB: WL_REG_ON_GPIO0_5 */
+ gpio_wl_en:gpio_wl_en {
+ gpios = <&gpio0 5 0>;
+ };
+ /* LCB: PWRON_DET_GPIO0_6 */
+ gpio_pwron_det:gpio_pwron_det {
+ gpios = <&gpio0 6 0>;
+ };
+ /* LCB: 5V_HUB_EN_GPIO0_7 */
+ gpio_usb_dev_det:gpio_usb_dev_det {
+ gpios = <&gpio0 7 0>;
+ };
+ /* LCB: SD_DET_GPIO1_0 */
+ gpio_sd_det:gpio_sd_det {
+ gpios = <&gpio1 0 0>;
+ };
+ /* LCB: HDMI_INT_GPIO1_1 */
+ gpio_hdmi_int:gpio_hdmi_int {
+ gpios = <&gpio1 1 0>;
+ };
+ /* LCB: PMU_IRQ_N_GPIO1_2 */
+ gpio_pmu_irq_n:gpio_pmu_irq_n {
+ gpios = <&gpio1 2 0>;
+ };
+ /* LCB: WL_HOST_WAKE_GPIO1_3 */
+ gpio_wl_host_wake:gpio_wl_host_wake {
+ gpios = <&gpio1 3 0>;
+ };
+ gpio_nfc_int:gpio_nfc_int {
+ gpios = <&gpio1 4 0>;
+ };
+ gpio_unused_001:gpio_unused_001 {
+ gpios = <&gpio1 5 0>;
+ };
+ /* LCB: BT_REG_ON_GPIO1_7 */
+ gpio_bt_reg_on:gpio_bt_reg_on {
+ gpios = <&gpio1 7 0>;
+ };
+ /* LCB: GPIO2_0, J2 */
+ gpio_j2_2_0:gpio_j2_2_0 {
+ gpios = <&gpio2 0 0>;
+ };
+ /* LCB: GPIO2_1, J2 */
+ gpio_j2_2_1:gpio_j2_2_1 {
+ gpios = <&gpio2 1 0>;
+ };
+ /* LCB: GPIO2_2, J2 */
+ gpio_j2_2_2:gpio_j2_2_2 {
+ gpios = <&gpio2 2 0>;
+ };
+ /* LCB: GPIO2_3, J2 */
+ gpio_j2_2_3:gpio_j2_2_3 {
+ gpios = <&gpio2 3 0>;
+ };
+ /* LCB: GPIO2_4, J2 */
+ gpio_j2_2_4:gpio_j2_2_4 {
+ gpios = <&gpio2 4 0>;
+ };
+ /* LCB: USB_ID_DET_GPIO2_5 */
+ gpio_usb_id_det:gpio_usb_id_det {
+ gpios = <&gpio2 5 0>;
+ };
+ /* LCB: USB_VBUS_DET_GPIO2_6 */
+ gpio_vbus_det:gpio_vbus_det {
+ gpios = <&gpio2 6 0>;
+ };
+ /* LCB: GPIO2_7, J2 */
+ gpio_j2_2_7:gpio_j2_2_7 {
+ gpios = <&gpio2 7 0>;
+ };
+ gpio_rf_reset0:gpio_rf_reset0 {
+ gpios;
+ };
+ gpio_rf_reset1:gpio_rf_reset1 {
+ gpios;
+ };
+ gpio_boot_sel:gpio_boot_sel {
+ gpios = <&gpio10 0 0>;
+ };
+ gpio_pmu_ssi:gpio_pmu_ssi {
+ gpios;
+ };
+ gpio_gps_ref_clk:gpio_gps_ref_clk {
+ gpios = <&gpio8 2 0>;
+ };
+ gpio_sd_clk:gpio_sd_clk {
+ gpios = <&gpio8 3 0>;
+ };
+ gpio_sd_cmd:gpio_sd_cmd {
+ gpios = <&gpio8 4 0>;
+ };
+ gpio_sd_data0:gpio_sd_data0 {
+ gpios = <&gpio8 5 0>;
+ };
+ gpio_sd_data1:gpio_sd_data1 {
+ gpios = <&gpio8 6 0>;
+ };
+ gpio_sd_data2:gpio_sd_data2 {
+ gpios = <&gpio8 7 0>;
+ };
+ gpio_sd_data3:gpio_sd_data3 {
+ gpios = <&gpio9 0 0>;
+ };
+ gpio_unused_002:gpio_unused_002 {
+ gpios;
+ };
+ gpio_mcam_pwdn:gpio_mcam_pwdn {
+ gpios = <&gpio9 1 0>;
+ };
+ gpio_vcm_pwdn:gpio_vcm_pwdn {
+ gpios = <&gpio9 2 0>;
+ };
+ gpio_scam_pwdn:gpio_scam_pwdn {
+ gpios = <&gpio9 3 0>;
+ };
+ gpio_cam_id0:gpio_cam_id0 {
+ gpios = <&gpio9 4 0>;
+ };
+ gpio_cam_id1:gpio_cam_id1 {
+ gpios = <&gpio9 5 0>;
+ };
+ gpio_flash_strobe:gpio_flash_strobe {
+ gpios = <&gpio9 6 0>;
+ };
+ gpio_mcam_mclk:gpio_mcam_mclk {
+ gpios = <&gpio9 7 0>;
+ };
+ gpio_scam_mclk:gpio_scam_mclk {
+ gpios = <&gpio10 1 0>;
+ };
+ gpio_cam_reset0:gpio_cam_reset0 {
+ gpios = <&gpio10 2 0>;
+ };
+ gpio_cam_reset1:gpio_cam_reset1 {
+ gpios = <&gpio10 3 0>;
+ };
+ gpio_tp_rst_n:gpio_tp_rst_n {
+ gpios = <&gpio10 4 0>;
+ };
+ gpio_unused_003:gpio_unused_003 {
+ gpios = <&gpio10 5 0>;
+ };
+ gpio_isp_sda0:gpio_isp_sda0 {
+ gpios = <&gpio10 6 0>;
+ };
+ gpio_isp_scl0:gpio_isp_scl0 {
+ gpios = <&gpio10 7 0>;
+ };
+ gpio_isp_sda1:gpio_isp_sda1 {
+ gpios = <&gpio11 0 0>;
+ };
+ gpio_isp_scl1:gpio_isp_scl1 {
+ gpios = <&gpio11 1 0>;
+ };
+ gpio_mdm_rst:gpio_mdm_rst {
+ gpios = <&gpio11 2 0>;
+ };
+ gpio_hkadc_ssi:gpio_hkadc_ssi {
+ gpios;
+ };
+ gpio_codec_clk:gpio_codec_clk {
+ gpios;
+ };
+ gpio_ap_wakeup_mdm:gpio_ap_wakeup_mdm {
+ gpios = <&gpio11 3 0>;
+ };
+ gpio_codec_sync:gpio_codec_sync {
+ gpios = <&gpio11 4 0>;
+ };
+ gpio_codec_datain:gpio_codec_datain {
+ gpios = <&gpio11 5 0>;
+ };
+ gpio_codec_dataout:gpio_codec_dataout {
+ gpios = <&gpio11 6 0>;
+ };
+ gpio_fm_xclk:gpio_fm_xclk {
+ gpios = <&gpio11 7 0>;
+ };
+ gpio_fm_xfs:gpio_fm_xfs {
+ gpios = <&gpio12 0 0>;
+ };
+ gpio_fm_di:gpio_fm_di {
+ gpios = <&gpio12 1 0>;
+ };
+ gpio_fm_do:gpio_fm_do {
+ gpios = <&gpio12 2 0>;
+ };
+ gpio_bt_xclk:gpio_bt_xclk {
+ gpios;
+ };
+ gpio_bt_xfs:gpio_bt_xfs {
+ gpios;
+ };
+ gpio_bt_di:gpio_bt_di {
+ gpios;
+ };
+ gpio_bt_do:gpio_bt_do {
+ gpios;
+ };
+ gpio_usim0_clk:gpio_usim0_clk {
+ gpios;
+ };
+ gpio_usim0_data:gpio_usim0_data {
+ gpios;
+ };
+ gpio_usim0_rst:gpio_usim0_rst {
+ gpios;
+ };
+ gpio_usim1_clk:gpio_usim1_clk {
+ gpios = <&gpio12 3 0>;
+ };
+ gpio_usim1_data:gpio_usim1_data {
+ gpios = <&gpio12 4 0>;
+ };
+ gpio_usim1_rst:gpio_usim1_rst {
+ gpios = <&gpio12 5 0>;
+ };
+ gpio_unused_004:gpio_unused_004 {
+ gpios = <&gpio12 6 0>;
+ };
+ gpio_unused_005:gpio_unused_005 {
+ gpios = <&gpio12 7 0>;
+ };
+ gpio_uart0_rxd:gpio_uart0_rxd {
+ gpios = <&gpio13 0 0>;
+ };
+ gpio_uart0_txd:gpio_uart0_txd {
+ gpios = <&gpio13 1 0>;
+ };
+ gpio_bt_uart_cts_n:gpio_bt_uart_cts_n {
+ gpios = <&gpio13 2 0>;
+ };
+ gpio_bt_uart_rts_n:gpio_bt_uart_rts_n {
+ gpios = <&gpio13 3 0>;
+ };
+ gpio_bt_uart_rxd:gpio_bt_uart_rxd {
+ gpios = <&gpio13 4 0>;
+ };
+ gpio_bt_uart_txd:gpio_bt_uart_txd {
+ gpios = <&gpio13 5 0>;
+ };
+ gpio_gps_uart_cts_n:gpio_gps_uart_cts_n {
+ gpios = <&gpio13 6 0>;
+ };
+ gpio_gps_uart_rts_n:gpio_gps_uart_rts_n {
+ gpios = <&gpio13 7 0>;
+ };
+ gpio_gps_uart_rxd:gpio_gps_uart_rxd {
+ gpios = <&gpio14 0 0>;
+ };
+ gpio_gps_uart_txd:gpio_gps_uart_txd {
+ gpios = <&gpio14 1 0>;
+ };
+ gpio_i2c0_scl:gpio_i2c0_scl {
+ gpios = <&gpio14 2 0>;
+ };
+ gpio_i2c0_sda:gpio_i2c0_sda {
+ gpios = <&gpio14 3 0>;
+ };
+ gpio_i2c1_scl:gpio_i2c1_scl {
+ gpios = <&gpio14 4 0>;
+ };
+ gpio_i2c1_sda:gpio_i2c1_sda {
+ gpios = <&gpio14 5 0>;
+ };
+ gpio_i2c2_scl:gpio_i2c2_scl {
+ gpios = <&gpio14 6 0>;
+ };
+ gpio_i2c2_sda:gpio_i2c2_sda {
+ gpios = <&gpio14 7 0>;
+ };
+ gpio_emmc_clk:gpio_emmc_clk {
+ gpios;
+ };
+ gpio_emmc_cmd:gpio_emmc_cmd {
+ gpios;
+ };
+ gpio_emmc_data0:gpio_emmc_data0 {
+ gpios;
+ };
+ gpio_emmc_data1:gpio_emmc_data1 {
+ gpios;
+ };
+ gpio_emmc_data2:gpio_emmc_data2 {
+ gpios;
+ };
+ gpio_emmc_data3:gpio_emmc_data3 {
+ gpios;
+ };
+ gpio_emmc_data4:gpio_emmc_data4 {
+ gpios;
+ };
+ gpio_emmc_data5:gpio_emmc_data5 {
+ gpios;
+ };
+ gpio_emmc_data6:gpio_emmc_data6 {
+ gpios;
+ };
+ gpio_emmc_data7:gpio_emmc_data7 {
+ gpios;
+ };
+ gpio_emmc_rst_n:gpio_emmc_rst_n {
+ gpios;
+ };
+ gpio_unused_006:gpio_unused_006 {
+ gpios;
+ };
+ gpio_sdio_clk:gpio_sdio_clk {
+ gpios = <&gpio15 0 0>;
+ };
+ gpio_sdio_cmd:gpio_sdio_cmd {
+ gpios = <&gpio15 1 0>;
+ };
+ gpio_sdio_data0:gpio_sdio_data0 {
+ gpios = <&gpio15 2 0>;
+ };
+ gpio_sdio_data1:gpio_sdio_data1 {
+ gpios = <&gpio15 3 0>;
+ };
+ gpio_sdio_data2:gpio_sdio_data2 {
+ gpios = <&gpio15 4 0>;
+ };
+ gpio_sdio_data3:gpio_sdio_data3 {
+ gpios = <&gpio15 5 0>;
+ };
+ gpio_unused_007:gpio_unused_007 {
+ gpios;
+ };
+ /* LCB: GPIO3_0, on J15, as general purpose input */
+ gpio_j15_3_0:gpio_j15_3_0 {
+ gpios = <&gpio3 0 0>;
+ };
+ gpio_jtag_sel0:gpio_jtag_sel0 {
+ gpios = <&gpio3 1 0>;
+ };
+ gpio_jtag_sel1:gpio_jtag_sel1 {
+ gpios = <&gpio3 2 0>;
+ };
+ gpio_lcd_rst_n:gpio_lcd_rst_n {
+ gpios = <&gpio3 3 0>;
+ };
+ gpio_aux_ssi0:gpio_aux_ssi0 {
+ gpios = <&gpio3 4 0>;
+ };
+ /* LCB: WLAN_ACTIVE_GPIO3_5, connects to led, as general purpose */
+ gpio_wlan_active_led:gpio_wlan_active_led {
+ gpios = <&gpio3 5 0>;
+ };
+ gpio_unused_008:gpio_unused_008 {
+ gpios = <&gpio3 6 0>;
+ };
+ gpio_ap_wakeup_bt:gpio_ap_wakeup_bt {
+ gpios = <&gpio3 7 0>;
+ };
+ /* LCB: USER_LED1_GPIO4_0 */
+ gpio_user_led_1:gpio_user_led_1 {
+ gpios = <&gpio4 0 0>;
+ };
+ /* LCB: USER_LED1_GPIO4_1 */
+ gpio_user_led_2:gpio_user_led_2 {
+ gpios = <&gpio4 1 0>;
+ };
+ /* LCB: USER_LED1_GPIO4_2 */
+ gpio_user_led_3:gpio_user_led_3 {
+ gpios = <&gpio4 2 0>;
+ };
+ /* LCB: USER_LED1_GPIO4_3 */
+ gpio_user_led_4:gpio_user_led_4 {
+ gpios = <&gpio4 3 0>;
+ };
+ gpio_i2c3_scl:gpio_i2c3_scl {
+ gpios = <&gpio4 4 0>;
+ };
+ gpio_i2c3_sda:gpio_i2c3_sda {
+ gpios = <&gpio4 5 0>;
+ };
+ gpio_wlan_bt_priority:gpio_wlan_bt_priority {
+ gpios = <&gpio4 6 0>;
+ };
+ /* LCB: BT_ACTIVE_GPIO4_7, connects to led, as general purpose */
+ gpio_bt_active_led:gpio_bt_active_led {
+ gpios = <&gpio4 7 0>;
+ };
+ gpio_uart3_cts_n:gpio_uart3_cts_n {
+ gpios = <&gpio5 0 0>;
+ };
+ gpio_uart3_rts_n:gpio_uart3_rts_n {
+ gpios = <&gpio5 1 0>;
+ };
+ gpio_uart3_rxd:gpio_uart3_rxd {
+ gpios = <&gpio5 2 0>;
+ };
+ gpio_uart3_txd:gpio_uart3_txd {
+ gpios = <&gpio5 3 0>;
+ };
+ gpio_aux_ssi1:gpio_aux_ssi1 {
+ gpios = <&gpio5 4 0>;
+ };
+ gpio_unused_009:gpio_unused_009 {
+ gpios = <&gpio5 5 0>;
+ };
+ gpio_modem_pcm_xclk:gpio_modem_pcm_xclk {
+ gpios = <&gpio5 6 0>;
+ };
+ gpio_modem_pcm_xfs:gpio_modem_pcm_xfs {
+ gpios = <&gpio5 7 0>;
+ };
+ gpio_spi0_di:gpio_spi0_di {
+ gpios = <&gpio6 0 0>;
+ };
+ gpio_spi0_do:gpio_spi0_do {
+ gpios = <&gpio6 1 0>;
+ };
+ gpio_spi0_cs_n:gpio_spi0_cs_n {
+ gpios = <&gpio6 2 0>;
+ };
+ gpio_spi0_clk:gpio_spi0_clk {
+ gpios = <&gpio6 3 0>;
+ };
+ gpio_lte_tx_active:gpio_lte_tx_active {
+ gpios = <&gpio6 4 0>;
+ };
+ gpio_lte_rx_active:gpio_lte_rx_active {
+ gpios = <&gpio6 5 0>;
+ };
+ gpio_lcd_id0:gpio_lcd_id0 {
+ gpios = <&gpio6 6 0>;
+ };
+ /* LCB: GPIO6_7_DSI_TE0 */
+ gpio_dsi_te0:gpio_dsi_te0 {
+ gpios = <&gpio6 7 0>;
+ };
+ gpio_lcd_id1:gpio_lcd_id1 {
+ gpios = <&gpio7 0 0>;
+ };
+ gpio_volume1_n:gpio_volume1_n {
+ gpios = <&gpio7 1 0>;
+ };
+ gpio_uart5_rxd:gpio_uart5_rxd {
+ gpios = <&gpio7 2 0>;
+ };
+ gpio_uart5_txd:gpio_uart5_txd {
+ gpios = <&gpio7 3 0>;
+ };
+ gpio_modem_pcm_di:gpio_modem_pcm_di {
+ gpios = <&gpio7 4 0>;
+ };
+ gpio_modem_pcm_do:gpio_modem_pcm_do {
+ gpios = <&gpio7 5 0>;
+ };
+ gpio_uart4_rxd:gpio_uart4_rxd {
+ gpios = <&gpio7 6 0>;
+ };
+ gpio_uart4_txd:gpio_uart4_txd {
+ gpios = <&gpio7 7 0>;
+ };
+ gpio_ap_wakeup_wl:gpio_ap_wakeup_wl {
+ gpios = <&gpio8 0 0>;
+ };
+ gpio_mdm_pwr_en:gpio_mdm_pwr_en {
+ gpios = <&gpio8 1 0>;
+ };
+ gpio_tcxo0_afc:gpio_tcxo0_afc {
+ gpios = <&gpio15 6 0>;
+ };
+ gpio_rf_ssi0:gpio_rf_ssi0 {
+ gpios;
+ };
+ gpio_rf_tcvr_on0:gpio_rf_tcvr_on0 {
+ gpios;
+ };
+ gpio_rf_mipi_clk0:gpio_rf_mipi_clk0 {
+ gpios;
+ };
+ gpio_rf_mipi_data0:gpio_rf_mipi_data0 {
+ gpios = <&gpio15 7 0>;
+ };
+ gpio_flash_mask:gpio_flash_mask {
+ gpios = <&gpio16 0 0>;
+ };
+ gpio_gps_blanking:gpio_gps_blanking {
+ gpios = <&gpio16 1 0>;
+ };
+ gpio_rf_gpio_2:gpio_rf_gpio_2 {
+ gpios = <&gpio16 2 0>;
+ };
+ gpio_rf_gpio_3:gpio_rf_gpio_3 {
+ gpios = <&gpio16 3 0>;
+ };
+ gpio_rf_gpio_4:gpio_rf_gpio_4 {
+ gpios = <&gpio16 4 0>;
+ };
+ gpio_rf_gpio_5:gpio_rf_gpio_5 {
+ gpios = <&gpio16 5 0>;
+ };
+ gpio_rf_gpio_6:gpio_rf_gpio_6 {
+ gpios = <&gpio16 6 0>;
+ };
+ gpio_rf_gpio_7:gpio_rf_gpio_7 {
+ gpios = <&gpio16 7 0>;
+ };
+ gpio_rf_gpio_8:gpio_rf_gpio_8 {
+ gpios = <&gpio17 0 0>;
+ };
+ gpio_rf_gpio_9:gpio_rf_gpio_9 {
+ gpios = <&gpio17 1 0>;
+ };
+ gpio_rf_gpio_10:gpio_rf_gpio_10 {
+ gpios = <&gpio17 2 0>;
+ };
+ gpio_rf_gpio_11:gpio_rf_gpio_11 {
+ gpios = <&gpio17 3 0>;
+ };
+ gpio_rf_gpio_12:gpio_rf_gpio_12 {
+ gpios = <&gpio17 4 0>;
+ };
+ gpio_rf_gpio_13:gpio_rf_gpio_13 {
+ gpios = <&gpio17 5 0>;
+ };
+ gpio_rf_gpio_14:gpio_rf_gpio_14 {
+ gpios = <&gpio17 6 0>;
+ };
+ gpio_rf_gpio_15:gpio_rf_gpio_15 {
+ gpios = <&gpio17 7 0>;
+ };
+ gpio_rf_gpio_16:gpio_rf_gpio_16 {
+ gpios = <&gpio18 0 0>;
+ };
+ gpio_rf_gpio_17:gpio_rf_gpio_17 {
+ gpios = <&gpio18 1 0>;
+ };
+ gpio_rf_gpio_18:gpio_rf_gpio_18 {
+ gpios = <&gpio18 2 0>;
+ };
+ gpio_rf_gpio_19:gpio_rf_gpio_19 {
+ gpios = <&gpio18 3 0>;
+ };
+ gpio_rf_gpio_20:gpio_rf_gpio_20 {
+ gpios = <&gpio18 4 0>;
+ };
+ gpio_rf_gpio_21:gpio_rf_gpio_21 {
+ gpios = <&gpio18 5 0>;
+ };
+ gpio_rf_gpio_22:gpio_rf_gpio_22 {
+ gpios = <&gpio18 6 0>;
+ };
+ gpio_rf_gpio_23:gpio_rf_gpio_23 {
+ gpios = <&gpio18 7 0>;
+ };
+ gpio_rf_gpio_24:gpio_rf_gpio_24 {
+ gpios = <&gpio19 0 0>;
+ };
+ gpio_rf_gpio_25:gpio_rf_gpio_25 {
+ gpios = <&gpio19 1 0>;
+ };
+ gpio_rf_gpio_26:gpio_rf_gpio_26 {
+ gpios = <&gpio19 2 0>;
+ };
+ gpio_rf_ssi1:gpio_rf_ssi1 {
+ gpios = <&gpio19 3 0>;
+ };
+ gpio_rf_tcvr_on1:gpio_rf_tcvr_on1 {
+ gpios = <&gpio19 4 0>;
+ };
+ gpio_rf_gpio_29:gpio_rf_gpio_29 {
+ gpios = <&gpio19 5 0>;
+ };
+ gpio_rf_gpio_30:gpio_rf_gpio_30 {
+ gpios = <&gpio19 6 0>;
+ };
+ gpio_apt_pdm0:gpio_apt_pdm0 {
+ gpios = <&gpio19 7 0>;
+ };
+};
diff --git a/arch/arm64/boot/dts/hisilicon/hikey-pinctrl.dtsi b/arch/arm64/boot/dts/hisilicon/hikey-pinctrl.dtsi
new file mode 100644
index 000000000000..0916e8459d6b
--- /dev/null
+++ b/arch/arm64/boot/dts/hisilicon/hikey-pinctrl.dtsi
@@ -0,0 +1,705 @@
+/*
+ * pinctrl dts file for the HiSilicon HiKey development board
+ */
+#include <dt-bindings/pinctrl/hisi.h>
+
+/ {
+ soc {
+ pmx0: pinmux@f7010000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <
+ &boot_sel_pmx_func
+ &hkadc_ssi_pmx_func
+ &codec_clk_pmx_func
+ &pwm_in_pmx_func
+ &bl_pwm_pmx_func
+ >;
+
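+ /*
+ * Each pinctrl-single,pins entry is a <register-offset mux-mode>
+ * pair: the offset selects one IOMGxxx mux register and the
+ * MUX_Mn value written into it picks the pin function.
+ */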
+ boot_sel_pmx_func: boot_sel_pmx_func {
+ pinctrl-single,pins = <
+ 0x0 MUX_M0 /* BOOT_SEL (IOMG000) */
+ >;
+ };
+
+ emmc_pmx_func: emmc_pmx_func {
+ pinctrl-single,pins = <
+ 0x100 MUX_M0 /* EMMC_CLK (IOMG064) */
+ 0x104 MUX_M0 /* EMMC_CMD (IOMG065) */
+ 0x108 MUX_M0 /* EMMC_DATA0 (IOMG066) */
+ 0x10c MUX_M0 /* EMMC_DATA1 (IOMG067) */
+ 0x110 MUX_M0 /* EMMC_DATA2 (IOMG068) */
+ 0x114 MUX_M0 /* EMMC_DATA3 (IOMG069) */
+ 0x118 MUX_M0 /* EMMC_DATA4 (IOMG070) */
+ 0x11c MUX_M0 /* EMMC_DATA5 (IOMG071) */
+ 0x120 MUX_M0 /* EMMC_DATA6 (IOMG072) */
+ 0x124 MUX_M0 /* EMMC_DATA7 (IOMG073) */
+ >;
+ };
+
+ sd_pmx_func: sd_pmx_func {
+ pinctrl-single,pins = <
+ 0xc MUX_M0 /* SD_CLK (IOMG003) */
+ 0x10 MUX_M0 /* SD_CMD (IOMG004) */
+ 0x14 MUX_M0 /* SD_DATA0 (IOMG005) */
+ 0x18 MUX_M0 /* SD_DATA1 (IOMG006) */
+ 0x1c MUX_M0 /* SD_DATA2 (IOMG007) */
+ 0x20 MUX_M0 /* SD_DATA3 (IOMG008) */
+ >;
+ };
+ sd_pmx_idle: sd_pmx_idle {
+ pinctrl-single,pins = <
+ 0xc MUX_M1 /* SD_CLK (IOMG003) */
+ 0x10 MUX_M1 /* SD_CMD (IOMG004) */
+ 0x14 MUX_M1 /* SD_DATA0 (IOMG005) */
+ 0x18 MUX_M1 /* SD_DATA1 (IOMG006) */
+ 0x1c MUX_M1 /* SD_DATA2 (IOMG007) */
+ 0x20 MUX_M1 /* SD_DATA3 (IOMG008) */
+ >;
+ };
+
+ sdio_pmx_func: sdio_pmx_func {
+ pinctrl-single,pins = <
+ 0x128 MUX_M0 /* SDIO_CLK (IOMG074) */
+ 0x12c MUX_M0 /* SDIO_CMD (IOMG075) */
+ 0x130 MUX_M0 /* SDIO_DATA0 (IOMG076) */
+ 0x134 MUX_M0 /* SDIO_DATA1 (IOMG077) */
+ 0x138 MUX_M0 /* SDIO_DATA2 (IOMG078) */
+ 0x13c MUX_M0 /* SDIO_DATA3 (IOMG079) */
+ >;
+ };
+ sdio_pmx_idle: sdio_pmx_idle {
+ pinctrl-single,pins = <
+ 0x128 MUX_M1 /* SDIO_CLK (IOMG074) */
+ 0x12c MUX_M1 /* SDIO_CMD (IOMG075) */
+ 0x130 MUX_M1 /* SDIO_DATA0 (IOMG076) */
+ 0x134 MUX_M1 /* SDIO_DATA1 (IOMG077) */
+ 0x138 MUX_M1 /* SDIO_DATA2 (IOMG078) */
+ 0x13c MUX_M1 /* SDIO_DATA3 (IOMG079) */
+ >;
+ };
+
+ isp_pmx_func: isp_pmx_func {
+ pinctrl-single,pins = <
+ 0x24 MUX_M0 /* ISP_PWDN0 (IOMG009) */
+ 0x28 MUX_M0 /* ISP_PWDN1 (IOMG010) */
+ 0x2c MUX_M0 /* ISP_PWDN2 (IOMG011) */
+ 0x30 MUX_M1 /* ISP_SHUTTER0 (IOMG012) */
+ 0x34 MUX_M1 /* ISP_SHUTTER1 (IOMG013) */
+ 0x38 MUX_M1 /* ISP_PWM (IOMG014) */
+ 0x3c MUX_M0 /* ISP_CCLK0 (IOMG015) */
+ 0x40 MUX_M0 /* ISP_CCLK1 (IOMG016) */
+ 0x44 MUX_M0 /* ISP_RESETB0 (IOMG017) */
+ 0x48 MUX_M0 /* ISP_RESETB1 (IOMG018) */
+ 0x4c MUX_M1 /* ISP_STROBE0 (IOMG019) */
+ 0x50 MUX_M1 /* ISP_STROBE1 (IOMG020) */
+ 0x54 MUX_M0 /* ISP_SDA0 (IOMG021) */
+ 0x58 MUX_M0 /* ISP_SCL0 (IOMG022) */
+ 0x5c MUX_M0 /* ISP_SDA1 (IOMG023) */
+ 0x60 MUX_M0 /* ISP_SCL1 (IOMG024) */
+ >;
+ };
+
+ hkadc_ssi_pmx_func: hkadc_ssi_pmx_func {
+ pinctrl-single,pins = <
+ 0x68 MUX_M0 /* HKADC_SSI (IOMG026) */
+ >;
+ };
+
+ codec_clk_pmx_func: codec_clk_pmx_func {
+ pinctrl-single,pins = <
+ 0x6c MUX_M0 /* CODEC_CLK (IOMG027) */
+ >;
+ };
+
+ codec_pmx_func: codec_pmx_func {
+ pinctrl-single,pins = <
+ 0x70 MUX_M1 /* DMIC_CLK (IOMG028) */
+ 0x74 MUX_M0 /* CODEC_SYNC (IOMG029) */
+ 0x78 MUX_M0 /* CODEC_DI (IOMG030) */
+ 0x7c MUX_M0 /* CODEC_DO (IOMG031) */
+ >;
+ };
+
+ fm_pmx_func: fm_pmx_func {
+ pinctrl-single,pins = <
+ 0x80 MUX_M1 /* FM_XCLK (IOMG032) */
+ 0x84 MUX_M1 /* FM_XFS (IOMG033) */
+ 0x88 MUX_M1 /* FM_DI (IOMG034) */
+ 0x8c MUX_M1 /* FM_DO (IOMG035) */
+ >;
+ };
+
+ bt_pmx_func: bt_pmx_func {
+ pinctrl-single,pins = <
+ 0x90 MUX_M0 /* BT_XCLK (IOMG036) */
+ 0x94 MUX_M0 /* BT_XFS (IOMG037) */
+ 0x98 MUX_M0 /* BT_DI (IOMG038) */
+ 0x9c MUX_M0 /* BT_DO (IOMG039) */
+ >;
+ };
+
+ pwm_in_pmx_func: pwm_in_pmx_func {
+ pinctrl-single,pins = <
+ 0xb8 MUX_M1 /* PWM_IN (IOMG046) */
+ >;
+ };
+
+ bl_pwm_pmx_func: bl_pwm_pmx_func {
+ pinctrl-single,pins = <
+ 0xbc MUX_M1 /* BL_PWM (IOMG047) */
+ >;
+ };
+
+ uart0_pmx_func: uart0_pmx_func {
+ pinctrl-single,pins = <
+ 0xc0 MUX_M0 /* UART0_RXD (IOMG048) */
+ 0xc4 MUX_M0 /* UART0_TXD (IOMG049) */
+ >;
+ };
+
+ uart1_pmx_func: uart1_pmx_func {
+ pinctrl-single,pins = <
+ 0xc8 MUX_M0 /* UART1_CTS_N (IOMG050) */
+ 0xcc MUX_M0 /* UART1_RTS_N (IOMG051) */
+ 0xd0 MUX_M0 /* UART1_RXD (IOMG052) */
+ 0xd4 MUX_M0 /* UART1_TXD (IOMG053) */
+ >;
+ };
+
+ uart2_pmx_func: uart2_pmx_func {
+ pinctrl-single,pins = <
+ 0xd8 MUX_M0 /* UART2_CTS_N (IOMG054) */
+ 0xdc MUX_M0 /* UART2_RTS_N (IOMG055) */
+ 0xe0 MUX_M0 /* UART2_RXD (IOMG056) */
+ 0xe4 MUX_M0 /* UART2_TXD (IOMG057) */
+ >;
+ };
+
+ uart3_pmx_func: uart3_pmx_func {
+ pinctrl-single,pins = <
+ 0x180 MUX_M1 /* UART3_CTS_N (IOMG096) */
+ 0x184 MUX_M1 /* UART3_RTS_N (IOMG097) */
+ 0x188 MUX_M1 /* UART3_RXD (IOMG098) */
+ 0x18c MUX_M1 /* UART3_TXD (IOMG099) */
+ >;
+ };
+
+ uart4_pmx_func: uart4_pmx_func {
+ pinctrl-single,pins = <
+ 0x1d0 MUX_M1 /* UART4_CTS_N (IOMG116) */
+ 0x1d4 MUX_M1 /* UART4_RTS_N (IOMG117) */
+ 0x1d8 MUX_M1 /* UART4_RXD (IOMG118) */
+ 0x1dc MUX_M1 /* UART4_TXD (IOMG119) */
+ >;
+ };
+
+ uart5_pmx_func: uart5_pmx_func {
+ pinctrl-single,pins = <
+ 0x1c8 MUX_M1 /* UART5_RXD (IOMG114) */
+ 0x1cc MUX_M1 /* UART5_TXD (IOMG115) */
+ >;
+ };
+
+ i2c0_pmx_func: i2c0_pmx_func {
+ pinctrl-single,pins = <
+ 0xe8 MUX_M0 /* I2C0_SCL (IOMG058) */
+ 0xec MUX_M0 /* I2C0_SDA (IOMG059) */
+ >;
+ };
+
+ i2c1_pmx_func: i2c1_pmx_func {
+ pinctrl-single,pins = <
+ 0xf0 MUX_M0 /* I2C1_SCL (IOMG060) */
+ 0xf4 MUX_M0 /* I2C1_SDA (IOMG061) */
+ >;
+ };
+
+ i2c2_pmx_func: i2c2_pmx_func {
+ pinctrl-single,pins = <
+ 0xf8 MUX_M0 /* I2C2_SCL (IOMG062) */
+ 0xfc MUX_M0 /* I2C2_SDA (IOMG063) */
+ >;
+ };
+
+ spi0_pmx_func: spi0_pmx_func {
+ pinctrl-single,pins = <
+ 0x1a0 MUX_M1 /* SPI0_DI (IOMG104) */
+ 0x1a4 MUX_M1 /* SPI0_DO (IOMG105) */
+ 0x1a8 MUX_M1 /* SPI0_CS_N (IOMG106) */
+ 0x1ac MUX_M1 /* SPI0_CLK (IOMG107) */
+ >;
+ };
+ };
+
+ pmx1: pinmux@f7010800 {
+ pinctrl-names = "default";
+ pinctrl-0 = <
+ &boot_sel_cfg_func
+ &hkadc_ssi_cfg_func
+ &codec_clk_cfg_func
+ &pwm_in_cfg_func
+ &bl_pwm_cfg_func
+ >;
+
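+ /*
+ * Per the pinctrl-single binding, the bias arrays are
+ * <input-value enable-bits disable-bits mask> and drive-strength
+ * is a <value mask> pair, applied to each listed IOCFGxxx register.
+ */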
+ boot_sel_cfg_func: boot_sel_cfg_func {
+ pinctrl-single,pins = <
+ 0x0 0x0 /* BOOT_SEL (IOCFG000) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DIS PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_UP PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+ };
+
+ hkadc_ssi_cfg_func: hkadc_ssi_cfg_func {
+ pinctrl-single,pins = <
+ 0x6c 0x0 /* HKADC_SSI (IOCFG027) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DIS PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_DIS PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+ };
+
+ emmc_clk_cfg_func: emmc_clk_cfg_func {
+ pinctrl-single,pins = <
+ 0x104 0x0 /* EMMC_CLK (IOCFG065) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DIS PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_DIS PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_08MA DRIVE_MASK>;
+ };
+
+ emmc_cfg_func: emmc_cfg_func {
+ pinctrl-single,pins = <
+ 0x108 0x0 /* EMMC_CMD (IOCFG066) */
+ 0x10c 0x0 /* EMMC_DATA0 (IOCFG067) */
+ 0x110 0x0 /* EMMC_DATA1 (IOCFG068) */
+ 0x114 0x0 /* EMMC_DATA2 (IOCFG069) */
+ 0x118 0x0 /* EMMC_DATA3 (IOCFG070) */
+ 0x11c 0x0 /* EMMC_DATA4 (IOCFG071) */
+ 0x120 0x0 /* EMMC_DATA5 (IOCFG072) */
+ 0x124 0x0 /* EMMC_DATA6 (IOCFG073) */
+ 0x128 0x0 /* EMMC_DATA7 (IOCFG074) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DIS PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_UP PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_04MA DRIVE_MASK>;
+ };
+
+ emmc_rst_cfg_func: emmc_rst_cfg_func {
+ pinctrl-single,pins = <
+ 0x12c 0x0 /* EMMC_RST_N (IOCFG075) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DIS PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_DIS PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_04MA DRIVE_MASK>;
+ };
+
+ sd_clk_cfg_func: sd_clk_cfg_func {
+ pinctrl-single,pins = <
+ 0xc 0x0 /* SD_CLK (IOCFG003) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DIS PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_DIS PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_10MA DRIVE_MASK>;
+ };
+ sd_clk_cfg_idle: sd_clk_cfg_idle {
+ pinctrl-single,pins = <
+ 0xc 0x0 /* SD_CLK (IOCFG003) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DOWN PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_DIS PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+ };
+
+ sd_cfg_func: sd_cfg_func {
+ pinctrl-single,pins = <
+ 0x10 0x0 /* SD_CMD (IOCFG004) */
+ 0x14 0x0 /* SD_DATA0 (IOCFG005) */
+ 0x18 0x0 /* SD_DATA1 (IOCFG006) */
+ 0x1c 0x0 /* SD_DATA2 (IOCFG007) */
+ 0x20 0x0 /* SD_DATA3 (IOCFG008) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DIS PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_DIS PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_08MA DRIVE_MASK>;
+ };
+ sd_cfg_idle: sd_cfg_idle {
+ pinctrl-single,pins = <
+ 0x10 0x0 /* SD_CMD (IOCFG004) */
+ 0x14 0x0 /* SD_DATA0 (IOCFG005) */
+ 0x18 0x0 /* SD_DATA1 (IOCFG006) */
+ 0x1c 0x0 /* SD_DATA2 (IOCFG007) */
+ 0x20 0x0 /* SD_DATA3 (IOCFG008) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DOWN PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_DIS PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+ };
+
+ sdio_clk_cfg_func: sdio_clk_cfg_func {
+ pinctrl-single,pins = <
+ 0x134 0x0 /* SDIO_CLK (IOCFG077) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DIS PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_DIS PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_08MA DRIVE_MASK>;
+ };
+ sdio_clk_cfg_idle: sdio_clk_cfg_idle {
+ pinctrl-single,pins = <
+ 0x134 0x0 /* SDIO_CLK (IOCFG077) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DOWN PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_DIS PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+ };
+
+ sdio_cfg_func: sdio_cfg_func {
+ pinctrl-single,pins = <
+ 0x138 0x0 /* SDIO_CMD (IOCFG078) */
+ 0x13c 0x0 /* SDIO_DATA0 (IOCFG079) */
+ 0x140 0x0 /* SDIO_DATA1 (IOCFG080) */
+ 0x144 0x0 /* SDIO_DATA2 (IOCFG081) */
+ 0x148 0x0 /* SDIO_DATA3 (IOCFG082) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DIS PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_UP PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_04MA DRIVE_MASK>;
+ };
+ sdio_cfg_idle: sdio_cfg_idle {
+ pinctrl-single,pins = <
+ 0x138 0x0 /* SDIO_CMD (IOCFG078) */
+ 0x13c 0x0 /* SDIO_DATA0 (IOCFG079) */
+ 0x140 0x0 /* SDIO_DATA1 (IOCFG080) */
+ 0x144 0x0 /* SDIO_DATA2 (IOCFG081) */
+ 0x148 0x0 /* SDIO_DATA3 (IOCFG082) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DIS PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_UP PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+ };
+
+ isp_cfg_func1: isp_cfg_func1 {
+ pinctrl-single,pins = <
+ 0x28 0x0 /* ISP_PWDN0 (IOCFG010) */
+ 0x2c 0x0 /* ISP_PWDN1 (IOCFG011) */
+ 0x30 0x0 /* ISP_PWDN2 (IOCFG012) */
+ 0x34 0x0 /* ISP_SHUTTER0 (IOCFG013) */
+ 0x38 0x0 /* ISP_SHUTTER1 (IOCFG014) */
+ 0x3c 0x0 /* ISP_PWM (IOCFG015) */
+ 0x40 0x0 /* ISP_CCLK0 (IOCFG016) */
+ 0x44 0x0 /* ISP_CCLK1 (IOCFG017) */
+ 0x48 0x0 /* ISP_RESETB0 (IOCFG018) */
+ 0x4c 0x0 /* ISP_RESETB1 (IOCFG019) */
+ 0x50 0x0 /* ISP_STROBE0 (IOCFG020) */
+ 0x58 0x0 /* ISP_SDA0 (IOCFG022) */
+ 0x5c 0x0 /* ISP_SCL0 (IOCFG023) */
+ 0x60 0x0 /* ISP_SDA1 (IOCFG024) */
+ 0x64 0x0 /* ISP_SCL1 (IOCFG025) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DIS PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_DIS PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+ };
+ isp_cfg_idle1: isp_cfg_idle1 {
+ pinctrl-single,pins = <
+ 0x34 0x0 /* ISP_SHUTTER0 (IOCFG013) */
+ 0x38 0x0 /* ISP_SHUTTER1 (IOCFG014) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DOWN PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_DIS PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+ };
+
+ isp_cfg_func2: isp_cfg_func2 {
+ pinctrl-single,pins = <
+ 0x54 0x0 /* ISP_STROBE1 (IOCFG021) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DOWN PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_DIS PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+ };
+
+ codec_clk_cfg_func: codec_clk_cfg_func {
+ pinctrl-single,pins = <
+ 0x70 0x0 /* CODEC_CLK (IOCFG028) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DIS PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_DIS PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_04MA DRIVE_MASK>;
+ };
+ codec_clk_cfg_idle: codec_clk_cfg_idle {
+ pinctrl-single,pins = <
+ 0x70 0x0 /* CODEC_CLK (IOCFG028) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DIS PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_DIS PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+ };
+
+ codec_cfg_func1: codec_cfg_func1 {
+ pinctrl-single,pins = <
+ 0x74 0x0 /* DMIC_CLK (IOCFG029) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DOWN PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_DIS PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+ };
+
+ codec_cfg_func2: codec_cfg_func2 {
+ pinctrl-single,pins = <
+ 0x78 0x0 /* CODEC_SYNC (IOCFG030) */
+ 0x7c 0x0 /* CODEC_DI (IOCFG031) */
+ 0x80 0x0 /* CODEC_DO (IOCFG032) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DIS PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_DIS PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_04MA DRIVE_MASK>;
+ };
+ codec_cfg_idle2: codec_cfg_idle2 {
+ pinctrl-single,pins = <
+ 0x78 0x0 /* CODEC_SYNC (IOCFG030) */
+ 0x7c 0x0 /* CODEC_DI (IOCFG031) */
+ 0x80 0x0 /* CODEC_DO (IOCFG032) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DIS PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_DIS PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+ };
+
+ fm_cfg_func: fm_cfg_func {
+ pinctrl-single,pins = <
+ 0x84 0x0 /* FM_XCLK (IOCFG033) */
+ 0x88 0x0 /* FM_XFS (IOCFG034) */
+ 0x8c 0x0 /* FM_DI (IOCFG035) */
+ 0x90 0x0 /* FM_DO (IOCFG036) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DOWN PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_DIS PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+ };
+
+ bt_cfg_func: bt_cfg_func {
+ pinctrl-single,pins = <
+ 0x94 0x0 /* BT_XCLK (IOCFG037) */
+ 0x98 0x0 /* BT_XFS (IOCFG038) */
+ 0x9c 0x0 /* BT_DI (IOCFG039) */
+ 0xa0 0x0 /* BT_DO (IOCFG040) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DIS PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_DIS PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+ };
+ bt_cfg_idle: bt_cfg_idle {
+ pinctrl-single,pins = <
+ 0x94 0x0 /* BT_XCLK (IOCFG037) */
+ 0x98 0x0 /* BT_XFS (IOCFG038) */
+ 0x9c 0x0 /* BT_DI (IOCFG039) */
+ 0xa0 0x0 /* BT_DO (IOCFG040) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DOWN PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_DIS PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+ };
+
+ pwm_in_cfg_func: pwm_in_cfg_func {
+ pinctrl-single,pins = <
+ 0xbc 0x0 /* PWM_IN (IOCFG047) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DOWN PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_DIS PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+ };
+
+ bl_pwm_cfg_func: bl_pwm_cfg_func {
+ pinctrl-single,pins = <
+ 0xc0 0x0 /* BL_PWM (IOCFG048) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DOWN PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_DIS PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+ };
+
+ uart0_cfg_func1: uart0_cfg_func1 {
+ pinctrl-single,pins = <
+ 0xc4 0x0 /* UART0_RXD (IOCFG049) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DIS PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_UP PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+ };
+
+ uart0_cfg_func2: uart0_cfg_func2 {
+ pinctrl-single,pins = <
+ 0xc8 0x0 /* UART0_TXD (IOCFG050) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DIS PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_DIS PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_04MA DRIVE_MASK>;
+ };
+
+ uart1_cfg_func1: uart1_cfg_func1 {
+ pinctrl-single,pins = <
+ 0xcc 0x0 /* UART1_CTS_N (IOCFG051) */
+ 0xd4 0x0 /* UART1_RXD (IOCFG053) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DIS PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_UP PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+ };
+
+ uart1_cfg_func2: uart1_cfg_func2 {
+ pinctrl-single,pins = <
+ 0xd0 0x0 /* UART1_RTS_N (IOCFG052) */
+ 0xd8 0x0 /* UART1_TXD (IOCFG054) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DIS PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_DIS PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+ };
+
+ uart2_cfg_func: uart2_cfg_func {
+ pinctrl-single,pins = <
+ 0xdc 0x0 /* UART2_CTS_N (IOCFG055) */
+ 0xe0 0x0 /* UART2_RTS_N (IOCFG056) */
+ 0xe4 0x0 /* UART2_RXD (IOCFG057) */
+ 0xe8 0x0 /* UART2_TXD (IOCFG058) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DIS PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_DIS PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+ };
+
+ uart3_cfg_func: uart3_cfg_func {
+ pinctrl-single,pins = <
+ 0x190 0x0 /* UART3_CTS_N (IOCFG100) */
+ 0x194 0x0 /* UART3_RTS_N (IOCFG101) */
+ 0x198 0x0 /* UART3_RXD (IOCFG102) */
+ 0x19c 0x0 /* UART3_TXD (IOCFG103) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DOWN PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_DIS PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+ };
+
+ uart4_cfg_func: uart4_cfg_func {
+ pinctrl-single,pins = <
+ 0x1e0 0x0 /* UART4_CTS_N (IOCFG120) */
+ 0x1e4 0x0 /* UART4_RTS_N (IOCFG121) */
+ 0x1e8 0x0 /* UART4_RXD (IOCFG122) */
+ 0x1ec 0x0 /* UART4_TXD (IOCFG123) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DOWN PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_DIS PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+ };
+
+ uart5_cfg_func: uart5_cfg_func {
+ pinctrl-single,pins = <
+ 0x1d8 0x0 /* UART5_RXD (IOCFG118) */
+ 0x1dc 0x0 /* UART5_TXD (IOCFG119) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DOWN PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_DIS PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+ };
+
+ i2c0_cfg_func: i2c0_cfg_func {
+ pinctrl-single,pins = <
+ 0xec 0x0 /* I2C0_SCL (IOCFG059) */
+ 0xf0 0x0 /* I2C0_SDA (IOCFG060) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DIS PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_DIS PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+ };
+
+ i2c1_cfg_func: i2c1_cfg_func {
+ pinctrl-single,pins = <
+ 0xf4 0x0 /* I2C1_SCL (IOCFG061) */
+ 0xf8 0x0 /* I2C1_SDA (IOCFG062) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DIS PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_DIS PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+ };
+
+ i2c2_cfg_func: i2c2_cfg_func {
+ pinctrl-single,pins = <
+ 0xfc 0x0 /* I2C2_SCL (IOCFG063) */
+ 0x100 0x0 /* I2C2_SDA (IOCFG064) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DIS PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_DIS PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+ };
+
+ spi0_cfg_func: spi0_cfg_func {
+ pinctrl-single,pins = <
+ 0x1b0 0x0 /* SPI0_DI (IOCFG108) */
+ 0x1b4 0x0 /* SPI0_DO (IOCFG109) */
+ 0x1b8 0x0 /* SPI0_CS_N (IOCFG110) */
+ 0x1bc 0x0 /* SPI0_CLK (IOCFG111) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DIS PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_DIS PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+ };
+ };
+
+ pmx2: pinmux@f8001800 {
+ pinctrl-names = "default";
+ pinctrl-0 = <
+ &rstout_n_cfg_func
+ >;
+
+ rstout_n_cfg_func: rstout_n_cfg_func {
+ pinctrl-single,pins = <
+ 0x0 0x0 /* RSTOUT_N (IOCFG000) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DIS PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_DIS PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+ };
+
+ pmu_peri_en_cfg_func: pmu_peri_en_cfg_func {
+ pinctrl-single,pins = <
+ 0x4 0x0 /* PMU_PERI_EN (IOCFG001) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DIS PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_DIS PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+ };
+
+ sysclk0_en_cfg_func: sysclk0_en_cfg_func {
+ pinctrl-single,pins = <
+ 0x8 0x0 /* SYSCLK0_EN (IOCFG002) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DIS PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_DIS PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+ };
+
+ jtag_tdo_cfg_func: jtag_tdo_cfg_func {
+ pinctrl-single,pins = <
+ 0xc 0x0 /* JTAG_TDO (IOCFG003) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DIS PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_DIS PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_08MA DRIVE_MASK>;
+ };
+
+ rf_reset_cfg_func: rf_reset_cfg_func {
+ pinctrl-single,pins = <
+ 0x70 0x0 /* RF_RESET0 (IOCFG028) */
+ 0x74 0x0 /* RF_RESET1 (IOCFG029) */
+ >;
+ pinctrl-single,bias-pulldown = <PULL_DIS PULL_DOWN PULL_DIS PULL_DOWN>;
+ pinctrl-single,bias-pullup = <PULL_DIS PULL_UP PULL_DIS PULL_UP>;
+ pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+ };
+ };
+ };
+};
diff --git a/arch/arm64/configs/hikey_defconfig b/arch/arm64/configs/hikey_defconfig
new file mode 100644
index 000000000000..b345302c08f1
--- /dev/null
+++ b/arch/arm64/configs/hikey_defconfig
@@ -0,0 +1,500 @@
+CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_MEMCG_KMEM=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_BLK_CGROUP=y
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_JUMP_LABEL=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_CFQ_GROUP_IOSCHED=y
+CONFIG_ARCH_HISI=y
+CONFIG_ARCH_VEXPRESS=y
+CONFIG_ARCH_XGENE=y
+CONFIG_PCI=y
+CONFIG_PCI_MSI=y
+CONFIG_PCI_XGENE=y
+CONFIG_SCHED_MC=y
+CONFIG_SCHED_SMT=y
+CONFIG_PREEMPT=y
+CONFIG_KSM=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CMA=y
+CONFIG_SECCOMP=y
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+CONFIG_CMDLINE="console=ttyAMA0"
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=y
+CONFIG_COMPAT=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPUFREQ_DT=y
+CONFIG_ARM_HISI_ACPU_CPUFREQ=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_INET_ESP=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+# CONFIG_IPV6_SIT is not set
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NETLABEL=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_IP6_NF_NAT=y
+CONFIG_IP6_NF_TARGET_MASQUERADE=y
+CONFIG_BRIDGE=y
+CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_VLAN_8021Q=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_CLS_U32=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_BT=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_HIDP=y
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_WILINK=y
+CONFIG_CFG80211=y
+CONFIG_NL80211_TESTMODE=y
+CONFIG_CFG80211_REG_DEBUG=y
+CONFIG_CFG80211_WEXT=y
+CONFIG_MAC80211=y
+CONFIG_MAC80211_MESH=y
+CONFIG_MAC80211_LEDS=y
+CONFIG_RFKILL=y
+CONFIG_RFKILL_REGULATOR=y
+CONFIG_RFKILL_GPIO=y
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=64
+CONFIG_CONNECTOR=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_VIRTIO_BLK=y
+CONFIG_UID_CPUTIME=y
+CONFIG_TI_ST=y
+CONFIG_ST_HCI=y
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_AHCI_XGENE=y
+CONFIG_PATA_PLATFORM=y
+CONFIG_PATA_OF_PLATFORM=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_VERITY=y
+CONFIG_NETDEVICES=y
+CONFIG_MACVLAN=y
+CONFIG_TUN=y
+CONFIG_VETH=y
+CONFIG_VIRTIO_NET=y
+CONFIG_NET_XGENE=y
+CONFIG_SMC91X=y
+CONFIG_SMSC911X=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_MPPE=y
+CONFIG_USB_PEGASUS=y
+CONFIG_USB_RTL8150=y
+CONFIG_USB_RTL8152=y
+CONFIG_USB_USBNET=y
+CONFIG_USB_NET_DM9601=y
+CONFIG_USB_NET_SR9800=y
+CONFIG_USB_NET_SMSC75XX=y
+CONFIG_USB_NET_SMSC95XX=y
+CONFIG_USB_NET_PLUSB=y
+CONFIG_USB_NET_MCS7830=y
+CONFIG_WL_TI=y
+CONFIG_WL18XX=y
+CONFIG_WLCORE_SDIO=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_HISI_POWERKEY=y
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_AMBAKMI=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
+CONFIG_LEGACY_PTY_COUNT=16
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_VIRTIO_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+CONFIG_SPI=y
+CONFIG_SPI_PL022=y
+CONFIG_PINCTRL_SINGLE=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_PL061=y
+CONFIG_GPIO_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR=y
+CONFIG_THERMAL_GOV_STEP_WISE=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_CPU_THERMAL=y
+CONFIG_HISI_THERMAL=y
+CONFIG_MFD_HI655X_PMIC=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_HI6220_MTCMOS=y
+CONFIG_REGULATOR_HI655X=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
+# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
+CONFIG_MALI400=y
+CONFIG_MALI450=y
+CONFIG_MALI400_DEBUG=y
+# CONFIG_MALI400_PROFILING is not set
+# CONFIG_MALI_DVFS is not set
+CONFIG_MALI_SHARED_INTERRUPTS=y
+CONFIG_MALI_DT=y
+CONFIG_MALI_PLAT_SPECIFIC_DT=y
+CONFIG_DRM=y
+CONFIG_DRM_CMA_FBDEV_BUFFER_NUM=2
+CONFIG_DRM_I2C_ADV7511=y
+CONFIG_DRM_HISI=y
+CONFIG_FB_ARMCLCD=y
+CONFIG_FB_SIMPLE=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_HIDRAW=y
+CONFIG_UHID=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_ACRUX=y
+CONFIG_HID_ACRUX_FF=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_PRODIKEYS=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_DRAGONRISE=y
+CONFIG_DRAGONRISE_FF=y
+CONFIG_HID_EMS_FF=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_KEYTOUCH=y
+CONFIG_HID_KYE=y
+CONFIG_HID_WALTOP=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_TWINHAN=y
+CONFIG_HID_KENSINGTON=y
+CONFIG_HID_LCPOWER=y
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_LOGITECH_DJ=y
+CONFIG_LOGITECH_FF=y
+CONFIG_LOGIRUMBLEPAD2_FF=y
+CONFIG_LOGIG940_FF=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_ORTEK=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_PANTHERLORD_FF=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_PICOLCD=y
+CONFIG_HID_PRIMAX=y
+CONFIG_HID_SAITEK=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SPEEDLINK=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_GREENASIA=y
+CONFIG_GREENASIA_FF=y
+CONFIG_HID_SMARTJOYPLUS=y
+CONFIG_SMARTJOYPLUS_FF=y
+CONFIG_HID_TIVO=y
+CONFIG_HID_TOPSEED=y
+CONFIG_HID_THRUSTMASTER=y
+CONFIG_HID_WACOM=y
+CONFIG_HID_WIIMOTE=y
+CONFIG_HID_ZEROPLUS=y
+CONFIG_HID_ZYDACRON=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_ACM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC2=y
+CONFIG_USB_SERIAL=y
+CONFIG_USB_SERIAL_GENERIC=y
+CONFIG_USB_SERIAL_SIMPLE=y
+CONFIG_USB_SERIAL_FTDI_SIO=y
+CONFIG_USB_SERIAL_PL2303=y
+CONFIG_USB_SERIAL_QUALCOMM=y
+CONFIG_USB_SERIAL_OPTION=y
+CONFIG_USB_OTG_WAKELOCK=y
+CONFIG_USB_ULPI=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_SERIAL=y
+CONFIG_USB_CONFIGFS_ACM=y
+CONFIG_USB_CONFIGFS_RNDIS=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_MMC=y
+CONFIG_MMC_PARANOID_SD_INIT=y
+CONFIG_MMC_BLOCK_MINORS=64
+CONFIG_MMC_ARMMMCI=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SPI=y
+CONFIG_MMC_DW=y
+CONFIG_MMC_DW_EXYNOS=y
+CONFIG_MMC_DW_K3=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_REGULATOR=y
+CONFIG_LEDS_SYSCON=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_EFI=y
+CONFIG_RTC_DRV_PL031=y
+CONFIG_RTC_DRV_XGENE=y
+CONFIG_DMADEVICES=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_TIMED_GPIO=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_SYNC=y
+CONFIG_ION=y
+CONFIG_ION_HISI=y
+CONFIG_STUB_CLK_HI6220=y
+CONFIG_MAILBOX=y
+CONFIG_HI6220_MBOX=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_RESET_CONTROLLER=y
+CONFIG_PHY_HI6220_USB=y
+CONFIG_PHY_XGENE=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_REBOOT_REASON_SRAM=y
+CONFIG_EFI_VARS=y
+CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_BTRFS_FS=y
+CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=y
+CONFIG_CUSE=y
+CONFIG_OVERLAY_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_FILE_DIRECT=y
+CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_LZ4=y
+CONFIG_SQUASHFS_XZ=y
+CONFIG_PSTORE=y
+CONFIG_PSTORE_CONSOLE=y
+CONFIG_PSTORE_PMSG=y
+CONFIG_PSTORE_FTRACE=y
+CONFIG_PSTORE_RAM=y
+CONFIG_NFS_FS=y
+# CONFIG_NFS_V2 is not set
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_ROOT_NFS=y
+CONFIG_9P_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PANIC_TIMEOUT=5
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_IRQSOFF_TRACER=y
+CONFIG_PREEMPT_TRACER=y
+CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_DEBUG_RODATA=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_NETWORK_XFRM=y
+CONFIG_LSM_MMAP_MIN_ADDR=4096
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_CRYPTO_MICHAEL_MIC=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_CRC_CCITT=y
+CONFIG_CRC_T10DIF=y
diff --git a/arch/arm64/include/asm/opcodes.h b/arch/arm64/include/asm/opcodes.h
index 4e603ea36ad3..123f45d92cd1 100644
--- a/arch/arm64/include/asm/opcodes.h
+++ b/arch/arm64/include/asm/opcodes.h
@@ -1 +1,5 @@
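+/*
+ * The shared 32-bit opcodes header keys instruction byte-swapping off
+ * CONFIG_CPU_ENDIAN_BE8, which arm64 never defines; alias the arm64
+ * big-endian option onto it before pulling the header in.
+ */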
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define CONFIG_CPU_ENDIAN_BE8 CONFIG_CPU_BIG_ENDIAN
+#endif
+
#include <../../arm/include/asm/opcodes.h>
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 63f52b55defe..eaa9cabf4066 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -34,7 +34,7 @@
/*
* VMALLOC and SPARSEMEM_VMEMMAP ranges.
*
- * VMEMAP_SIZE: allows the whole VA space to be covered by a struct page array
+ * VMEMMAP_SIZE: allows the whole linear region to be covered by a struct page array
* (rounded up to PUD_SIZE).
* VMALLOC_START: beginning of the kernel VA space
* VMALLOC_END: extends to the available space below vmemmap, PCI I/O space,
@@ -51,7 +51,9 @@
#define VMALLOC_END (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
-#define vmemmap ((struct page *)(VMALLOC_END + SZ_64K))
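+/*
+ * Bias the array back by the first RAM pfn (section-aligned) so that
+ * pfn_to_page()/page_to_pfn() can index it directly with the pfn while
+ * only the sections that actually contain RAM need to be mapped.
+ */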
+#define VMEMMAP_START (VMALLOC_END + SZ_64K)
+#define vmemmap ((struct page *)VMEMMAP_START - \
+ SECTION_ALIGN_DOWN(memstart_addr >> PAGE_SHIFT))
#define FIRST_USER_ADDRESS 0UL
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 8aee3aeec3e6..c1492ba1f6d1 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -186,20 +186,21 @@ static void clear_regs_spsr_ss(struct pt_regs *regs)
/* EL1 Single Step Handler hooks */
static LIST_HEAD(step_hook);
-static DEFINE_RWLOCK(step_hook_lock);
+static DEFINE_SPINLOCK(step_hook_lock);
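+/*
+ * Writers take the spinlock; call_step_hook() runs from the
+ * single-step exception path, so it walks the list under RCU
+ * rather than taking a lock in that context.
+ */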
void register_step_hook(struct step_hook *hook)
{
- write_lock(&step_hook_lock);
- list_add(&hook->node, &step_hook);
- write_unlock(&step_hook_lock);
+ spin_lock(&step_hook_lock);
+ list_add_rcu(&hook->node, &step_hook);
+ spin_unlock(&step_hook_lock);
}
void unregister_step_hook(struct step_hook *hook)
{
- write_lock(&step_hook_lock);
- list_del(&hook->node);
- write_unlock(&step_hook_lock);
+ spin_lock(&step_hook_lock);
+ list_del_rcu(&hook->node);
+ spin_unlock(&step_hook_lock);
+ synchronize_rcu();
}
/*
@@ -213,15 +214,15 @@ static int call_step_hook(struct pt_regs *regs, unsigned int esr)
struct step_hook *hook;
int retval = DBG_HOOK_ERROR;
- read_lock(&step_hook_lock);
+ rcu_read_lock();
- list_for_each_entry(hook, &step_hook, node) {
+ list_for_each_entry_rcu(hook, &step_hook, node) {
retval = hook->fn(regs, esr);
if (retval == DBG_HOOK_HANDLED)
break;
}
- read_unlock(&step_hook_lock);
+ rcu_read_unlock();
return retval;
}
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 23cfc08fc8ba..b685257926f0 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -512,9 +512,14 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
#endif
/* EL2 debug */
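+ // PMUVer is a signed field: sbfx maps the 0xf IMPLEMENTATION
+ // DEFINED encoding to -1, so b.lt also skips non-architected PMUs.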
+ mrs x0, id_aa64dfr0_el1 // Check ID_AA64DFR0_EL1 PMUVer
+ sbfx x0, x0, #8, #4
+ cmp x0, #1
+ b.lt 4f // Skip if no PMU present
mrs x0, pmcr_el0 // Disable debug access traps
ubfx x0, x0, #11, #5 // to EL2 and allow access to
msr mdcr_el2, x0 // all PMU counters from EL1
+4:
/* Stage-2 translation */
msr vttbr_el2, xzr
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 5b1897e8ca24..62d3dc60ca09 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -574,9 +574,6 @@ static void armv8pmu_reset(void *info)
/* Initialize & Reset PMNC: C and P bits. */
armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C);
-
- /* Disable access from userspace. */
- asm volatile("msr pmuserenr_el0, %0" :: "r" (0));
}
static int armv8_pmuv3_map_event(struct perf_event *event)
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 1971f491bb90..ff7f13239515 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -58,6 +58,12 @@
*/
void ptrace_disable(struct task_struct *child)
{
+ /*
+ * This would be better off in core code, but PTRACE_DETACH has
+ * grown its fair share of arch-specific warts and changing it
+ * is likely to cause regressions on obscure architectures.
+ */
+ user_disable_single_step(child);
}
#ifdef CONFIG_HAVE_HW_BREAKPOINT
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index d250160d32bc..3039f080e2d5 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -186,7 +186,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
u64 val;
val = kvm_arm_timer_get_reg(vcpu, reg->id);
- return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id));
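+	/* copy_to_user() returns the number of bytes left uncopied, not an errno */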
+ return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}
/**
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 6ad554b01e95..ddfb97a299f2 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -933,6 +933,10 @@ static int __init __iommu_dma_init(void)
ret = register_iommu_dma_ops_notifier(&platform_bus_type);
if (!ret)
ret = register_iommu_dma_ops_notifier(&amba_bustype);
+
+ /* handle devices queued before this arch_initcall */
+ if (!ret)
+ __iommu_attach_notifier(NULL, BUS_NOTIFY_ADD_DEVICE, NULL);
return ret;
}
arch_initcall(__iommu_dma_init);
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 325f89ff897e..224238943aff 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -321,8 +321,8 @@ void __init mem_init(void)
#endif
MLG(VMALLOC_START, VMALLOC_END),
#ifdef CONFIG_SPARSEMEM_VMEMMAP
- MLG((unsigned long)vmemmap,
- (unsigned long)vmemmap + VMEMMAP_SIZE),
+ MLG(VMEMMAP_START,
+ VMEMMAP_START + VMEMMAP_SIZE),
MLM((unsigned long)virt_to_page(PAGE_OFFSET),
(unsigned long)virt_to_page(high_memory)),
#endif
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 873e363048c6..116ad654dd59 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -456,6 +456,9 @@ void __init paging_init(void)
empty_zero_page = virt_to_page(zero_page);
+ /* Ensure the zero page is visible to the page table walker */
+ dsb(ishst);
+
/*
* TTBR0 is only used for the identity mapping at this stage. Make it
* point to zero page to avoid speculatively fetching new entries.
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 3571c7309c5e..cf6240741134 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -57,6 +57,9 @@ static int change_memory_common(unsigned long addr, int numpages,
if (end < MODULES_VADDR || end >= MODULES_END)
return -EINVAL;
+ if (!numpages)
+ return 0;
+
data.set_mask = set_mask;
data.clear_mask = clear_mask;
diff --git a/arch/arm64/mm/proc-macros.S b/arch/arm64/mm/proc-macros.S
index 4c4d93c4bf65..d69dffffaa89 100644
--- a/arch/arm64/mm/proc-macros.S
+++ b/arch/arm64/mm/proc-macros.S
@@ -62,3 +62,15 @@
bfi \valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
#endif
.endm
+
+/*
+ * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
+ */
+ .macro reset_pmuserenr_el0, tmpreg
+ mrs \tmpreg, id_aa64dfr0_el1 // Check ID_AA64DFR0_EL1 PMUVer
+ sbfx \tmpreg, \tmpreg, #8, #4
+ cmp \tmpreg, #1 // Skip if no PMU present
+ b.lt 9000f
+ msr pmuserenr_el0, xzr // Disable PMU access from EL0
+9000:
+ .endm
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index cacecc4ad3e5..b8f04b3f2786 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -117,6 +117,7 @@ ENTRY(cpu_do_resume)
*/
ubfx x11, x11, #1, #1
msr oslar_el1, x11
+ reset_pmuserenr_el0 x0 // Disable PMU access from EL0
mov x0, x12
dsb nsh // Make sure local tlb invalidation completed
isb
@@ -155,6 +156,7 @@ ENTRY(__cpu_setup)
msr cpacr_el1, x0 // Enable FP/ASIMD
mov x0, #1 << 12 // Reset mdscr_el1 and disable
msr mdscr_el1, x0 // access to the DCC from EL0
+ reset_pmuserenr_el0 x0 // Disable PMU access from EL0
/*
* Memory region attributes for LPAE:
*
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
index 9041bbe2b7b4..8fdb9c7eeb66 100644
--- a/arch/ia64/include/asm/io.h
+++ b/arch/ia64/include/asm/io.h
@@ -436,6 +436,7 @@ static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned lo
return ioremap(phys_addr, size);
}
#define ioremap_cache ioremap_cache
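+/* ioremap_uc() must always be strongly uncached; ioremap_nocache() guarantees that */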
+#define ioremap_uc ioremap_nocache
/*
diff --git a/arch/m32r/kernel/setup.c b/arch/m32r/kernel/setup.c
index 0392112a5d70..a5ecef7188ba 100644
--- a/arch/m32r/kernel/setup.c
+++ b/arch/m32r/kernel/setup.c
@@ -81,7 +81,10 @@ static struct resource code_resource = {
};
unsigned long memory_start;
+EXPORT_SYMBOL(memory_start);
+
unsigned long memory_end;
+EXPORT_SYMBOL(memory_end);
void __init setup_arch(char **);
int get_cpuinfo(char *);
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 71683a853372..db459612de44 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2155,7 +2155,7 @@ config MIPS_MT_SMP
select CPU_MIPSR2_IRQ_VI
select CPU_MIPSR2_IRQ_EI
select SYNC_R4K
- select MIPS_GIC_IPI
+ select MIPS_GIC_IPI if MIPS_GIC
select MIPS_MT
select SMP
select SMP_UP
@@ -2253,7 +2253,7 @@ config MIPS_VPE_APSP_API_MT
config MIPS_CMP
bool "MIPS CMP framework support (DEPRECATED)"
depends on SYS_SUPPORTS_MIPS_CMP && !CPU_MIPSR6
- select MIPS_GIC_IPI
+ select MIPS_GIC_IPI if MIPS_GIC
select SMP
select SYNC_R4K
select SYS_SUPPORTS_SMP
@@ -2273,7 +2273,7 @@ config MIPS_CPS
select MIPS_CM
select MIPS_CPC
select MIPS_CPS_PM if HOTPLUG_CPU
- select MIPS_GIC_IPI
+ select MIPS_GIC_IPI if MIPS_GIC
select SMP
select SYNC_R4K if (CEVT_R4K || CSRC_R4K)
select SYS_SUPPORTS_HOTPLUG_CPU
@@ -2292,6 +2292,7 @@ config MIPS_CPS_PM
bool
config MIPS_GIC_IPI
+ depends on MIPS_GIC
bool
config MIPS_CM
diff --git a/arch/mips/alchemy/devboards/db1000.c b/arch/mips/alchemy/devboards/db1000.c
index bdeed9d13c6f..433c4b9a9f0a 100644
--- a/arch/mips/alchemy/devboards/db1000.c
+++ b/arch/mips/alchemy/devboards/db1000.c
@@ -503,15 +503,15 @@ int __init db1000_dev_setup(void)
if (board == BCSR_WHOAMI_DB1500) {
c0 = AU1500_GPIO2_INT;
c1 = AU1500_GPIO5_INT;
- d0 = AU1500_GPIO0_INT;
- d1 = AU1500_GPIO3_INT;
+ d0 = 0; /* GPIO number, NOT irq! */
+ d1 = 3; /* GPIO number, NOT irq! */
s0 = AU1500_GPIO1_INT;
s1 = AU1500_GPIO4_INT;
} else if (board == BCSR_WHOAMI_DB1100) {
c0 = AU1100_GPIO2_INT;
c1 = AU1100_GPIO5_INT;
- d0 = AU1100_GPIO0_INT;
- d1 = AU1100_GPIO3_INT;
+ d0 = 0; /* GPIO number, NOT irq! */
+ d1 = 3; /* GPIO number, NOT irq! */
s0 = AU1100_GPIO1_INT;
s1 = AU1100_GPIO4_INT;
@@ -545,15 +545,15 @@ int __init db1000_dev_setup(void)
} else if (board == BCSR_WHOAMI_DB1000) {
c0 = AU1000_GPIO2_INT;
c1 = AU1000_GPIO5_INT;
- d0 = AU1000_GPIO0_INT;
- d1 = AU1000_GPIO3_INT;
+ d0 = 0; /* GPIO number, NOT irq! */
+ d1 = 3; /* GPIO number, NOT irq! */
s0 = AU1000_GPIO1_INT;
s1 = AU1000_GPIO4_INT;
platform_add_devices(db1000_devs, ARRAY_SIZE(db1000_devs));
} else if ((board == BCSR_WHOAMI_PB1500) ||
(board == BCSR_WHOAMI_PB1500R2)) {
c0 = AU1500_GPIO203_INT;
- d0 = AU1500_GPIO201_INT;
+ d0 = 1; /* GPIO number, NOT irq! */
s0 = AU1500_GPIO202_INT;
twosocks = 0;
flashsize = 64;
@@ -566,7 +566,7 @@ int __init db1000_dev_setup(void)
*/
} else if (board == BCSR_WHOAMI_PB1100) {
c0 = AU1100_GPIO11_INT;
- d0 = AU1100_GPIO9_INT;
+ d0 = 9; /* GPIO number, NOT irq! */
s0 = AU1100_GPIO10_INT;
twosocks = 0;
flashsize = 64;
@@ -583,7 +583,6 @@ int __init db1000_dev_setup(void)
} else
return 0; /* unknown board, no further dev setup to do */
- irq_set_irq_type(d0, IRQ_TYPE_EDGE_BOTH);
irq_set_irq_type(c0, IRQ_TYPE_LEVEL_LOW);
irq_set_irq_type(s0, IRQ_TYPE_LEVEL_LOW);
@@ -597,7 +596,6 @@ int __init db1000_dev_setup(void)
c0, d0, /*s0*/0, 0, 0);
if (twosocks) {
- irq_set_irq_type(d1, IRQ_TYPE_EDGE_BOTH);
irq_set_irq_type(c1, IRQ_TYPE_LEVEL_LOW);
irq_set_irq_type(s1, IRQ_TYPE_LEVEL_LOW);
diff --git a/arch/mips/alchemy/devboards/db1550.c b/arch/mips/alchemy/devboards/db1550.c
index 5740bcfdfc7f..6c37b9326f41 100644
--- a/arch/mips/alchemy/devboards/db1550.c
+++ b/arch/mips/alchemy/devboards/db1550.c
@@ -514,7 +514,7 @@ static void __init db1550_devices(void)
AU1000_PCMCIA_MEM_PHYS_ADDR + 0x000400000 - 1,
AU1000_PCMCIA_IO_PHYS_ADDR,
AU1000_PCMCIA_IO_PHYS_ADDR + 0x000010000 - 1,
- AU1550_GPIO3_INT, AU1550_GPIO0_INT,
+ AU1550_GPIO3_INT, 0,
/*AU1550_GPIO21_INT*/0, 0, 0);
db1x_register_pcmcia_socket(
@@ -524,7 +524,7 @@ static void __init db1550_devices(void)
AU1000_PCMCIA_MEM_PHYS_ADDR + 0x004400000 - 1,
AU1000_PCMCIA_IO_PHYS_ADDR + 0x004000000,
AU1000_PCMCIA_IO_PHYS_ADDR + 0x004010000 - 1,
- AU1550_GPIO5_INT, AU1550_GPIO1_INT,
+ AU1550_GPIO5_INT, 1,
/*AU1550_GPIO22_INT*/0, 0, 1);
platform_device_register(&db1550_nand_dev);
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index 2046c0230224..21ed7150fec3 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -33,7 +33,7 @@
#define PAGE_SHIFT 16
#endif
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE - 1))
+#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
/*
* This is used for calculating the real page sizes
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 8957f15e21ec..18826aa15a7c 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -353,7 +353,7 @@ static inline pte_t pte_mkdirty(pte_t pte)
static inline pte_t pte_mkyoung(pte_t pte)
{
pte_val(pte) |= _PAGE_ACCESSED;
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
if (!(pte_val(pte) & _PAGE_NO_READ))
pte_val(pte) |= _PAGE_SILENT_READ;
else
@@ -560,7 +560,7 @@ static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
pmd_val(pmd) |= _PAGE_ACCESSED;
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
if (!(pmd_val(pmd) & _PAGE_NO_READ))
pmd_val(pmd) |= _PAGE_SILENT_READ;
else
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
index 6499d93ae68d..47bc45a67e9b 100644
--- a/arch/mips/include/asm/syscall.h
+++ b/arch/mips/include/asm/syscall.h
@@ -101,10 +101,8 @@ static inline void syscall_get_arguments(struct task_struct *task,
/* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */
if ((config_enabled(CONFIG_32BIT) ||
test_tsk_thread_flag(task, TIF_32BIT_REGS)) &&
- (regs->regs[2] == __NR_syscall)) {
+ (regs->regs[2] == __NR_syscall))
i++;
- n++;
- }
while (n--)
ret |= mips_get_syscall_arg(args++, task, regs, i++);
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index bd4385a8e6e8..2b521e07b860 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -121,6 +121,7 @@ static inline void calculate_cpu_foreign_map(void)
cpumask_t temp_foreign_map;
/* Re-calculate the mask */
+ cpumask_clear(&temp_foreign_map);
for_each_online_cpu(i) {
core_present = 0;
for_each_cpu(k, &temp_foreign_map)
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 886cb1976e90..ca9a81007489 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -690,15 +690,15 @@ static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
asmlinkage void do_ov(struct pt_regs *regs)
{
enum ctx_state prev_state;
- siginfo_t info;
+ siginfo_t info = {
+ .si_signo = SIGFPE,
+ .si_code = FPE_INTOVF,
+ .si_addr = (void __user *)regs->cp0_epc,
+ };
prev_state = exception_enter();
die_if_kernel("Integer overflow", regs);
- info.si_code = FPE_INTOVF;
- info.si_signo = SIGFPE;
- info.si_errno = 0;
- info.si_addr = (void __user *) regs->cp0_epc;
force_sig_info(SIGFPE, &info, current);
exception_exit(prev_state);
}
@@ -874,7 +874,7 @@ out:
void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
const char *str)
{
- siginfo_t info;
+ siginfo_t info = { 0 };
char b[40];
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
@@ -903,7 +903,6 @@ void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
else
info.si_code = FPE_INTOVF;
info.si_signo = SIGFPE;
- info.si_errno = 0;
info.si_addr = (void __user *) regs->cp0_epc;
force_sig_info(SIGFPE, &info, current);
break;
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 490cea569d57..5c62065cbf22 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -885,7 +885,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
{
union mips_instruction insn;
unsigned long value;
- unsigned int res;
+ unsigned int res, preempted;
unsigned long origpc;
unsigned long orig31;
void __user *fault_addr = NULL;
@@ -1226,27 +1226,36 @@ static void emulate_load_store_insn(struct pt_regs *regs,
if (!access_ok(VERIFY_READ, addr, sizeof(*fpr)))
goto sigbus;
- /*
- * Disable preemption to avoid a race between copying
- * state from userland, migrating to another CPU and
- * updating the hardware vector register below.
- */
- preempt_disable();
-
- res = __copy_from_user_inatomic(fpr, addr,
- sizeof(*fpr));
- if (res)
- goto fault;
-
- /*
- * Update the hardware register if it is in use by the
- * task in this quantum, in order to avoid having to
- * save & restore the whole vector context.
- */
- if (test_thread_flag(TIF_USEDMSA))
- write_msa_wr(wd, fpr, df);
+ do {
+ /*
+ * If we have live MSA context keep track of
+ * whether we get preempted in order to avoid
+ * the register context we load being clobbered
+ * by the live context as it's saved during
+ * preemption. If we don't have live context
+ * then it can't be saved to clobber the value
+ * we load.
+ */
+ preempted = test_thread_flag(TIF_USEDMSA);
+
+ res = __copy_from_user_inatomic(fpr, addr,
+ sizeof(*fpr));
+ if (res)
+ goto fault;
- preempt_enable();
+ /*
+ * Update the hardware register if it is in use
+ * by the task in this quantum, in order to
+ * avoid having to save & restore the whole
+ * vector context.
+ */
+ preempt_disable();
+ if (test_thread_flag(TIF_USEDMSA)) {
+ write_msa_wr(wd, fpr, df);
+ preempted = 0;
+ }
+ preempt_enable();
+ } while (preempted);
break;
case msa_st_op:
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index b9b803facdbf..2683d04fdda5 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -702,7 +702,7 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
void __user *uaddr = (void __user *)(long)reg->addr;
- return copy_to_user(uaddr, vs, 16);
+ return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
} else {
return -EINVAL;
}
@@ -732,7 +732,7 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
void __user *uaddr = (void __user *)(long)reg->addr;
- return copy_from_user(vs, uaddr, 16);
+ return copy_from_user(vs, uaddr, 16) ? -EFAULT : 0;
} else {
return -EINVAL;
}
diff --git a/arch/mips/loongson64/loongson-3/hpet.c b/arch/mips/loongson64/loongson-3/hpet.c
index bf9f1a77f0e5..a2631a52ca99 100644
--- a/arch/mips/loongson64/loongson-3/hpet.c
+++ b/arch/mips/loongson64/loongson-3/hpet.c
@@ -13,6 +13,9 @@
#define SMBUS_PCI_REG64 0x64
#define SMBUS_PCI_REGB4 0xb4
+#define HPET_MIN_CYCLES 64
+#define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))
+
static DEFINE_SPINLOCK(hpet_lock);
DEFINE_PER_CPU(struct clock_event_device, hpet_clockevent_device);
@@ -161,8 +164,9 @@ static int hpet_next_event(unsigned long delta,
cnt += delta;
hpet_write(HPET_T0_CMP, cnt);
- res = ((int)(hpet_read(HPET_COUNTER) - cnt) > 0) ? -ETIME : 0;
- return res;
+ res = (int)(cnt - hpet_read(HPET_COUNTER));
+
+ return res < HPET_MIN_CYCLES ? -ETIME : 0;
}
static irqreturn_t hpet_irq_handler(int irq, void *data)
@@ -237,7 +241,7 @@ void __init setup_hpet_timer(void)
cd->cpumask = cpumask_of(cpu);
clockevent_set_clock(cd, HPET_FREQ);
cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
- cd->min_delta_ns = 5000;
+ cd->min_delta_ns = clockevent_delta2ns(HPET_MIN_PROG_DELTA, cd);
clockevents_register_device(cd);
setup_irq(HPET_T0_IRQ, &hpet_irq);
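
The corrected comparison reads the free-running counter back and computes a signed difference against the programmed comparator, which stays correct across 32-bit wraparound; only results below the safety margin report -ETIME. A runnable sketch of the arithmetic, with HPET_MIN_CYCLES taken from the hunk above:

#include <stdio.h>
#include <stdint.h>

#define HPET_MIN_CYCLES 64

static int next_event_ok(uint32_t cmp, uint32_t counter)
{
	int res = (int)(cmp - counter);	/* signed: wrap-safe distance */

	return res >= HPET_MIN_CYCLES;	/* else the caller gets -ETIME */
}

int main(void)
{
	/* Wraparound case: comparator just past 0, counter near 2^32. */
	printf("%d\n", next_event_ok(0x40u, 0xffffffe0u)); /* 1: 96 ahead  */
	printf("%d\n", next_event_ok(0x10u, 0xffffffe0u)); /* 0: too close */
	return 0;
}
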
diff --git a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/loongson-3/smp.c
index 1a4738a8f2d3..509832a9836c 100644
--- a/arch/mips/loongson64/loongson-3/smp.c
+++ b/arch/mips/loongson64/loongson-3/smp.c
@@ -30,13 +30,13 @@
#include "smp.h"
DEFINE_PER_CPU(int, cpu_state);
-DEFINE_PER_CPU(uint32_t, core0_c0count);
static void *ipi_set0_regs[16];
static void *ipi_clear0_regs[16];
static void *ipi_status0_regs[16];
static void *ipi_en0_regs[16];
static void *ipi_mailbox_buf[16];
+static uint32_t core0_c0count[NR_CPUS];
/* read a 32bit value from ipi register */
#define loongson3_ipi_read32(addr) readl(addr)
@@ -275,12 +275,14 @@ void loongson3_ipi_interrupt(struct pt_regs *regs)
if (action & SMP_ASK_C0COUNT) {
BUG_ON(cpu != 0);
c0count = read_c0_count();
- for (i = 1; i < num_possible_cpus(); i++)
- per_cpu(core0_c0count, i) = c0count;
+ c0count = c0count ? c0count : 1;
+ for (i = 1; i < nr_cpu_ids; i++)
+ core0_c0count[i] = c0count;
+ __wbflush(); /* Let others see the result ASAP */
}
}
-#define MAX_LOOPS 1111
+#define MAX_LOOPS 800
/*
* SMP init and finish on secondary CPUs
*/
@@ -305,16 +307,20 @@ static void loongson3_init_secondary(void)
cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
i = 0;
- __this_cpu_write(core0_c0count, 0);
+ core0_c0count[cpu] = 0;
loongson3_send_ipi_single(0, SMP_ASK_C0COUNT);
- while (!__this_cpu_read(core0_c0count)) {
+ while (!core0_c0count[cpu]) {
i++;
cpu_relax();
}
if (i > MAX_LOOPS)
i = MAX_LOOPS;
- initcount = __this_cpu_read(core0_c0count) + i;
+ if (cpu_data[cpu].package)
+ initcount = core0_c0count[cpu] + i;
+ else /* Local access is faster for loops */
+ initcount = core0_c0count[cpu] + i/2;
+
write_c0_count(initcount);
}
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index 3bd0597d9c3d..ddb8154610cc 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -164,11 +164,13 @@ static int __init mips_sc_probe_cm3(void)
sets = cfg & CM_GCR_L2_CONFIG_SET_SIZE_MSK;
sets >>= CM_GCR_L2_CONFIG_SET_SIZE_SHF;
- c->scache.sets = 64 << sets;
+ if (sets)
+ c->scache.sets = 64 << sets;
line_sz = cfg & CM_GCR_L2_CONFIG_LINE_SIZE_MSK;
line_sz >>= CM_GCR_L2_CONFIG_LINE_SIZE_SHF;
- c->scache.linesz = 2 << line_sz;
+ if (line_sz)
+ c->scache.linesz = 2 << line_sz;
assoc = cfg & CM_GCR_L2_CONFIG_ASSOC_MSK;
assoc >>= CM_GCR_L2_CONFIG_ASSOC_SHF;
@@ -176,9 +178,12 @@ static int __init mips_sc_probe_cm3(void)
c->scache.waysize = c->scache.sets * c->scache.linesz;
c->scache.waybit = __ffs(c->scache.waysize);
- c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
+ if (c->scache.linesz) {
+ c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
+ return 1;
+ }
- return 1;
+ return 0;
}
void __weak platform_early_l2_init(void)
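
The probe fix depends on the CM3 register encoding in which an all-zero field means the resource is absent, so the "64 << sets" and "2 << line_sz" expansions must be skipped for zero rather than reporting a bogus cache. A sketch of the corrected decode with the fields already extracted (the real CM_GCR_L2_CONFIG_* masks and shifts are omitted):

#include <stdio.h>

/* Returns 1 if an L2 is present. Zero-valued fields must not be fed
 * into the shifts: they encode "none". */
static int decode_l2(unsigned sets_field, unsigned line_field,
		     unsigned *sets, unsigned *linesz)
{
	*sets = *linesz = 0;
	if (sets_field)
		*sets = 64u << sets_field;
	if (line_field)
		*linesz = 2u << line_field;
	return *linesz != 0;	/* line size 0 == no L2 fitted */
}

int main(void)
{
	unsigned sets, linesz;

	printf("present=%d\n", decode_l2(0, 0, &sets, &linesz));  /* 0 */
	printf("present=%d sets=%u linesz=%u\n",
	       decode_l2(4, 5, &sets, &linesz), sets, linesz);    /* 1 1024 64 */
	return 0;
}
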
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 32e0be27673f..29f73e00253d 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -242,7 +242,7 @@ static void output_pgtable_bits_defines(void)
pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
pr_define("_PAGE_SPLITTING_SHIFT %d\n", _PAGE_SPLITTING_SHIFT);
#endif
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
if (cpu_has_rixi) {
#ifdef _PAGE_NO_EXEC_SHIFT
pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
diff --git a/arch/parisc/include/asm/hugetlb.h b/arch/parisc/include/asm/hugetlb.h
index 7d56a9ccb752..a65d888716c4 100644
--- a/arch/parisc/include/asm/hugetlb.h
+++ b/arch/parisc/include/asm/hugetlb.h
@@ -54,24 +54,12 @@ static inline pte_t huge_pte_wrprotect(pte_t pte)
return pte_wrprotect(pte);
}
-static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
-{
- pte_t old_pte = *ptep;
- set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
-}
+void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep);
-static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
- pte_t pte, int dirty)
-{
- int changed = !pte_same(*ptep, pte);
- if (changed) {
- set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
- flush_tlb_page(vma, addr);
- }
- return changed;
-}
+ pte_t pte, int dirty);
static inline pte_t huge_ptep_get(pte_t *ptep)
{
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index 0abdd4c607ed..1960b87c1c8b 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -76,6 +76,7 @@ struct exception_table_entry {
*/
struct exception_data {
unsigned long fault_ip;
+ unsigned long fault_gp;
unsigned long fault_space;
unsigned long fault_addr;
};
diff --git a/arch/parisc/include/uapi/asm/siginfo.h b/arch/parisc/include/uapi/asm/siginfo.h
index d7034728f377..1c75565d984b 100644
--- a/arch/parisc/include/uapi/asm/siginfo.h
+++ b/arch/parisc/include/uapi/asm/siginfo.h
@@ -1,6 +1,10 @@
#ifndef _PARISC_SIGINFO_H
#define _PARISC_SIGINFO_H
+#if defined(__LP64__)
+#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
+#endif
+
#include <asm-generic/siginfo.h>
#undef NSIGTRAP
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index d2f62570a7b1..78d30d2ea2d8 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -299,6 +299,7 @@ int main(void)
#endif
BLANK();
DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip));
+ DEFINE(EXCDATA_GP, offsetof(struct exception_data, fault_gp));
DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space));
DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr));
BLANK();
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
index 568b2c61ea02..3cad8aadc69e 100644
--- a/arch/parisc/kernel/parisc_ksyms.c
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -47,11 +47,11 @@ EXPORT_SYMBOL(__cmpxchg_u64);
EXPORT_SYMBOL(lclear_user);
EXPORT_SYMBOL(lstrnlen_user);
-/* Global fixups */
-extern void fixup_get_user_skip_1(void);
-extern void fixup_get_user_skip_2(void);
-extern void fixup_put_user_skip_1(void);
-extern void fixup_put_user_skip_2(void);
+/* Global fixups - defined as int to avoid creation of function pointers */
+extern int fixup_get_user_skip_1;
+extern int fixup_get_user_skip_2;
+extern int fixup_put_user_skip_1;
+extern int fixup_put_user_skip_2;
EXPORT_SYMBOL(fixup_get_user_skip_1);
EXPORT_SYMBOL(fixup_get_user_skip_2);
EXPORT_SYMBOL(fixup_put_user_skip_1);
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index 9585c81f755f..ce0b2b4075c7 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -269,14 +269,19 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
long do_syscall_trace_enter(struct pt_regs *regs)
{
- long ret = 0;
-
/* Do the secure computing check first. */
secure_computing_strict(regs->gr[20]);
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
- tracehook_report_syscall_entry(regs))
- ret = -1L;
+ tracehook_report_syscall_entry(regs)) {
+ /*
+ * Tracing decided this syscall should not happen or the
+ * debugger stored an invalid system call number. Skip
+ * the system call and the system call restart handling.
+ */
+ regs->gr[20] = -1UL;
+ goto out;
+ }
#ifdef CONFIG_64BIT
if (!is_compat_task())
@@ -290,7 +295,8 @@ long do_syscall_trace_enter(struct pt_regs *regs)
regs->gr[24] & 0xffffffff,
regs->gr[23] & 0xffffffff);
- return ret ? : regs->gr[20];
+out:
+ return regs->gr[20];
}
void do_syscall_trace_exit(struct pt_regs *regs)
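
The parisc change adopts the common ptrace convention for vetoing a syscall: the trace hook overwrites the syscall number with -1, and the dispatcher then fails the request with ENOSYS without invoking a handler or the restart logic. A compact model of that flow (all names here are illustrative):

#include <errno.h>
#include <stdio.h>

#define NR_SYSCALLS 400

/* Models the tracer hook: on veto, poison the syscall number. */
static long trace_enter(long *nr, int deny)
{
	if (deny)
		*nr = -1;	/* never matches a real syscall */
	return *nr;
}

/* Models the dispatcher: out-of-range numbers fail with ENOSYS. */
static long dispatch(long nr)
{
	if (nr < 0 || nr >= NR_SYSCALLS)
		return -ENOSYS;
	return 0;		/* would index the syscall table here */
}

int main(void)
{
	long nr = 42;

	printf("%ld\n", dispatch(trace_enter(&nr, 1))); /* -38 == -ENOSYS */
	return 0;
}
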
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 3fbd7252a4b2..fbafa0d0e2bf 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -343,7 +343,7 @@ tracesys_next:
#endif
comiclr,>>= __NR_Linux_syscalls, %r20, %r0
- b,n .Lsyscall_nosys
+ b,n .Ltracesys_nosys
LDREGX %r20(%r19), %r19
@@ -359,6 +359,9 @@ tracesys_next:
be 0(%sr7,%r19)
ldo R%tracesys_exit(%r2),%r2
+.Ltracesys_nosys:
+ ldo -ENOSYS(%r0),%r28 /* set errno */
+
/* Do *not* call this function on the gateway page, because it
makes a direct call to syscall_trace. */
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 553b09855cfd..77e2262c97f6 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -798,6 +798,9 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
if (fault_space == 0 && !faulthandler_disabled())
{
+ /* Clean up and return if in exception table. */
+ if (fixup_exception(regs))
+ return;
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
parisc_terminate("Kernel Fault", regs, code, fault_address);
}
diff --git a/arch/parisc/lib/fixup.S b/arch/parisc/lib/fixup.S
index 536ef66bb94b..1052b747e011 100644
--- a/arch/parisc/lib/fixup.S
+++ b/arch/parisc/lib/fixup.S
@@ -26,6 +26,7 @@
#ifdef CONFIG_SMP
.macro get_fault_ip t1 t2
+ loadgp
addil LT%__per_cpu_offset,%r27
LDREG RT%__per_cpu_offset(%r1),\t1
/* t2 = smp_processor_id() */
@@ -40,14 +41,19 @@
LDREG RT%exception_data(%r1),\t1
/* t1 = this_cpu_ptr(&exception_data) */
add,l \t1,\t2,\t1
+ /* %r27 = t1->fault_gp - restore gp */
+ LDREG EXCDATA_GP(\t1), %r27
/* t1 = t1->fault_ip */
LDREG EXCDATA_IP(\t1), \t1
.endm
#else
.macro get_fault_ip t1 t2
+ loadgp
/* t1 = this_cpu_ptr(&exception_data) */
addil LT%exception_data,%r27
LDREG RT%exception_data(%r1),\t2
+ /* %r27 = t2->fault_gp - restore gp */
+ LDREG EXCDATA_GP(\t2), %r27
/* t1 = t2->fault_ip */
LDREG EXCDATA_IP(\t2), \t1
.endm
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index a762864ec92e..f9064449908a 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -151,6 +151,7 @@ int fixup_exception(struct pt_regs *regs)
struct exception_data *d;
d = this_cpu_ptr(&exception_data);
d->fault_ip = regs->iaoq[0];
+ d->fault_gp = regs->gr[27];
d->fault_space = regs->isr;
d->fault_addr = regs->ior;
diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c
index f6fdc77a72bd..54ba39262b82 100644
--- a/arch/parisc/mm/hugetlbpage.c
+++ b/arch/parisc/mm/hugetlbpage.c
@@ -105,15 +105,13 @@ static inline void purge_tlb_entries_huge(struct mm_struct *mm, unsigned long ad
addr |= _HUGE_PAGE_SIZE_ENCODING_DEFAULT;
for (i = 0; i < (1 << (HPAGE_SHIFT-REAL_HPAGE_SHIFT)); i++) {
- mtsp(mm->context, 1);
- pdtlb(addr);
- if (unlikely(split_tlb))
- pitlb(addr);
+ purge_tlb_entries(mm, addr);
addr += (1UL << REAL_HPAGE_SHIFT);
}
}
-void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+/* __set_huge_pte_at() must be called holding the pa_tlb_lock. */
+static void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t entry)
{
unsigned long addr_start;
@@ -123,14 +121,9 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
addr_start = addr;
for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
- /* Directly write pte entry. We could call set_pte_at(mm, addr, ptep, entry)
- * instead, but then we get double locking on pa_tlb_lock. */
- *ptep = entry;
+ set_pte(ptep, entry);
ptep++;
- /* Drop the PAGE_SIZE/non-huge tlb entry */
- purge_tlb_entries(mm, addr);
-
addr += PAGE_SIZE;
pte_val(entry) += PAGE_SIZE;
}
@@ -138,18 +131,61 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
purge_tlb_entries_huge(mm, addr_start);
}
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t entry)
+{
+ unsigned long flags;
+
+ purge_tlb_start(flags);
+ __set_huge_pte_at(mm, addr, ptep, entry);
+ purge_tlb_end(flags);
+}
+
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
+ unsigned long flags;
pte_t entry;
+ purge_tlb_start(flags);
entry = *ptep;
- set_huge_pte_at(mm, addr, ptep, __pte(0));
+ __set_huge_pte_at(mm, addr, ptep, __pte(0));
+ purge_tlb_end(flags);
return entry;
}
+
+void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ unsigned long flags;
+ pte_t old_pte;
+
+ purge_tlb_start(flags);
+ old_pte = *ptep;
+ __set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
+ purge_tlb_end(flags);
+}
+
+int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty)
+{
+ unsigned long flags;
+ int changed;
+
+ purge_tlb_start(flags);
+ changed = !pte_same(*ptep, pte);
+ if (changed) {
+ __set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+ }
+ purge_tlb_end(flags);
+ return changed;
+}
+
+
int pmd_huge(pmd_t pmd)
{
return 0;
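
The restructuring above follows a standard locking split: a double-underscore helper that assumes the lock is held, plus thin public wrappers that take it. That lets compound operations such as huge_ptep_get_and_clear() read and rewrite the PTE under one lock acquisition instead of double-locking. A sketch of the shape, with a pthread mutex standing in for pa_tlb_lock:

#include <pthread.h>

static pthread_mutex_t pa_tlb_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long pte;

/* Must be called with pa_tlb_lock held. */
static void __set_pte(unsigned long v)
{
	pte = v;
}

static void set_pte_locked(unsigned long v)
{
	pthread_mutex_lock(&pa_tlb_lock);
	__set_pte(v);
	pthread_mutex_unlock(&pa_tlb_lock);
}

/* Compound read-modify-write under a single lock acquisition. */
static unsigned long get_and_clear_pte(void)
{
	unsigned long old;

	pthread_mutex_lock(&pa_tlb_lock);
	old = pte;
	__set_pte(0);		/* helper reused, lock already held */
	pthread_mutex_unlock(&pa_tlb_lock);
	return old;
}

int main(void)
{
	set_pte_locked(42);
	return get_and_clear_pte() == 42 ? 0 : 1;
}
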
diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h
index ad6263cffb0f..d1a8d93cccfd 100644
--- a/arch/powerpc/include/asm/cmpxchg.h
+++ b/arch/powerpc/include/asm/cmpxchg.h
@@ -18,12 +18,12 @@ __xchg_u32(volatile void *p, unsigned long val)
unsigned long prev;
__asm__ __volatile__(
- PPC_RELEASE_BARRIER
+ PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %0,0,%2 \n"
PPC405_ERR77(0,%2)
" stwcx. %3,0,%2 \n\
bne- 1b"
- PPC_ACQUIRE_BARRIER
+ PPC_ATOMIC_EXIT_BARRIER
: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
: "r" (p), "r" (val)
: "cc", "memory");
@@ -61,12 +61,12 @@ __xchg_u64(volatile void *p, unsigned long val)
unsigned long prev;
__asm__ __volatile__(
- PPC_RELEASE_BARRIER
+ PPC_ATOMIC_ENTRY_BARRIER
"1: ldarx %0,0,%2 \n"
PPC405_ERR77(0,%2)
" stdcx. %3,0,%2 \n\
bne- 1b"
- PPC_ACQUIRE_BARRIER
+ PPC_ATOMIC_EXIT_BARRIER
: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
: "r" (p), "r" (val)
: "cc", "memory");
@@ -151,14 +151,14 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
unsigned int prev;
__asm__ __volatile__ (
- PPC_RELEASE_BARRIER
+ PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %0,0,%2 # __cmpxchg_u32\n\
cmpw 0,%0,%3\n\
bne- 2f\n"
PPC405_ERR77(0,%2)
" stwcx. %4,0,%2\n\
bne- 1b"
- PPC_ACQUIRE_BARRIER
+ PPC_ATOMIC_EXIT_BARRIER
"\n\
2:"
: "=&r" (prev), "+m" (*p)
@@ -197,13 +197,13 @@ __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
unsigned long prev;
__asm__ __volatile__ (
- PPC_RELEASE_BARRIER
+ PPC_ATOMIC_ENTRY_BARRIER
"1: ldarx %0,0,%2 # __cmpxchg_u64\n\
cmpd 0,%0,%3\n\
bne- 2f\n\
stdcx. %4,0,%2\n\
bne- 1b"
- PPC_ACQUIRE_BARRIER
+ PPC_ATOMIC_EXIT_BARRIER
"\n\
2:"
: "=&r" (prev), "+m" (*p)
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index c5eb86f3d452..867c39b45df6 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -81,6 +81,7 @@ struct pci_dn;
#define EEH_PE_KEEP (1 << 8) /* Keep PE on hotplug */
#define EEH_PE_CFG_RESTRICTED (1 << 9) /* Block config on error */
#define EEH_PE_REMOVED (1 << 10) /* Removed permanently */
+#define EEH_PE_PRI_BUS (1 << 11) /* Cached primary bus */
struct eeh_pe {
int type; /* PE type: PHB/Bus/Device */
diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h
index 8374afed9d0a..f8faaaeeca1e 100644
--- a/arch/powerpc/include/asm/opal-api.h
+++ b/arch/powerpc/include/asm/opal-api.h
@@ -157,7 +157,8 @@
#define OPAL_LEDS_GET_INDICATOR 114
#define OPAL_LEDS_SET_INDICATOR 115
#define OPAL_CEC_REBOOT2 116
-#define OPAL_LAST 116
+#define OPAL_CONSOLE_FLUSH 117
+#define OPAL_LAST 117
/* Device tree flags */
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 800115910e43..07a99e638449 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -35,6 +35,7 @@ int64_t opal_console_read(int64_t term_number, __be64 *length,
uint8_t *buffer);
int64_t opal_console_write_buffer_space(int64_t term_number,
__be64 *length);
+int64_t opal_console_flush(int64_t term_number);
int64_t opal_rtc_read(__be32 *year_month_day,
__be64 *hour_minute_second_millisecond);
int64_t opal_rtc_write(uint32_t year_month_day,
@@ -262,6 +263,8 @@ extern int opal_resync_timebase(void);
extern void opal_lpc_init(void);
+extern void opal_kmsg_init(void);
+
extern int opal_event_request(unsigned int opal_event_nr);
struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
index e682a7143edb..c50868681f9e 100644
--- a/arch/powerpc/include/asm/synch.h
+++ b/arch/powerpc/include/asm/synch.h
@@ -44,7 +44,7 @@ static inline void isync(void)
MAKE_LWSYNC_SECTION_ENTRY(97, __lwsync_fixup);
#define PPC_ACQUIRE_BARRIER "\n" stringify_in_c(__PPC_ACQUIRE_BARRIER)
#define PPC_RELEASE_BARRIER stringify_in_c(LWSYNC) "\n"
-#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(LWSYNC) "\n"
+#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(sync) "\n"
#define PPC_ATOMIC_EXIT_BARRIER "\n" stringify_in_c(sync) "\n"
#else
#define PPC_ACQUIRE_BARRIER
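
The barrier change upgrades the entry barrier of value-returning atomics from lwsync to sync, i.e. from release-flavoured ordering to a full barrier, matching the requirement that such atomics be fully ordered. In C11 terms the difference is roughly the one below (an analogy for illustration, not the kernel implementation):

#include <stdatomic.h>

static _Atomic unsigned long v;

/* lwsync-entry flavour: acquire/release around the exchange */
unsigned long xchg_acq_rel(unsigned long n)
{
	return atomic_exchange_explicit(&v, n, memory_order_acq_rel);
}

/* sync-entry flavour: sequentially consistent, a full barrier */
unsigned long xchg_full(unsigned long n)
{
	return atomic_exchange_explicit(&v, n, memory_order_seq_cst);
}

int main(void)
{
	xchg_acq_rel(1);
	return xchg_full(2) == 1 ? 0 : 1;
}
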
diff --git a/arch/powerpc/include/uapi/asm/elf.h b/arch/powerpc/include/uapi/asm/elf.h
index 59dad113897b..c2d21d11c2d2 100644
--- a/arch/powerpc/include/uapi/asm/elf.h
+++ b/arch/powerpc/include/uapi/asm/elf.h
@@ -295,6 +295,8 @@ do { \
#define R_PPC64_TLSLD 108
#define R_PPC64_TOCSAVE 109
+#define R_PPC64_ENTRY 118
+
#define R_PPC64_REL16 249
#define R_PPC64_REL16_LO 250
#define R_PPC64_REL16_HI 251
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 8d14feb40f12..52c1e273f8cd 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -418,8 +418,7 @@ static void *eeh_rmv_device(void *data, void *userdata)
eeh_pcid_put(dev);
if (driver->err_handler &&
driver->err_handler->error_detected &&
- driver->err_handler->slot_reset &&
- driver->err_handler->resume)
+ driver->err_handler->slot_reset)
return NULL;
}
@@ -564,6 +563,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
*/
eeh_pe_state_mark(pe, EEH_PE_KEEP);
if (bus) {
+ eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
pci_lock_rescan_remove();
pcibios_remove_pci_devices(bus);
pci_unlock_rescan_remove();
@@ -803,6 +803,7 @@ perm_error:
* their PCI config any more.
*/
if (frozen_bus) {
+ eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
pci_lock_rescan_remove();
@@ -886,6 +887,7 @@ static void eeh_handle_special_event(void)
continue;
/* Notify all devices to be down */
+ eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
bus = eeh_pe_bus_get(phb_pe);
eeh_pe_dev_traverse(pe,
eeh_report_failure, NULL);
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index 8654cb166c19..98f81800e00c 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -883,32 +883,29 @@ void eeh_pe_restore_bars(struct eeh_pe *pe)
const char *eeh_pe_loc_get(struct eeh_pe *pe)
{
struct pci_bus *bus = eeh_pe_bus_get(pe);
- struct device_node *dn = pci_bus_to_OF_node(bus);
+ struct device_node *dn;
const char *loc = NULL;
- if (!dn)
- goto out;
+ while (bus) {
+ dn = pci_bus_to_OF_node(bus);
+ if (!dn) {
+ bus = bus->parent;
+ continue;
+ }
- /* PHB PE or root PE ? */
- if (pci_is_root_bus(bus)) {
- loc = of_get_property(dn, "ibm,loc-code", NULL);
- if (!loc)
+ if (pci_is_root_bus(bus))
loc = of_get_property(dn, "ibm,io-base-loc-code", NULL);
+ else
+ loc = of_get_property(dn, "ibm,slot-location-code",
+ NULL);
+
if (loc)
- goto out;
+ return loc;
- /* Check the root port */
- dn = dn->child;
- if (!dn)
- goto out;
+ bus = bus->parent;
}
- loc = of_get_property(dn, "ibm,loc-code", NULL);
- if (!loc)
- loc = of_get_property(dn, "ibm,slot-location-code", NULL);
-
-out:
- return loc ? loc : "N/A";
+ return "N/A";
}
/**
@@ -931,7 +928,7 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
bus = pe->phb->bus;
} else if (pe->type & EEH_PE_BUS ||
pe->type & EEH_PE_DEVICE) {
- if (pe->bus) {
+ if (pe->state & EEH_PE_PRI_BUS) {
bus = pe->bus;
goto out;
}
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 68384514506b..e4f7d4eed20c 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -335,7 +335,7 @@ static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
if (syms[i].st_shndx == SHN_UNDEF) {
char *name = strtab + syms[i].st_name;
if (name[0] == '.')
- memmove(name, name+1, strlen(name));
+ syms[i].st_name++;
}
}
}
@@ -635,6 +635,33 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
*/
break;
+ case R_PPC64_ENTRY:
+ /*
+ * Optimize ELFv2 large code model entry point if
+ * the TOC is within 2GB range of current location.
+ */
+ value = my_r2(sechdrs, me) - (unsigned long)location;
+ if (value + 0x80008000 > 0xffffffff)
+ break;
+ /*
+ * Check for the large code model prolog sequence:
+ * ld r2, ...(r12)
+ * add r2, r2, r12
+ */
+ if ((((uint32_t *)location)[0] & ~0xfffc)
+ != 0xe84c0000)
+ break;
+ if (((uint32_t *)location)[1] != 0x7c426214)
+ break;
+ /*
+ * If found, replace it with:
+ * addis r2, r12, (.TOC.-func)@ha
+ * addi r2, r12, (.TOC.-func)@l
+ */
+ ((uint32_t *)location)[0] = 0x3c4c0000 + PPC_HA(value);
+ ((uint32_t *)location)[1] = 0x38420000 + PPC_LO(value);
+ break;
+
case R_PPC64_REL16_HA:
/* Subtract location pointer */
value -= (unsigned long)location;
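
The new R_PPC64_ENTRY case patches a module's global-entry prolog in place when the TOC turns out to be reachable with a 32-bit offset. A standalone model of the check-and-rewrite, with the opcode constants taken from the hunk and PPC_HA/PPC_LO written out as the usual high-adjusted/low 16-bit halves:

#include <stdint.h>
#include <stdio.h>

#define PPC_LO(v)	((v) & 0xffff)
#define PPC_HA(v)	((((v) >> 16) + (((v) & 0x8000) ? 1 : 0)) & 0xffff)

static int optimize_entry(uint32_t insn[2], int64_t toc_off)
{
	/* Only applicable if .TOC. is within +/-2GB of the entry point. */
	if ((uint64_t)toc_off + 0x80008000ull > 0xffffffffull)
		return 0;
	/* ld r2,...(r12) ; add r2,r2,r12 -- the large-code-model prolog */
	if ((insn[0] & ~0xfffcu) != 0xe84c0000u || insn[1] != 0x7c426214u)
		return 0;
	insn[0] = 0x3c4c0000u + PPC_HA(toc_off);  /* addis r2,r12,off@ha */
	insn[1] = 0x38420000u + PPC_LO(toc_off);  /* addi  r2,r12,off@l  */
	return 1;
}

int main(void)
{
	uint32_t insn[2] = { 0xe84c0018u, 0x7c426214u };

	printf("patched=%d insn0=%08x insn1=%08x\n",
	       optimize_entry(insn, 0x12345678), insn[0], insn[1]);
	return 0;
}
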
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index a7b91b54c813..36795d1e7558 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -569,6 +569,24 @@ static void tm_reclaim_thread(struct thread_struct *thr,
if (!MSR_TM_SUSPENDED(mfmsr()))
return;
+ /*
+ * Use the current MSR TM suspended bit to track if we have
+ * checkpointed state outstanding.
+ * On signal delivery, we'd normally reclaim the checkpointed
+ * state to obtain stack pointer (see:get_tm_stackpointer()).
+ * This will then directly return to userspace without going
+ * through __switch_to(). However, if the stack frame is bad,
+ * we need to exit this thread which calls __switch_to() which
+ * will again attempt to reclaim the already saved tm state.
+ * Hence we need to check that we've not already reclaimed
+ * this state.
+ * We do this using the current MSR, rather tracking it in
+ * some specific thread_struct bit, as it has the additional
+ * benefit of checking for a potential TM bad thing exception.
+ */
+ if (!MSR_TM_SUSPENDED(mfmsr()))
+ return;
+
tm_reclaim(thr, thr->regs->msr, cause);
/* Having done the reclaim, we now have the checkpointed
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 3c6badcd53ef..463af88c95a2 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1370,6 +1370,20 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
std r6, VCPU_ACOP(r9)
stw r7, VCPU_GUEST_PID(r9)
std r8, VCPU_WORT(r9)
+ /*
+ * Restore various registers to 0, where non-zero values
+ * set by the guest could disrupt the host.
+ */
+ li r0, 0
+ mtspr SPRN_IAMR, r0
+ mtspr SPRN_CIABR, r0
+ mtspr SPRN_DAWRX, r0
+ mtspr SPRN_TCSCR, r0
+ mtspr SPRN_WORT, r0
+ /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
+ li r0, 1
+ sldi r0, r0, 31
+ mtspr SPRN_MMCRS, r0
8:
/* Save and reset AMR and UAMOR before turning on the MMU */
@@ -2153,7 +2167,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
2: rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW
- rlwimi r5, r4, 1, DAWRX_WT
+ rlwimi r5, r4, 2, DAWRX_WT
clrrdi r4, r4, 3
std r4, VCPU_DAWR(r3)
std r5, VCPU_DAWRX(r3)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 6fd2405c7f4a..a3b182dcb823 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -919,21 +919,17 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
r = -ENXIO;
break;
}
- vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
+ val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
break;
case KVM_REG_PPC_VSCR:
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
r = -ENXIO;
break;
}
- vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
+ val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
break;
case KVM_REG_PPC_VRSAVE:
- if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
- r = -ENXIO;
- break;
- }
- vcpu->arch.vrsave = set_reg_val(reg->id, val);
+ val = get_reg_val(reg->id, vcpu->arch.vrsave);
break;
#endif /* CONFIG_ALTIVEC */
default:
@@ -974,17 +970,21 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
r = -ENXIO;
break;
}
- val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
+ vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
break;
case KVM_REG_PPC_VSCR:
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
r = -ENXIO;
break;
}
- val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
+ vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
break;
case KVM_REG_PPC_VRSAVE:
- val = get_reg_val(reg->id, vcpu->arch.vrsave);
+ if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+ r = -ENXIO;
+ break;
+ }
+ vcpu->arch.vrsave = set_reg_val(reg->id, val);
break;
#endif /* CONFIG_ALTIVEC */
default:
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 9833fee493ec..807f1594701d 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -486,13 +486,13 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
{
struct hugepd_freelist **batchp;
- batchp = this_cpu_ptr(&hugepd_freelist_cur);
+ batchp = &get_cpu_var(hugepd_freelist_cur);
if (atomic_read(&tlb->mm->mm_users) < 2 ||
cpumask_equal(mm_cpumask(tlb->mm),
cpumask_of(smp_processor_id()))) {
kmem_cache_free(hugepte_cache, hugepte);
- put_cpu_var(hugepd_freelist_cur);
+ put_cpu_var(hugepd_freelist_cur);
return;
}
diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile
index 1c8cdb6250e7..b9de7ef48849 100644
--- a/arch/powerpc/platforms/powernv/Makefile
+++ b/arch/powerpc/platforms/powernv/Makefile
@@ -2,6 +2,7 @@ obj-y += setup.o opal-wrappers.o opal.o opal-async.o idle.o
obj-y += opal-rtc.o opal-nvram.o opal-lpc.o opal-flash.o
obj-y += rng.o opal-elog.o opal-dump.o opal-sysparam.o opal-sensor.o
obj-y += opal-msglog.o opal-hmi.o opal-power.o opal-irqchip.o
+obj-y += opal-kmsg.o
obj-$(CONFIG_SMP) += smp.o subcore.o subcore-asm.o
obj-$(CONFIG_PCI) += pci.o pci-p5ioc2.o pci-ioda.o
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index e1c90725522a..2ba602591a20 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -444,9 +444,12 @@ static void *pnv_eeh_probe(struct pci_dn *pdn, void *data)
* PCI devices of the PE are expected to be removed prior
* to PE reset.
*/
- if (!edev->pe->bus)
+ if (!(edev->pe->state & EEH_PE_PRI_BUS)) {
edev->pe->bus = pci_find_bus(hose->global_number,
pdn->busno);
+ if (edev->pe->bus)
+ edev->pe->state |= EEH_PE_PRI_BUS;
+ }
/*
* Enable EEH explicitly so that we will do EEH check
diff --git a/arch/powerpc/platforms/powernv/opal-kmsg.c b/arch/powerpc/platforms/powernv/opal-kmsg.c
new file mode 100644
index 000000000000..6f1214d4de92
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/opal-kmsg.c
@@ -0,0 +1,75 @@
+/*
+ * kmsg dumper that ensures the OPAL console fully flushes panic messages
+ *
+ * Author: Russell Currey <ruscur@russell.cc>
+ *
+ * Copyright 2015 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kmsg_dump.h>
+
+#include <asm/opal.h>
+#include <asm/opal-api.h>
+
+/*
+ * Console output is controlled by OPAL firmware. The kernel regularly calls
+ * OPAL_POLL_EVENTS, which flushes some console output. In a panic state,
+ * however, the kernel no longer calls OPAL_POLL_EVENTS and the panic message
+ * may not be completely printed. This function does not actually dump the
+ * message; it just ensures that OPAL completely flushes the console buffer.
+ */
+static void force_opal_console_flush(struct kmsg_dumper *dumper,
+ enum kmsg_dump_reason reason)
+{
+ int i;
+ int64_t ret;
+
+ /*
+ * Outside of a panic context the pollers will continue to run,
+ * so we don't need to do any special flushing.
+ */
+ if (reason != KMSG_DUMP_PANIC)
+ return;
+
+ if (opal_check_token(OPAL_CONSOLE_FLUSH)) {
+ ret = opal_console_flush(0);
+
+ if (ret == OPAL_UNSUPPORTED || ret == OPAL_PARAMETER)
+ return;
+
+ /* Incrementally flush until there's nothing left */
+ while (opal_console_flush(0) != OPAL_SUCCESS);
+ } else {
+ /*
+ * If OPAL_CONSOLE_FLUSH is not implemented in the firmware,
+ * the console can still be flushed by calling the polling
+ * function enough times to flush the buffer. We don't know
+ * how much output still needs to be flushed, but we can be
+ * generous since the kernel is in panic and doesn't need
+ * to do much else.
+ */
+ printk(KERN_NOTICE "opal: OPAL_CONSOLE_FLUSH missing.\n");
+ for (i = 0; i < 1024; i++) {
+ opal_poll_events(NULL);
+ }
+ }
+}
+
+static struct kmsg_dumper opal_kmsg_dumper = {
+ .dump = force_opal_console_flush
+};
+
+void __init opal_kmsg_init(void)
+{
+ int rc;
+
+ /* Add our dumper to the list */
+ rc = kmsg_dump_register(&opal_kmsg_dumper);
+ if (rc != 0)
+ pr_err("opal: kmsg_dump_register failed; returned %d\n", rc);
+}
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index b7a464fef7a7..e45b88a5d7e0 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -301,3 +301,4 @@ OPAL_CALL(opal_flash_erase, OPAL_FLASH_ERASE);
OPAL_CALL(opal_prd_msg, OPAL_PRD_MSG);
OPAL_CALL(opal_leds_get_ind, OPAL_LEDS_GET_INDICATOR);
OPAL_CALL(opal_leds_set_ind, OPAL_LEDS_SET_INDICATOR);
+OPAL_CALL(opal_console_flush, OPAL_CONSOLE_FLUSH);
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 57cffb80bc36..ae29eaf85e9e 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -758,6 +758,9 @@ static int __init opal_init(void)
opal_pdev_init(opal_node, "ibm,opal-flash");
opal_pdev_init(opal_node, "ibm,opal-prd");
+ /* Initialise OPAL kmsg dumper for flushing console on panic */
+ opal_kmsg_init();
+
return 0;
}
machine_subsys_initcall(powernv, opal_init);
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 414fd1a00fda..e40d0714679e 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -3034,6 +3034,7 @@ static void pnv_pci_ioda_shutdown(struct pci_controller *hose)
static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
.dma_dev_setup = pnv_pci_dma_dev_setup,
+ .dma_bus_setup = pnv_pci_dma_bus_setup,
#ifdef CONFIG_PCI_MSI
.setup_msi_irqs = pnv_setup_msi_irqs,
.teardown_msi_irqs = pnv_teardown_msi_irqs,
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index f2dd77234240..ad8c3f4a5e0b 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -601,6 +601,9 @@ int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
u64 rpn = __pa(uaddr) >> tbl->it_page_shift;
long i;
+ if (proto_tce & TCE_PCI_WRITE)
+ proto_tce |= TCE_PCI_READ;
+
for (i = 0; i < npages; i++) {
unsigned long newtce = proto_tce |
((rpn + i) << tbl->it_page_shift);
@@ -622,6 +625,9 @@ int pnv_tce_xchg(struct iommu_table *tbl, long index,
BUG_ON(*hpa & ~IOMMU_PAGE_MASK(tbl));
+ if (newtce & TCE_PCI_WRITE)
+ newtce |= TCE_PCI_READ;
+
oldtce = xchg(pnv_tce(tbl, idx), cpu_to_be64(newtce));
*hpa = be64_to_cpu(oldtce) & ~(TCE_PCI_READ | TCE_PCI_WRITE);
*direction = iommu_tce_direction(oldtce);
@@ -762,6 +768,26 @@ void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
phb->dma_dev_setup(phb, pdev);
}
+void pnv_pci_dma_bus_setup(struct pci_bus *bus)
+{
+ struct pci_controller *hose = bus->sysdata;
+ struct pnv_phb *phb = hose->private_data;
+ struct pnv_ioda_pe *pe;
+
+ list_for_each_entry(pe, &phb->ioda.pe_list, list) {
+ if (!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)))
+ continue;
+
+ if (!pe->pbus)
+ continue;
+
+ if (bus->number == ((pe->rid >> 8) & 0xFF)) {
+ pe->pbus = bus;
+ break;
+ }
+ }
+}
+
void pnv_pci_shutdown(void)
{
struct pci_controller *hose;
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index c8ff50e90766..36a99feab7d8 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -235,6 +235,7 @@ extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev);
extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option);
extern void pnv_pci_dma_dev_setup(struct pci_dev *pdev);
+extern void pnv_pci_dma_bus_setup(struct pci_bus *bus);
extern int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type);
extern void pnv_teardown_msi_irqs(struct pci_dev *pdev);
diff --git a/arch/s390/include/asm/fpu/internal.h b/arch/s390/include/asm/fpu/internal.h
index 2559b16da525..17d9dcd29d45 100644
--- a/arch/s390/include/asm/fpu/internal.h
+++ b/arch/s390/include/asm/fpu/internal.h
@@ -48,6 +48,7 @@ static inline void convert_fp_to_vx(__vector128 *vxrs, freg_t *fprs)
static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu)
{
fpregs->pad = 0;
+ fpregs->fpc = fpu->fpc;
if (MACHINE_HAS_VX)
convert_vx_to_fp((freg_t *)&fpregs->fprs, fpu->vxrs);
else
@@ -57,6 +58,7 @@ static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu)
static inline void fpregs_load(_s390_fp_regs *fpregs, struct fpu *fpu)
{
+ fpu->fpc = fpregs->fpc;
if (MACHINE_HAS_VX)
convert_fp_to_vx(fpu->vxrs, (freg_t *)&fpregs->fprs);
else
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index efaac2c3bb77..e9a983f40a24 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -506,7 +506,6 @@ struct kvm_vcpu_arch {
struct kvm_s390_sie_block *sie_block;
unsigned int host_acrs[NUM_ACRS];
struct fpu host_fpregs;
- struct fpu guest_fpregs;
struct kvm_s390_local_interrupt local_int;
struct hrtimer ckc_timer;
struct kvm_s390_pgm_info pgm;
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index fb1b93ea3e3f..e485817f7b1a 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -15,17 +15,25 @@
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
+ spin_lock_init(&mm->context.list_lock);
+ INIT_LIST_HEAD(&mm->context.pgtable_list);
+ INIT_LIST_HEAD(&mm->context.gmap_list);
cpumask_clear(&mm->context.cpu_attach_mask);
atomic_set(&mm->context.attach_count, 0);
mm->context.flush_mm = 0;
- mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
- mm->context.asce_bits |= _ASCE_TYPE_REGION3;
#ifdef CONFIG_PGSTE
mm->context.alloc_pgste = page_table_allocate_pgste;
mm->context.has_pgste = 0;
mm->context.use_skey = 0;
#endif
- mm->context.asce_limit = STACK_TOP_MAX;
+ if (mm->context.asce_limit == 0) {
+ /* context created by exec, set asce limit to 4TB */
+ mm->context.asce_bits = _ASCE_TABLE_LENGTH |
+ _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
+ mm->context.asce_limit = STACK_TOP_MAX;
+ } else if (mm->context.asce_limit == (1UL << 31)) {
+ mm_inc_nr_pmds(mm);
+ }
crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
return 0;
}
@@ -111,8 +119,6 @@ static inline void activate_mm(struct mm_struct *prev,
static inline void arch_dup_mmap(struct mm_struct *oldmm,
struct mm_struct *mm)
{
- if (oldmm->context.asce_limit < mm->context.asce_limit)
- crst_table_downgrade(mm, oldmm->context.asce_limit);
}
static inline void arch_exit_mmap(struct mm_struct *mm)
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index c873e682b67f..2b2ced9dc00a 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -45,7 +45,7 @@ struct zpci_fmb {
u64 rpcit_ops;
u64 dma_rbytes;
u64 dma_wbytes;
-} __packed __aligned(16);
+} __packed __aligned(64);
enum zpci_state {
ZPCI_FN_STATE_RESERVED,
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 7b7858f158b4..d7cc79fb6191 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -100,12 +100,26 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
- spin_lock_init(&mm->context.list_lock);
- INIT_LIST_HEAD(&mm->context.pgtable_list);
- INIT_LIST_HEAD(&mm->context.gmap_list);
- return (pgd_t *) crst_table_alloc(mm);
+ unsigned long *table = crst_table_alloc(mm);
+
+ if (!table)
+ return NULL;
+ if (mm->context.asce_limit == (1UL << 31)) {
+ /* Forking a compat process with 2 page table levels */
+ if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
+ crst_table_free(mm, table);
+ return NULL;
+ }
+ }
+ return (pgd_t *) table;
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ if (mm->context.asce_limit == (1UL << 31))
+ pgtable_pmd_page_dtor(virt_to_page(pgd));
+ crst_table_free(mm, (unsigned long *) pgd);
}
-#define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
static inline void pmd_populate(struct mm_struct *mm,
pmd_t *pmd, pgtable_t pte)
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 9cd248f637c7..dc6c9c604543 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -181,6 +181,7 @@ int main(void)
OFFSET(__LC_PSW_SAVE_AREA, _lowcore, psw_save_area);
OFFSET(__LC_PREFIX_SAVE_AREA, _lowcore, prefixreg_save_area);
OFFSET(__LC_FP_CREG_SAVE_AREA, _lowcore, fpt_creg_save_area);
+ OFFSET(__LC_TOD_PROGREG_SAVE_AREA, _lowcore, tod_progreg_save_area);
OFFSET(__LC_CPU_TIMER_SAVE_AREA, _lowcore, cpu_timer_save_area);
OFFSET(__LC_CLOCK_COMP_SAVE_AREA, _lowcore, clock_comp_save_area);
OFFSET(__LC_AREGS_SAVE_AREA, _lowcore, access_regs_save_area);
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 66c94417c0ba..4af60374eba0 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -271,7 +271,7 @@ static int restore_sigregs_ext32(struct pt_regs *regs,
/* Restore high gprs from signal stack */
if (__copy_from_user(&gprs_high, &sregs_ext->gprs_high,
- sizeof(&sregs_ext->gprs_high)))
+ sizeof(sregs_ext->gprs_high)))
return -EFAULT;
for (i = 0; i < NUM_GPRS; i++)
*(__u32 *)&regs->gprs[i] = gprs_high[i];
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 857b6526d298..424e6809ad07 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -1197,114 +1197,12 @@ cleanup_critical:
.quad .Lpsw_idle_lpsw
.Lcleanup_save_fpu_regs:
- TSTMSK __LC_CPU_FLAGS,_CIF_FPU
- bor %r14
- clg %r9,BASED(.Lcleanup_save_fpu_regs_done)
- jhe 5f
- clg %r9,BASED(.Lcleanup_save_fpu_regs_fp)
- jhe 4f
- clg %r9,BASED(.Lcleanup_save_fpu_regs_vx_high)
- jhe 3f
- clg %r9,BASED(.Lcleanup_save_fpu_regs_vx_low)
- jhe 2f
- clg %r9,BASED(.Lcleanup_save_fpu_fpc_end)
- jhe 1f
- lg %r2,__LC_CURRENT
- aghi %r2,__TASK_thread
-0: # Store floating-point controls
- stfpc __THREAD_FPU_fpc(%r2)
-1: # Load register save area and check if VX is active
- lg %r3,__THREAD_FPU_regs(%r2)
- TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
- jz 4f # no VX -> store FP regs
-2: # Store vector registers (V0-V15)
- VSTM %v0,%v15,0,%r3 # vstm 0,15,0(3)
-3: # Store vector registers (V16-V31)
- VSTM %v16,%v31,256,%r3 # vstm 16,31,256(3)
- j 5f # -> done, set CIF_FPU flag
-4: # Store floating-point registers
- std 0,0(%r3)
- std 1,8(%r3)
- std 2,16(%r3)
- std 3,24(%r3)
- std 4,32(%r3)
- std 5,40(%r3)
- std 6,48(%r3)
- std 7,56(%r3)
- std 8,64(%r3)
- std 9,72(%r3)
- std 10,80(%r3)
- std 11,88(%r3)
- std 12,96(%r3)
- std 13,104(%r3)
- std 14,112(%r3)
- std 15,120(%r3)
-5: # Set CIF_FPU flag
- oi __LC_CPU_FLAGS+7,_CIF_FPU
- lg %r9,48(%r11) # return from save_fpu_regs
+ larl %r9,save_fpu_regs
br %r14
-.Lcleanup_save_fpu_fpc_end:
- .quad .Lsave_fpu_regs_fpc_end
-.Lcleanup_save_fpu_regs_vx_low:
- .quad .Lsave_fpu_regs_vx_low
-.Lcleanup_save_fpu_regs_vx_high:
- .quad .Lsave_fpu_regs_vx_high
-.Lcleanup_save_fpu_regs_fp:
- .quad .Lsave_fpu_regs_fp
-.Lcleanup_save_fpu_regs_done:
- .quad .Lsave_fpu_regs_done
.Lcleanup_load_fpu_regs:
- TSTMSK __LC_CPU_FLAGS,_CIF_FPU
- bnor %r14
- clg %r9,BASED(.Lcleanup_load_fpu_regs_done)
- jhe 1f
- clg %r9,BASED(.Lcleanup_load_fpu_regs_fp)
- jhe 2f
- clg %r9,BASED(.Lcleanup_load_fpu_regs_vx_high)
- jhe 3f
- clg %r9,BASED(.Lcleanup_load_fpu_regs_vx)
- jhe 4f
- lg %r4,__LC_CURRENT
- aghi %r4,__TASK_thread
- lfpc __THREAD_FPU_fpc(%r4)
- TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
- lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area
- jz 2f # -> no VX, load FP regs
-4: # Load V0 ..V15 registers
- VLM %v0,%v15,0,%r4
-3: # Load V16..V31 registers
- VLM %v16,%v31,256,%r4
- j 1f
-2: # Load floating-point registers
- ld 0,0(%r4)
- ld 1,8(%r4)
- ld 2,16(%r4)
- ld 3,24(%r4)
- ld 4,32(%r4)
- ld 5,40(%r4)
- ld 6,48(%r4)
- ld 7,56(%r4)
- ld 8,64(%r4)
- ld 9,72(%r4)
- ld 10,80(%r4)
- ld 11,88(%r4)
- ld 12,96(%r4)
- ld 13,104(%r4)
- ld 14,112(%r4)
- ld 15,120(%r4)
-1: # Clear CIF_FPU bit
- ni __LC_CPU_FLAGS+7,255-_CIF_FPU
- lg %r9,48(%r11) # return from load_fpu_regs
+ larl %r9,load_fpu_regs
br %r14
-.Lcleanup_load_fpu_regs_vx:
- .quad .Lload_fpu_regs_vx
-.Lcleanup_load_fpu_regs_vx_high:
- .quad .Lload_fpu_regs_vx_high
-.Lcleanup_load_fpu_regs_fp:
- .quad .Lload_fpu_regs_fp
-.Lcleanup_load_fpu_regs_done:
- .quad .Lload_fpu_regs_done
/*
* Integer constants
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 58b719fa8067..1ad2407c7f75 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -16,7 +16,7 @@
__HEAD
ENTRY(startup_continue)
- tm __LC_STFL_FAC_LIST+6,0x80 # LPP available ?
+ tm __LC_STFL_FAC_LIST+5,0x80 # LPP available ?
jz 0f
xc __LC_LPP+1(7,0),__LC_LPP+1 # clear lpp and current_pid
mvi __LC_LPP,0x80 # and set LPP_MAGIC
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index c837bcacf218..1f581eb61bc2 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -329,6 +329,7 @@ static void __init setup_lowcore(void)
+ PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->current_task = (unsigned long) init_thread_union.thread_info.task;
lc->thread_info = (unsigned long) &init_thread_union;
+ lc->lpp = LPP_MAGIC;
lc->machine_flags = S390_lowcore.machine_flags;
lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 846589281b04..575dc123bda2 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1268,44 +1268,18 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
return 0;
}
-/*
- * Backs up the current FP/VX register save area on a particular
- * destination. Used to switch between different register save
- * areas.
- */
-static inline void save_fpu_to(struct fpu *dst)
-{
- dst->fpc = current->thread.fpu.fpc;
- dst->regs = current->thread.fpu.regs;
-}
-
-/*
- * Switches the FP/VX register save area from which to lazy
- * restore register contents.
- */
-static inline void load_fpu_from(struct fpu *from)
-{
- current->thread.fpu.fpc = from->fpc;
- current->thread.fpu.regs = from->regs;
-}
-
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
/* Save host register state */
save_fpu_regs();
- save_fpu_to(&vcpu->arch.host_fpregs);
-
- if (test_kvm_facility(vcpu->kvm, 129)) {
- current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
- /*
- * Use the register save area in the SIE-control block
- * for register restore and save in kvm_arch_vcpu_put()
- */
- current->thread.fpu.vxrs =
- (__vector128 *)&vcpu->run->s.regs.vrs;
- } else
- load_fpu_from(&vcpu->arch.guest_fpregs);
+ vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
+ vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
+ /* Depending on MACHINE_HAS_VX, the data stored to vrs is in either
+ * vector register or floating-point register format.
+ */
+ current->thread.fpu.regs = vcpu->run->s.regs.vrs;
+ current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
if (test_fp_ctl(current->thread.fpu.fpc))
/* User space provided an invalid FPC, let's clear it */
current->thread.fpu.fpc = 0;
@@ -1321,19 +1295,13 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
gmap_disable(vcpu->arch.gmap);
+ /* Save guest register state */
save_fpu_regs();
+ vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
- if (test_kvm_facility(vcpu->kvm, 129))
- /*
- * kvm_arch_vcpu_load() set up the register save area to
- * the &vcpu->run->s.regs.vrs and, thus, the vector registers
- * are already saved. Only the floating-point control must be
- * copied.
- */
- vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
- else
- save_fpu_to(&vcpu->arch.guest_fpregs);
- load_fpu_from(&vcpu->arch.host_fpregs);
+ /* Restore host register state */
+ current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
+ current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
save_access_regs(vcpu->run->s.regs.acrs);
restore_access_regs(vcpu->arch.host_acrs);
@@ -1351,8 +1319,9 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
vcpu->arch.sie_block->gcr[0] = 0xE0UL;
vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
- vcpu->arch.guest_fpregs.fpc = 0;
- asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
+ /* make sure the new fpc will be lazily loaded */
+ save_fpu_regs();
+ current->thread.fpu.fpc = 0;
vcpu->arch.sie_block->gbea = 1;
vcpu->arch.sie_block->pp = 0;
vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
@@ -1501,19 +1470,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
vcpu->arch.local_int.wq = &vcpu->wq;
vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
- /*
- * Allocate a save area for floating-point registers. If the vector
- * extension is available, register contents are saved in the SIE
- * control block. The allocated save area is still required in
- * particular places, for example, in kvm_s390_vcpu_store_status().
- */
- vcpu->arch.guest_fpregs.fprs = kzalloc(sizeof(freg_t) * __NUM_FPRS,
- GFP_KERNEL);
- if (!vcpu->arch.guest_fpregs.fprs) {
- rc = -ENOMEM;
- goto out_free_sie_block;
- }
-
rc = kvm_vcpu_init(vcpu, kvm, id);
if (rc)
goto out_free_sie_block;
@@ -1734,19 +1690,27 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
+ /* make sure the new values will be lazily loaded */
+ save_fpu_regs();
if (test_fp_ctl(fpu->fpc))
return -EINVAL;
- memcpy(vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
- vcpu->arch.guest_fpregs.fpc = fpu->fpc;
- save_fpu_regs();
- load_fpu_from(&vcpu->arch.guest_fpregs);
+ current->thread.fpu.fpc = fpu->fpc;
+ if (MACHINE_HAS_VX)
+ convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
+ else
+ memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
return 0;
}
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
- memcpy(&fpu->fprs, vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
- fpu->fpc = vcpu->arch.guest_fpregs.fpc;
+ /* make sure we have the latest values */
+ save_fpu_regs();
+ if (MACHINE_HAS_VX)
+ convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
+ else
+ memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
+ fpu->fpc = current->thread.fpu.fpc;
return 0;
}
@@ -2266,41 +2230,50 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
unsigned char archmode = 1;
+ freg_t fprs[NUM_FPRS];
unsigned int px;
u64 clkcomp;
int rc;
+ px = kvm_s390_get_prefix(vcpu);
if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
if (write_guest_abs(vcpu, 163, &archmode, 1))
return -EFAULT;
- gpa = SAVE_AREA_BASE;
+ gpa = 0;
} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
if (write_guest_real(vcpu, 163, &archmode, 1))
return -EFAULT;
- gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
+ gpa = px;
+ } else
+ gpa -= __LC_FPREGS_SAVE_AREA;
+
+ /* manually convert vector registers if necessary */
+ if (MACHINE_HAS_VX) {
+ convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
+ rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
+ fprs, 128);
+ } else {
+ rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
+ vcpu->run->s.regs.vrs, 128);
}
- rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
- vcpu->arch.guest_fpregs.fprs, 128);
- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
+ rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
vcpu->run->s.regs.gprs, 128);
- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
+ rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
&vcpu->arch.sie_block->gpsw, 16);
- px = kvm_s390_get_prefix(vcpu);
- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
+ rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
&px, 4);
- rc |= write_guest_abs(vcpu,
- gpa + offsetof(struct save_area, fp_ctrl_reg),
- &vcpu->arch.guest_fpregs.fpc, 4);
- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
+ rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
+ &vcpu->run->s.regs.fpc, 4);
+ rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
&vcpu->arch.sie_block->todpr, 4);
- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
+ rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
&vcpu->arch.sie_block->cputm, 8);
clkcomp = vcpu->arch.sie_block->ckc >> 8;
- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
+ rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
&clkcomp, 8);
- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
+ rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
&vcpu->run->s.regs.acrs, 64);
- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
+ rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
&vcpu->arch.sie_block->gcr, 128);
return rc ? -EFAULT : 0;
}
@@ -2313,19 +2286,7 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
* it into the save area
*/
save_fpu_regs();
- if (test_kvm_facility(vcpu->kvm, 129)) {
- /*
- * If the vector extension is available, the vector registers
- * which overlaps with floating-point registers are saved in
- * the SIE-control block. Hence, extract the floating-point
- * registers and the FPC value and store them in the
- * guest_fpregs structure.
- */
- vcpu->arch.guest_fpregs.fpc = current->thread.fpu.fpc;
- convert_vx_to_fp(vcpu->arch.guest_fpregs.fprs,
- current->thread.fpu.vxrs);
- } else
- save_fpu_to(&vcpu->arch.guest_fpregs);
+ vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
save_access_regs(vcpu->run->s.regs.acrs);
return kvm_s390_store_status_unloaded(vcpu, addr);
diff --git a/arch/s390/mm/extable.c b/arch/s390/mm/extable.c
index 4d1ee88864e8..18c8b819b0aa 100644
--- a/arch/s390/mm/extable.c
+++ b/arch/s390/mm/extable.c
@@ -52,12 +52,16 @@ void sort_extable(struct exception_table_entry *start,
int i;
/* Normalize entries to being relative to the start of the section */
- for (p = start, i = 0; p < finish; p++, i += 8)
+ for (p = start, i = 0; p < finish; p++, i += 8) {
p->insn += i;
+ p->fixup += i + 4;
+ }
sort(start, finish - start, sizeof(*start), cmp_ex, NULL);
/* Denormalize all entries */
- for (p = start, i = 0; p < finish; p++, i += 8)
+ for (p = start, i = 0; p < finish; p++, i += 8) {
p->insn -= i;
+ p->fixup -= i + 4;
+ }
}
#ifdef CONFIG_MODULES
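
The sort fix works because s390 exception-table entries are self-relative: each 32-bit field encodes an offset from the field's own address, so moving entries during the sort silently changes their meaning. Both fields (insn at byte 0, fixup at byte 4 of each 8-byte entry) must be rebased to section-relative form before sorting and back afterwards; previously only insn was converted. A sketch of the symmetric conversion:

#include <stdint.h>

struct exentry { int32_t insn; int32_t fixup; };	/* self-relative */

static void normalize(struct exentry *start, struct exentry *finish)
{
	int64_t i = 0;

	for (struct exentry *p = start; p < finish; p++, i += 8) {
		p->insn  += i;		/* field sits at entry offset 0 */
		p->fixup += i + 4;	/* field sits at entry offset 4 */
	}
}

static void denormalize(struct exentry *start, struct exentry *finish)
{
	int64_t i = 0;

	for (struct exentry *p = start; p < finish; p++, i += 8) {
		p->insn  -= i;
		p->fixup -= i + 4;
	}
}

int main(void)
{
	struct exentry t[2] = { { 100, 200 }, { -50, 60 } };

	normalize(t, t + 2);	/* a real caller would sort here */
	denormalize(t, t + 2);
	return (t[0].insn == 100 && t[1].fixup == 60) ? 0 : 1;
}
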
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 7ef12a3ace3a..19442395f413 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -871,8 +871,11 @@ static inline int barsize(u8 size)
static int zpci_mem_init(void)
{
+ BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
+ __alignof__(struct zpci_fmb) < sizeof(struct zpci_fmb));
+
zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
- 16, 0, NULL);
+ __alignof__(struct zpci_fmb), 0, NULL);
if (!zdev_fmb_cache)
goto error_zdev;
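
The BUILD_BUG_ON encodes the assumption that lets __alignof__ be passed straight to kmem_cache_create(): the alignment must be a power of two and at least the structure size, so every object in the cache lands on its own aligned block. The same invariant checked at compile time for an illustrative 64-byte stand-in (not the real struct zpci_fmb; the aligned attribute is a GCC extension):

#include <assert.h>
#include <stdalign.h>

struct fmb_model {
	unsigned long long words[8];	/* 64 bytes of counters */
} __attribute__((aligned(64)));

int main(void)
{
	static_assert((alignof(struct fmb_model) &
		       (alignof(struct fmb_model) - 1)) == 0,
		      "alignment must be a power of two");
	static_assert(alignof(struct fmb_model) >= sizeof(struct fmb_model),
		      "alignment must cover the whole structure");
	return 0;
}
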
diff --git a/arch/sh/mm/kmap.c b/arch/sh/mm/kmap.c
index ec29e14ec5a8..bf25d7c79a2d 100644
--- a/arch/sh/mm/kmap.c
+++ b/arch/sh/mm/kmap.c
@@ -36,6 +36,7 @@ void *kmap_coherent(struct page *page, unsigned long addr)
BUG_ON(!test_bit(PG_dcache_clean, &page->flags));
+ preempt_disable();
pagefault_disable();
idx = FIX_CMAP_END -
@@ -64,4 +65,5 @@ void kunmap_coherent(void *kvaddr)
}
pagefault_enable();
+ preempt_enable();
}
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index c48d93b60afe..b489e9759518 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -413,7 +413,7 @@ out:
SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
{
- int ret;
+ long ret;
if (personality(current->personality) == PER_LINUX32 &&
personality(personality) == PER_LINUX)
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index 29880c9b324e..e22e57298522 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -133,7 +133,7 @@ void mconsole_proc(struct mc_request *req)
ptr += strlen("proc");
ptr = skip_spaces(ptr);
- file = file_open_root(mnt->mnt_root, mnt, ptr, O_RDONLY);
+ file = file_open_root(mnt->mnt_root, mnt, ptr, O_RDONLY, 0);
if (IS_ERR(file)) {
mconsole_reply(req, "Failed to open file", 1, 0);
printk(KERN_ERR "open /proc/%s: %ld\n", ptr, PTR_ERR(file));
diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c
index 47f1ff056a54..22a358ef1b0c 100644
--- a/arch/um/os-Linux/start_up.c
+++ b/arch/um/os-Linux/start_up.c
@@ -94,6 +94,8 @@ static int start_ptraced_child(void)
{
int pid, n, status;
+ fflush(stdout);
+
pid = fork();
if (pid == 0)
ptrace_child();
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ae3c83e8bde8..be2395bc2172 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1142,22 +1142,23 @@ config MICROCODE
bool "CPU microcode loading support"
default y
depends on CPU_SUP_AMD || CPU_SUP_INTEL
- depends on BLK_DEV_INITRD
select FW_LOADER
---help---
-
If you say Y here, you will be able to update the microcode on
- certain Intel and AMD processors. The Intel support is for the
- IA32 family, e.g. Pentium Pro, Pentium II, Pentium III, Pentium 4,
- Xeon etc. The AMD support is for families 0x10 and later. You will
- obviously need the actual microcode binary data itself which is not
- shipped with the Linux kernel.
-
- This option selects the general module only, you need to select
- at least one vendor specific module as well.
-
- To compile this driver as a module, choose M here: the module
- will be called microcode.
+ Intel and AMD processors. The Intel support is for the IA32 family,
+ e.g. Pentium Pro, Pentium II, Pentium III, Pentium 4, Xeon etc. The
+ AMD support is for families 0x10 and later. You will obviously need
+ the actual microcode binary data itself which is not shipped with
+ the Linux kernel.
+
+ The preferred method to load microcode from a detached initrd is described
+ in Documentation/x86/early-microcode.txt. For that you need to enable
+ CONFIG_BLK_DEV_INITRD in order for the loader to be able to scan the
+ initrd for microcode blobs.
+
+ In addition, you can build the microcode into the kernel. For that you
+ need to enable FIRMWARE_IN_KERNEL and add the vendor-supplied microcode
+ to the CONFIG_EXTRA_FIRMWARE config option.
config MICROCODE_INTEL
bool "Intel microcode loading support"
diff --git a/arch/x86/crypto/chacha20-ssse3-x86_64.S b/arch/x86/crypto/chacha20-ssse3-x86_64.S
index 712b13047b41..3a33124e9112 100644
--- a/arch/x86/crypto/chacha20-ssse3-x86_64.S
+++ b/arch/x86/crypto/chacha20-ssse3-x86_64.S
@@ -157,7 +157,9 @@ ENTRY(chacha20_4block_xor_ssse3)
# done with the slightly better performing SSSE3 byte shuffling,
# 7/12-bit word rotation uses traditional shift+OR.
- sub $0x40,%rsp
+ mov %rsp,%r11
+ sub $0x80,%rsp
+ and $~63,%rsp
# x0..15[0-3] = s0..3[0..3]
movq 0x00(%rdi),%xmm1
@@ -620,6 +622,6 @@ ENTRY(chacha20_4block_xor_ssse3)
pxor %xmm1,%xmm15
movdqu %xmm15,0xf0(%rsi)
- add $0x40,%rsp
+ mov %r11,%rsp
ret
ENDPROC(chacha20_4block_xor_ssse3)
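
The new prologue saves %rsp in %r11, over-allocates, and masks %rsp down to a 64-byte boundary so the spill slots are cache-line aligned. The same save/align/restore idea, sketched in C for an ordinary buffer (rounding up rather than down, since the buffer grows upward):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned char buf[0x80];        /* over-allocate, like sub $0x80,%rsp */
        uintptr_t raw = (uintptr_t)buf;

        /* The asm rounds %rsp down (and $~63,%rsp) because the stack grows
         * downward; for a buffer growing upward we round the base up. */
        uintptr_t aligned = (raw + 63) & ~(uintptr_t)63;

        printf("raw:     %p\n", (void *)raw);
        printf("aligned: %p (mod 64 = %lu)\n", (void *)aligned,
               (unsigned long)(aligned & 63));
        return 0;
}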
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 03663740c866..1a4477cedc49 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -268,6 +268,7 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
/* Called with IRQs disabled. */
__visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
{
+ struct thread_info *ti = pt_regs_to_thread_info(regs);
u32 cached_flags;
if (IS_ENABLED(CONFIG_PROVE_LOCKING) && WARN_ON(!irqs_disabled()))
@@ -275,12 +276,22 @@ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
lockdep_sys_exit();
- cached_flags =
- READ_ONCE(pt_regs_to_thread_info(regs)->flags);
+ cached_flags = READ_ONCE(ti->flags);
if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
exit_to_usermode_loop(regs, cached_flags);
+#ifdef CONFIG_COMPAT
+ /*
+ * Compat syscalls set TS_COMPAT. Make sure we clear it before
+ * returning to user mode. We need to clear it *after* signal
+ * handling, because syscall restart has a fixup for compat
+ * syscalls. The fixup is exercised by the ptrace_syscall_32
+ * selftest.
+ */
+ ti->status &= ~TS_COMPAT;
+#endif
+
user_enter();
}
@@ -332,14 +343,6 @@ __visible inline void syscall_return_slowpath(struct pt_regs *regs)
if (unlikely(cached_flags & SYSCALL_EXIT_WORK_FLAGS))
syscall_slow_exit_work(regs, cached_flags);
-#ifdef CONFIG_COMPAT
- /*
- * Compat syscalls set TS_COMPAT. Make sure we clear it before
- * returning to user mode.
- */
- ti->status &= ~TS_COMPAT;
-#endif
-
local_irq_disable();
prepare_exit_to_usermode(regs);
}
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index 6a1ae3751e82..15cfebaa7688 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -267,6 +267,7 @@ ENTRY(entry_INT80_compat)
* Interrupts are off on entry.
*/
PARAVIRT_ADJUST_EXCEPTION_FRAME
+ ASM_CLAC /* Do this early to minimize exposure */
SWAPGS
/*
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index a30316bf801a..163769d82475 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -638,8 +638,8 @@ static inline void entering_irq(void)
static inline void entering_ack_irq(void)
{
- ack_APIC_irq();
entering_irq();
+ ack_APIC_irq();
}
static inline void ipi_entering_ack_irq(void)
diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
index 4fa687a47a62..6b8d6e8cd449 100644
--- a/arch/x86/include/asm/boot.h
+++ b/arch/x86/include/asm/boot.h
@@ -27,7 +27,7 @@
#define BOOT_HEAP_SIZE 0x400000
#else /* !CONFIG_KERNEL_BZIP2 */
-#define BOOT_HEAP_SIZE 0x8000
+#define BOOT_HEAP_SIZE 0x10000
#endif /* !CONFIG_KERNEL_BZIP2 */
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 1e3408e88604..59caa55fb9b5 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -136,6 +136,7 @@ struct irq_alloc_info {
struct irq_cfg {
unsigned int dest_apicid;
u8 vector;
+ u8 old_vector;
};
extern struct irq_cfg *irq_cfg(unsigned int irq);
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 881b4768644a..e7de5c9a4fbd 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -23,11 +23,13 @@ extern void irq_ctx_init(int cpu);
#define __ARCH_HAS_DO_SOFTIRQ
+struct irq_desc;
+
#ifdef CONFIG_HOTPLUG_CPU
#include <linux/cpumask.h>
extern int check_irq_vectors_for_cpu_disable(void);
extern void fixup_irqs(void);
-extern void irq_force_complete_move(int);
+extern void irq_force_complete_move(struct irq_desc *desc);
#endif
#ifdef CONFIG_HAVE_KVM
@@ -37,7 +39,6 @@ extern void kvm_set_posted_intr_wakeup_handler(void (*handler)(void));
extern void (*x86_platform_ipi_callback)(void);
extern void native_init_IRQ(void);
-struct irq_desc;
extern bool handle_irq(struct irq_desc *desc, struct pt_regs *regs);
extern __visible unsigned int do_IRQ(struct pt_regs *regs);
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 30cfd64295a0..9d2abb2a41d2 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -41,7 +41,7 @@
#define KVM_PIO_PAGE_OFFSET 1
#define KVM_COALESCED_MMIO_PAGE_OFFSET 2
-#define KVM_HALT_POLL_NS_DEFAULT 500000
+#define KVM_HALT_POLL_NS_DEFAULT 400000
#define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 34e62b1dcfce..712b24ed3a64 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -2,6 +2,7 @@
#define _ASM_X86_MICROCODE_H
#include <linux/earlycpio.h>
+#include <linux/initrd.h>
#define native_rdmsr(msr, val1, val2) \
do { \
@@ -168,4 +169,29 @@ static inline void reload_early_microcode(void) { }
static inline bool
get_builtin_firmware(struct cpio_data *cd, const char *name) { return false; }
#endif
+
+static inline unsigned long get_initrd_start(void)
+{
+#ifdef CONFIG_BLK_DEV_INITRD
+ return initrd_start;
+#else
+ return 0;
+#endif
+}
+
+static inline unsigned long get_initrd_start_addr(void)
+{
+#ifdef CONFIG_BLK_DEV_INITRD
+#ifdef CONFIG_X86_32
+ unsigned long *initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start);
+
+ return (unsigned long)__pa_nodebug(*initrd_start_p);
+#else
+ return get_initrd_start();
+#endif
+#else /* CONFIG_BLK_DEV_INITRD */
+ return 0;
+#endif
+}
+
#endif /* _ASM_X86_MICROCODE_H */
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 379cd3658799..bfd9b2a35a0b 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -116,8 +116,36 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
#endif
cpumask_set_cpu(cpu, mm_cpumask(next));
- /* Re-load page tables */
+ /*
+ * Re-load page tables.
+ *
+ * This logic has an ordering constraint:
+ *
+ * CPU 0: Write to a PTE for 'next'
+	 * CPU 0: load bit 1 in mm_cpumask. If nonzero, send IPI.
+ * CPU 1: set bit 1 in next's mm_cpumask
+ * CPU 1: load from the PTE that CPU 0 writes (implicit)
+ *
+ * We need to prevent an outcome in which CPU 1 observes
+ * the new PTE value and CPU 0 observes bit 1 clear in
+ * mm_cpumask. (If that occurs, then the IPI will never
+ * be sent, and CPU 0's TLB will contain a stale entry.)
+ *
+ * The bad outcome can occur if either CPU's load is
+ * reordered before that CPU's store, so both CPUs must
+ * execute full barriers to prevent this from happening.
+ *
+ * Thus, switch_mm needs a full barrier between the
+ * store to mm_cpumask and any operation that could load
+ * from next->pgd. TLB fills are special and can happen
+ * due to instruction fetches or for no reason at all,
+ * and neither LOCK nor MFENCE orders them.
+ * Fortunately, load_cr3() is serializing and gives the
+ * ordering guarantee we need.
+ *
+ */
load_cr3(next->pgd);
+
trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
/* Stop flush ipis for the previous mm */
@@ -156,10 +184,14 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
* schedule, protecting us from simultaneous changes.
*/
cpumask_set_cpu(cpu, mm_cpumask(next));
+
/*
* We were in lazy tlb mode and leave_mm disabled
* tlb flush IPI delivery. We must reload CR3
* to make sure to use no freed page tables.
+ *
+ * As above, load_cr3() is serializing and orders TLB
+ * fills with respect to the mm_cpumask write.
*/
load_cr3(next->pgd);
trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
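
The constraint described in the comment above is the classic store-then-load pattern: each CPU's load must not be reordered before its own store. A minimal C11 sketch of the same shape, with seq_cst fences standing in for the serializing load_cr3(); pte and mask_bit are illustrative stand-ins, not kernel objects:

#include <stdatomic.h>

static atomic_int pte;          /* stands in for the PTE that CPU 0 writes */
static atomic_int mask_bit;     /* stands in for this cpu's mm_cpumask bit */

/* CPU 0: publish the PTE, then decide whether to send the flush IPI. */
static int cpu0_needs_ipi(void)
{
        atomic_store_explicit(&pte, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* full barrier */
        return atomic_load_explicit(&mask_bit, memory_order_relaxed);
}

/* CPU 1: join the mm, then read through the page tables. */
static int cpu1_join_then_load(void)
{
        atomic_store_explicit(&mask_bit, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* load_cr3()'s role */
        return atomic_load_explicit(&pte, memory_order_relaxed);
}

/* Run concurrently, the fences forbid the bad outcome where both loads
 * return 0: a missed IPI plus a stale TLB entry. */
int main(void)
{
        return cpu0_needs_ipi() & cpu1_join_then_load();
}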
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index fa1195dae425..164e3f8d3c3d 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -93,6 +93,8 @@ extern raw_spinlock_t pci_config_lock;
extern int (*pcibios_enable_irq)(struct pci_dev *dev);
extern void (*pcibios_disable_irq)(struct pci_dev *dev);
+extern bool mp_should_keep_irq(struct device *dev);
+
struct pci_raw_ops {
int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
int reg, int len, u32 *val);
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 7bcb861a04e5..5a2ed3ed2f26 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -165,6 +165,7 @@ struct x86_pmu_capability {
#define GLOBAL_STATUS_ASIF BIT_ULL(60)
#define GLOBAL_STATUS_COUNTERS_FROZEN BIT_ULL(59)
#define GLOBAL_STATUS_LBRS_FROZEN BIT_ULL(58)
+#define GLOBAL_STATUS_TRACE_TOPAPMI BIT_ULL(55)
/*
* IBS cpuid feature detection
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index a471cadb9630..79c91853e50e 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -363,20 +363,18 @@ static inline enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
}
static inline pgprot_t pgprot_4k_2_large(pgprot_t pgprot)
{
+ pgprotval_t val = pgprot_val(pgprot);
pgprot_t new;
- unsigned long val;
- val = pgprot_val(pgprot);
pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
return new;
}
static inline pgprot_t pgprot_large_2_4k(pgprot_t pgprot)
{
+ pgprotval_t val = pgprot_val(pgprot);
pgprot_t new;
- unsigned long val;
- val = pgprot_val(pgprot);
pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
((val & _PAGE_PAT_LARGE) >>
(_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
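
These two helpers just relocate one attribute bit: 4K PTEs keep PAT at bit 7 (_PAGE_BIT_PAT) while large-page entries keep it at bit 12 (_PAGE_BIT_PAT_LARGE). A compact sketch of the 4k-to-large direction, assuming those conventional bit positions:

#include <stdint.h>
#include <stdio.h>

#define PAT_4K          (1ULL << 7)     /* _PAGE_PAT */
#define PAT_LARGE       (1ULL << 12)    /* _PAGE_PAT_LARGE */

static uint64_t pgprot_4k_2_large(uint64_t val)
{
        /* Clear both PAT positions, then move the 4K PAT bit up by 5. */
        return (val & ~(PAT_4K | PAT_LARGE)) | ((val & PAT_4K) << (12 - 7));
}

int main(void)
{
        uint64_t prot = PAT_4K | 0x3;   /* PAT set, plus present/write bits */

        printf("4k:    %#llx\n", (unsigned long long)prot);
        printf("large: %#llx\n", (unsigned long long)pgprot_4k_2_large(prot));
        return 0;
}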
diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h
index 8b2d4bea9962..39171b3646bb 100644
--- a/arch/x86/include/asm/xen/hypervisor.h
+++ b/arch/x86/include/asm/xen/hypervisor.h
@@ -62,4 +62,6 @@ void xen_arch_register_cpu(int num);
void xen_arch_unregister_cpu(int num);
#endif
+extern void xen_set_iopl_mask(unsigned mask);
+
#endif /* _ASM_X86_XEN_HYPERVISOR_H */
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index d1daead5fcdd..adb3eaf8fe2a 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -16,6 +16,7 @@
#include <asm/cacheflush.h>
#include <asm/realmode.h>
+#include <linux/ftrace.h>
#include "../../realmode/rm/wakeup.h"
#include "sleep.h"
@@ -107,7 +108,13 @@ int x86_acpi_suspend_lowlevel(void)
saved_magic = 0x123456789abcdef0L;
#endif /* CONFIG_64BIT */
+ /*
+ * Pause/unpause graph tracing around do_suspend_lowlevel as it has
+ * inconsistent call/return info after it jumps to the wakeup vector.
+ */
+ pause_graph_tracing();
do_suspend_lowlevel();
+ unpause_graph_tracing();
return 0;
}
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index f25321894ad2..fdb0fbfb1197 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2521,6 +2521,7 @@ void __init setup_ioapic_dest(void)
{
int pin, ioapic, irq, irq_entry;
const struct cpumask *mask;
+ struct irq_desc *desc;
struct irq_data *idata;
struct irq_chip *chip;
@@ -2536,7 +2537,9 @@ void __init setup_ioapic_dest(void)
if (irq < 0 || !mp_init_irq_at_boot(ioapic, irq))
continue;
- idata = irq_get_irq_data(irq);
+ desc = irq_to_desc(irq);
+ raw_spin_lock_irq(&desc->lock);
+ idata = irq_desc_get_irq_data(desc);
/*
* Honour affinities which have been set in early boot
@@ -2550,6 +2553,7 @@ void __init setup_ioapic_dest(void)
/* Might be lapic_chip for irq 0 */
if (chip->irq_set_affinity)
chip->irq_set_affinity(idata, mask, false);
+ raw_spin_unlock_irq(&desc->lock);
}
}
#endif
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 861bc59c8f25..7af2505f20c2 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -30,7 +30,7 @@ struct apic_chip_data {
struct irq_domain *x86_vector_domain;
static DEFINE_RAW_SPINLOCK(vector_lock);
-static cpumask_var_t vector_cpumask;
+static cpumask_var_t vector_cpumask, vector_searchmask, searched_cpumask;
static struct irq_chip lapic_controller;
#ifdef CONFIG_X86_IO_APIC
static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY];
@@ -116,35 +116,47 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
*/
static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
static int current_offset = VECTOR_OFFSET_START % 16;
- int cpu, err;
+ int cpu, vector;
- if (d->move_in_progress)
+ /*
+ * If there is still a move in progress or the previous move has not
+ * been cleaned up completely, tell the caller to come back later.
+ */
+ if (d->move_in_progress ||
+ cpumask_intersects(d->old_domain, cpu_online_mask))
return -EBUSY;
/* Only try and allocate irqs on cpus that are present */
- err = -ENOSPC;
cpumask_clear(d->old_domain);
+ cpumask_clear(searched_cpumask);
cpu = cpumask_first_and(mask, cpu_online_mask);
while (cpu < nr_cpu_ids) {
- int new_cpu, vector, offset;
+ int new_cpu, offset;
+ /* Get the possible target cpus for @mask/@cpu from the apic */
apic->vector_allocation_domain(cpu, vector_cpumask, mask);
+ /*
+ * Clear the offline cpus from @vector_cpumask for searching
+ * and verify whether the result overlaps with @mask. If true,
+ * then the call to apic->cpu_mask_to_apicid_and() will
+ * succeed as well. If not, no point in trying to find a
+ * vector in this mask.
+ */
+ cpumask_and(vector_searchmask, vector_cpumask, cpu_online_mask);
+ if (!cpumask_intersects(vector_searchmask, mask))
+ goto next_cpu;
+
if (cpumask_subset(vector_cpumask, d->domain)) {
- err = 0;
if (cpumask_equal(vector_cpumask, d->domain))
- break;
+ goto success;
/*
- * New cpumask using the vector is a proper subset of
- * the current in use mask. So cleanup the vector
- * allocation for the members that are not used anymore.
+			 * Mark the cpus which are no longer in the mask for
+ * cleanup.
*/
- cpumask_andnot(d->old_domain, d->domain,
- vector_cpumask);
- d->move_in_progress =
- cpumask_intersects(d->old_domain, cpu_online_mask);
- cpumask_and(d->domain, d->domain, vector_cpumask);
- break;
+ cpumask_andnot(d->old_domain, d->domain, vector_cpumask);
+ vector = d->cfg.vector;
+ goto update;
}
vector = current_vector;
@@ -156,45 +168,61 @@ next:
vector = FIRST_EXTERNAL_VECTOR + offset;
}
- if (unlikely(current_vector == vector)) {
- cpumask_or(d->old_domain, d->old_domain,
- vector_cpumask);
- cpumask_andnot(vector_cpumask, mask, d->old_domain);
- cpu = cpumask_first_and(vector_cpumask,
- cpu_online_mask);
- continue;
- }
+ /* If the search wrapped around, try the next cpu */
+ if (unlikely(current_vector == vector))
+ goto next_cpu;
if (test_bit(vector, used_vectors))
goto next;
- for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) {
+ for_each_cpu(new_cpu, vector_searchmask) {
if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector]))
goto next;
}
/* Found one! */
current_vector = vector;
current_offset = offset;
- if (d->cfg.vector) {
+ /* Schedule the old vector for cleanup on all cpus */
+ if (d->cfg.vector)
cpumask_copy(d->old_domain, d->domain);
- d->move_in_progress =
- cpumask_intersects(d->old_domain, cpu_online_mask);
- }
- for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask)
+ for_each_cpu(new_cpu, vector_searchmask)
per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq);
- d->cfg.vector = vector;
- cpumask_copy(d->domain, vector_cpumask);
- err = 0;
- break;
- }
+ goto update;
- if (!err) {
- /* cache destination APIC IDs into cfg->dest_apicid */
- err = apic->cpu_mask_to_apicid_and(mask, d->domain,
- &d->cfg.dest_apicid);
+next_cpu:
+ /*
+ * We exclude the current @vector_cpumask from the requested
+ * @mask and try again with the next online cpu in the
+ * result. We cannot modify @mask, so we use @vector_cpumask
+		 * as a temporary buffer here, since it will be reassigned when
+ * calling apic->vector_allocation_domain() above.
+ */
+ cpumask_or(searched_cpumask, searched_cpumask, vector_cpumask);
+ cpumask_andnot(vector_cpumask, mask, searched_cpumask);
+ cpu = cpumask_first_and(vector_cpumask, cpu_online_mask);
+ continue;
}
+ return -ENOSPC;
- return err;
+update:
+ /*
+ * Exclude offline cpus from the cleanup mask and set the
+ * move_in_progress flag when the result is not empty.
+ */
+ cpumask_and(d->old_domain, d->old_domain, cpu_online_mask);
+ d->move_in_progress = !cpumask_empty(d->old_domain);
+ d->cfg.old_vector = d->move_in_progress ? d->cfg.vector : 0;
+ d->cfg.vector = vector;
+ cpumask_copy(d->domain, vector_cpumask);
+success:
+ /*
+ * Cache destination APIC IDs into cfg->dest_apicid. This cannot fail
+	 * as we already established that mask & d->domain & cpu_online_mask
+ * is not empty.
+ */
+ BUG_ON(apic->cpu_mask_to_apicid_and(mask, d->domain,
+ &d->cfg.dest_apicid));
+ return 0;
}
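
The reworked loop above never mutates the caller's @mask; it accumulates every domain it has already tried in searched_cpumask and picks the next candidate from whatever is left. The same exclusion-search skeleton with plain 64-bit masks; domain_of() and try_domain() are made-up stand-ins for apic->vector_allocation_domain() and the per-vector availability test:

#include <stdint.h>

/* Hypothetical: the set of cpus that share vectors with @cpu
 * (here: clusters of four). */
static uint64_t domain_of(int cpu)
{
        return 0xfULL << (cpu & ~3);
}

/* Hypothetical: pretend only cluster 1 has a free vector. */
static int try_domain(uint64_t domain)
{
        return domain == 0xf0;
}

static int first_cpu(uint64_t mask)
{
        return mask ? __builtin_ctzll(mask) : -1;
}

/* Try each candidate domain once, accumulating what was already searched
 * instead of mutating the caller's request mask. */
static int search(uint64_t requested, uint64_t online)
{
        uint64_t searched = 0;
        int cpu = first_cpu(requested & online);

        while (cpu >= 0) {
                uint64_t domain = domain_of(cpu);

                if (try_domain(domain))
                        return cpu;
                searched |= domain;
                cpu = first_cpu(requested & ~searched & online);
        }
        return -1;
}

int main(void)
{
        return search(0xff, 0xff) != 4; /* finds cpu 4 in cluster 1 */
}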
static int assign_irq_vector(int irq, struct apic_chip_data *data,
@@ -224,10 +252,8 @@ static int assign_irq_vector_policy(int irq, int node,
static void clear_irq_vector(int irq, struct apic_chip_data *data)
{
struct irq_desc *desc;
- unsigned long flags;
int cpu, vector;
- raw_spin_lock_irqsave(&vector_lock, flags);
BUG_ON(!data->cfg.vector);
vector = data->cfg.vector;
@@ -237,10 +263,13 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
data->cfg.vector = 0;
cpumask_clear(data->domain);
- if (likely(!data->move_in_progress)) {
- raw_spin_unlock_irqrestore(&vector_lock, flags);
+ /*
+ * If move is in progress or the old_domain mask is not empty,
+ * i.e. the cleanup IPI has not been processed yet, we need to remove
+	 * the old references to desc from all cpus' vector tables.
+ */
+ if (!data->move_in_progress && cpumask_empty(data->old_domain))
return;
- }
desc = irq_to_desc(irq);
for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) {
@@ -253,7 +282,6 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
}
}
data->move_in_progress = 0;
- raw_spin_unlock_irqrestore(&vector_lock, flags);
}
void init_irq_alloc_info(struct irq_alloc_info *info,
@@ -274,19 +302,24 @@ void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
static void x86_vector_free_irqs(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs)
{
+ struct apic_chip_data *apic_data;
struct irq_data *irq_data;
+ unsigned long flags;
int i;
for (i = 0; i < nr_irqs; i++) {
irq_data = irq_domain_get_irq_data(x86_vector_domain, virq + i);
if (irq_data && irq_data->chip_data) {
+ raw_spin_lock_irqsave(&vector_lock, flags);
clear_irq_vector(virq + i, irq_data->chip_data);
- free_apic_chip_data(irq_data->chip_data);
+ apic_data = irq_data->chip_data;
+ irq_domain_reset_irq_data(irq_data);
+ raw_spin_unlock_irqrestore(&vector_lock, flags);
+ free_apic_chip_data(apic_data);
#ifdef CONFIG_X86_IO_APIC
if (virq + i < nr_legacy_irqs())
legacy_irq_data[virq + i] = NULL;
#endif
- irq_domain_reset_irq_data(irq_data);
}
}
}
@@ -404,6 +437,8 @@ int __init arch_early_irq_init(void)
arch_init_htirq_domain(x86_vector_domain);
BUG_ON(!alloc_cpumask_var(&vector_cpumask, GFP_KERNEL));
+ BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));
+ BUG_ON(!alloc_cpumask_var(&searched_cpumask, GFP_KERNEL));
return arch_early_ioapic_init();
}
@@ -492,14 +527,7 @@ static int apic_set_affinity(struct irq_data *irq_data,
return -EINVAL;
err = assign_irq_vector(irq, data, dest);
- if (err) {
- if (assign_irq_vector(irq, data,
- irq_data_get_affinity_mask(irq_data)))
- pr_err("Failed to recover vector for irq %d\n", irq);
- return err;
- }
-
- return IRQ_SET_MASK_OK;
+ return err ? err : IRQ_SET_MASK_OK;
}
static struct irq_chip lapic_controller = {
@@ -511,20 +539,12 @@ static struct irq_chip lapic_controller = {
#ifdef CONFIG_SMP
static void __send_cleanup_vector(struct apic_chip_data *data)
{
- cpumask_var_t cleanup_mask;
-
- if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
- unsigned int i;
-
- for_each_cpu_and(i, data->old_domain, cpu_online_mask)
- apic->send_IPI_mask(cpumask_of(i),
- IRQ_MOVE_CLEANUP_VECTOR);
- } else {
- cpumask_and(cleanup_mask, data->old_domain, cpu_online_mask);
- apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
- free_cpumask_var(cleanup_mask);
- }
+ raw_spin_lock(&vector_lock);
+ cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
data->move_in_progress = 0;
+ if (!cpumask_empty(data->old_domain))
+ apic->send_IPI_mask(data->old_domain, IRQ_MOVE_CLEANUP_VECTOR);
+ raw_spin_unlock(&vector_lock);
}
void send_cleanup_vector(struct irq_cfg *cfg)
@@ -568,12 +588,25 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
goto unlock;
/*
- * Check if the irq migration is in progress. If so, we
- * haven't received the cleanup request yet for this irq.
+		 * Nothing to clean up if irq migration is in progress
+ * or this cpu is not set in the cleanup mask.
*/
- if (data->move_in_progress)
+ if (data->move_in_progress ||
+ !cpumask_test_cpu(me, data->old_domain))
goto unlock;
+ /*
+ * We have two cases to handle here:
+ * 1) vector is unchanged but the target mask got reduced
+		 * 2) vector and the target mask have changed
+		 *
+		 * #1 is obvious, but in #2 we have two vectors with the same
+		 * irq descriptor: the old and the new vector. So we need to
+		 * make sure that we only clean up the old vector. The new
+ * vector has the current @vector number in the config and
+ * this cpu is part of the target mask. We better leave that
+ * one alone.
+ */
if (vector == data->cfg.vector &&
cpumask_test_cpu(me, data->domain))
goto unlock;
@@ -591,6 +624,7 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
goto unlock;
}
__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
+ cpumask_clear_cpu(me, data->old_domain);
unlock:
raw_spin_unlock(&desc->lock);
}
@@ -619,12 +653,99 @@ void irq_complete_move(struct irq_cfg *cfg)
__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
}
-void irq_force_complete_move(int irq)
+/*
+ * Called from fixup_irqs() with @desc->lock held and interrupts disabled.
+ */
+void irq_force_complete_move(struct irq_desc *desc)
{
- struct irq_cfg *cfg = irq_cfg(irq);
+ struct irq_data *irqdata = irq_desc_get_irq_data(desc);
+ struct apic_chip_data *data = apic_chip_data(irqdata);
+ struct irq_cfg *cfg = data ? &data->cfg : NULL;
+ unsigned int cpu;
+
+ if (!cfg)
+ return;
+
+ /*
+ * This is tricky. If the cleanup of @data->old_domain has not been
+ * done yet, then the following setaffinity call will fail with
+ * -EBUSY. This can leave the interrupt in a stale state.
+ *
+ * All CPUs are stuck in stop machine with interrupts disabled so
+ * calling __irq_complete_move() would be completely pointless.
+ */
+ raw_spin_lock(&vector_lock);
+ /*
+ * Clean out all offline cpus (including the outgoing one) from the
+ * old_domain mask.
+ */
+ cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
- if (cfg)
- __irq_complete_move(cfg, cfg->vector);
+ /*
+ * If move_in_progress is cleared and the old_domain mask is empty,
+	 * then there is nothing to clean up. fixup_irqs() will take care of
+ * the stale vectors on the outgoing cpu.
+ */
+ if (!data->move_in_progress && cpumask_empty(data->old_domain)) {
+ raw_spin_unlock(&vector_lock);
+ return;
+ }
+
+ /*
+ * 1) The interrupt is in move_in_progress state. That means that we
+ * have not seen an interrupt since the io_apic was reprogrammed to
+ * the new vector.
+ *
+ * 2) The interrupt has fired on the new vector, but the cleanup IPIs
+ * have not been processed yet.
+ */
+ if (data->move_in_progress) {
+ /*
+ * In theory there is a race:
+ *
+ * set_ioapic(new_vector) <-- Interrupt is raised before update
+ * is effective, i.e. it's raised on
+ * the old vector.
+ *
+ * So if the target cpu cannot handle that interrupt before
+ * the old vector is cleaned up, we get a spurious interrupt
+ * and in the worst case the ioapic irq line becomes stale.
+ *
+		 * But in case of cpu hotplug this should be a non-issue
+		 * because if the affinity update happens right before all
+		 * cpus rendezvous in stop machine, there is no way that the
+		 * interrupt can be blocked on the target cpu because all cpus
+		 * loop first with interrupts enabled in stop machine, so the
+ * old vector is not yet cleaned up when the interrupt fires.
+ *
+ * So the only way to run into this issue is if the delivery
+ * of the interrupt on the apic/system bus would be delayed
+ * beyond the point where the target cpu disables interrupts
+ * in stop machine. I doubt that it can happen, but at least
+		 * there is a theoretical chance. Virtualization might be
+ * able to expose this, but AFAICT the IOAPIC emulation is not
+ * as stupid as the real hardware.
+ *
+ * Anyway, there is nothing we can do about that at this point
+ * w/o refactoring the whole fixup_irq() business completely.
+ * We print at least the irq number and the old vector number,
+ * so we have the necessary information when a problem in that
+ * area arises.
+ */
+ pr_warn("IRQ fixup: irq %d move in progress, old vector %d\n",
+ irqdata->irq, cfg->old_vector);
+ }
+ /*
+ * If old_domain is not empty, then other cpus still have the irq
+ * descriptor set in their vector array. Clean it up.
+ */
+ for_each_cpu(cpu, data->old_domain)
+ per_cpu(vector_irq, cpu)[cfg->old_vector] = VECTOR_UNUSED;
+
+	/* Clean up the leftovers of the (half-finished) move */
+ cpumask_clear(data->old_domain);
+ data->move_in_progress = 0;
+ raw_spin_unlock(&vector_lock);
}
#endif
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index ce47402eb2f9..ac8975a65280 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -555,10 +555,14 @@ scan_microcode(struct mc_saved_data *mc_saved_data, unsigned long *initrd,
cd.data = NULL;
cd.size = 0;
- cd = find_cpio_data(p, (void *)start, size, &offset);
- if (!cd.data) {
+ /* try built-in microcode if no initrd */
+ if (!size) {
if (!load_builtin_intel_microcode(&cd))
return UCODE_ERROR;
+ } else {
+ cd = find_cpio_data(p, (void *)start, size, &offset);
+ if (!cd.data)
+ return UCODE_ERROR;
}
return get_matching_model_microcode(0, start, cd.data, cd.size,
@@ -694,7 +698,7 @@ int __init save_microcode_in_initrd_intel(void)
if (count == 0)
return ret;
- copy_initrd_ptrs(mc_saved, mc_saved_in_initrd, initrd_start, count);
+ copy_initrd_ptrs(mc_saved, mc_saved_in_initrd, get_initrd_start(), count);
ret = save_microcode(&mc_saved_data, mc_saved, count);
if (ret)
pr_err("Cannot save microcode patches from initrd.\n");
@@ -732,16 +736,20 @@ void __init load_ucode_intel_bsp(void)
struct boot_params *p;
p = (struct boot_params *)__pa_nodebug(&boot_params);
- start = p->hdr.ramdisk_image;
size = p->hdr.ramdisk_size;
- _load_ucode_intel_bsp(
- (struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
- (unsigned long *)__pa_nodebug(&mc_saved_in_initrd),
- start, size);
+ /*
+ * Set start only if we have an initrd image. We cannot use initrd_start
+	 * because it is not yet set this early.
+ */
+ start = (size ? p->hdr.ramdisk_image : 0);
+
+ _load_ucode_intel_bsp((struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
+ (unsigned long *)__pa_nodebug(&mc_saved_in_initrd),
+ start, size);
#else
- start = boot_params.hdr.ramdisk_image + PAGE_OFFSET;
size = boot_params.hdr.ramdisk_size;
+ start = (size ? boot_params.hdr.ramdisk_image + PAGE_OFFSET : 0);
_load_ucode_intel_bsp(&mc_saved_data, mc_saved_in_initrd, start, size);
#endif
@@ -752,20 +760,14 @@ void load_ucode_intel_ap(void)
struct mc_saved_data *mc_saved_data_p;
struct ucode_cpu_info uci;
unsigned long *mc_saved_in_initrd_p;
- unsigned long initrd_start_addr;
enum ucode_state ret;
#ifdef CONFIG_X86_32
- unsigned long *initrd_start_p;
- mc_saved_in_initrd_p =
- (unsigned long *)__pa_nodebug(mc_saved_in_initrd);
+ mc_saved_in_initrd_p = (unsigned long *)__pa_nodebug(mc_saved_in_initrd);
mc_saved_data_p = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data);
- initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start);
- initrd_start_addr = (unsigned long)__pa_nodebug(*initrd_start_p);
#else
- mc_saved_data_p = &mc_saved_data;
mc_saved_in_initrd_p = mc_saved_in_initrd;
- initrd_start_addr = initrd_start;
+ mc_saved_data_p = &mc_saved_data;
#endif
/*
@@ -777,7 +779,7 @@ void load_ucode_intel_ap(void)
collect_cpu_info_early(&uci);
ret = load_microcode(mc_saved_data_p, mc_saved_in_initrd_p,
- initrd_start_addr, &uci);
+ get_initrd_start_addr(), &uci);
if (ret != UCODE_OK)
return;
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 2bf79d7c97df..a3aeb2cc361e 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -593,6 +593,19 @@ void x86_pmu_disable_all(void)
}
}
+/*
+ * A PMI may land after enabled=0. It could hit before or
+ * after disable_all.
+ *
+ * If PMI hits before disable_all, the PMU will be disabled in the NMI handler.
+ * It will not be re-enabled in the NMI handler again, because enabled=0. After
+ * handling the NMI, disable_all will be called, which will not change the
+ * state either. If PMI hits after disable_all, the PMU is already disabled
+ * before entering the NMI handler. The NMI handler will not change the state
+ * either.
+ *
+ * So either situation is harmless.
+ */
static void x86_pmu_disable(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index d0e35ebb2adb..ee70445fbb1f 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -591,6 +591,7 @@ struct x86_pmu {
pebs_active :1,
pebs_broken :1;
int pebs_record_size;
+ int pebs_buffer_size;
void (*drain_pebs)(struct pt_regs *regs);
struct event_constraint *pebs_constraints;
void (*pebs_aliases)(struct perf_event *event);
@@ -907,6 +908,8 @@ void intel_pmu_lbr_init_hsw(void);
void intel_pmu_lbr_init_skl(void);
+void intel_pmu_pebs_data_source_nhm(void);
+
int intel_pmu_setup_lbr_filter(struct perf_event *event);
void intel_pt_interrupt(void);
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index e2a430021e46..078de2e86b7a 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1458,7 +1458,15 @@ static __initconst const u64 slm_hw_cache_event_ids
};
/*
- * Use from PMIs where the LBRs are already disabled.
+ * Used from PMIs where the LBRs are already disabled.
+ *
+ * This function may be called consecutively. The PMU must remain in the
+ * disabled state across such consecutive calls.
+ *
+ * During consecutive calls, the same disable value will be written to related
+ * registers, so the PMU state remains unchanged. hw.state in
+ * intel_bts_disable_local will remain PERF_HES_STOPPED too in consecutive
+ * calls.
*/
static void __intel_pmu_disable_all(void)
{
@@ -1840,6 +1848,16 @@ again:
if (__test_and_clear_bit(62, (unsigned long *)&status)) {
handled++;
x86_pmu.drain_pebs(regs);
+ /*
+		 * There are cases where, even though the PEBS ovfl bit is set
+ * in GLOBAL_OVF_STATUS, the PEBS events may also have their
+ * overflow bits set for their counters. We must clear them
+ * here because they have been processed as exact samples in
+ * the drain_pebs() routine. They must not be processed again
+ * in the for_each_bit_set() loop for regular samples below.
+ */
+ status &= ~cpuc->pebs_enabled;
+ status &= x86_pmu.intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
}
/*
@@ -1885,7 +1903,10 @@ again:
goto again;
done:
- __intel_pmu_enable_all(0, true);
+ /* Only restore PMU state when it's active. See x86_pmu_disable(). */
+ if (cpuc->enabled)
+ __intel_pmu_enable_all(0, true);
+
/*
* Only unmask the NMI after the overflow counters
* have been reset. This avoids spurious NMIs on
@@ -3315,6 +3336,7 @@ __init int intel_pmu_init(void)
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
+ intel_pmu_pebs_data_source_nhm();
x86_add_quirk(intel_nehalem_quirk);
pr_cont("Nehalem events, ");
@@ -3377,6 +3399,7 @@ __init int intel_pmu_init(void)
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
+ intel_pmu_pebs_data_source_nhm();
pr_cont("Westmere events, ");
break;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 5db1c7755548..7abb2b88572e 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -51,7 +51,8 @@ union intel_x86_pebs_dse {
#define OP_LH (P(OP, LOAD) | P(LVL, HIT))
#define SNOOP_NONE_MISS (P(SNOOP, NONE) | P(SNOOP, MISS))
-static const u64 pebs_data_source[] = {
+/* Version for Sandy Bridge and later */
+static u64 pebs_data_source[] = {
P(OP, LOAD) | P(LVL, MISS) | P(LVL, L3) | P(SNOOP, NA),/* 0x00:ukn L3 */
OP_LH | P(LVL, L1) | P(SNOOP, NONE), /* 0x01: L1 local */
OP_LH | P(LVL, LFB) | P(SNOOP, NONE), /* 0x02: LFB hit */
@@ -70,6 +71,14 @@ static const u64 pebs_data_source[] = {
OP_LH | P(LVL, UNC) | P(SNOOP, NONE), /* 0x0f: uncached */
};
+/* Patch up minor differences in the bits */
+void __init intel_pmu_pebs_data_source_nhm(void)
+{
+ pebs_data_source[0x05] = OP_LH | P(LVL, L3) | P(SNOOP, HIT);
+ pebs_data_source[0x06] = OP_LH | P(LVL, L3) | P(SNOOP, HITM);
+ pebs_data_source[0x07] = OP_LH | P(LVL, L3) | P(SNOOP, HITM);
+}
+
static u64 precise_store_data(u64 status)
{
union intel_x86_pebs_dse dse;
@@ -269,7 +278,7 @@ static int alloc_pebs_buffer(int cpu)
if (!x86_pmu.pebs)
return 0;
- buffer = kzalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL, node);
+ buffer = kzalloc_node(x86_pmu.pebs_buffer_size, GFP_KERNEL, node);
if (unlikely(!buffer))
return -ENOMEM;
@@ -286,7 +295,7 @@ static int alloc_pebs_buffer(int cpu)
per_cpu(insn_buffer, cpu) = ibuffer;
}
- max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;
+ max = x86_pmu.pebs_buffer_size / x86_pmu.pebs_record_size;
ds->pebs_buffer_base = (u64)(unsigned long)buffer;
ds->pebs_index = ds->pebs_buffer_base;
@@ -1296,6 +1305,7 @@ void __init intel_ds_init(void)
x86_pmu.bts = boot_cpu_has(X86_FEATURE_BTS);
x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
+ x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE;
if (x86_pmu.pebs) {
char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-';
int format = x86_pmu.intel_cap.pebs_format;
@@ -1304,6 +1314,14 @@ void __init intel_ds_init(void)
case 0:
printk(KERN_CONT "PEBS fmt0%c, ", pebs_type);
x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
+ /*
+ * Using >PAGE_SIZE buffers makes the WRMSR to
+ * PERF_GLOBAL_CTRL in intel_pmu_enable_all()
+ * mysteriously hang on Core2.
+ *
+ * As a workaround, we don't do this.
+ */
+ x86_pmu.pebs_buffer_size = PAGE_SIZE;
x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
break;
diff --git a/arch/x86/kernel/cpu/perf_event_knc.c b/arch/x86/kernel/cpu/perf_event_knc.c
index 5b0c232d1ee6..b931095e86d4 100644
--- a/arch/x86/kernel/cpu/perf_event_knc.c
+++ b/arch/x86/kernel/cpu/perf_event_knc.c
@@ -263,7 +263,9 @@ again:
goto again;
done:
- knc_pmu_enable_all(0);
+ /* Only restore PMU state when it's active. See x86_pmu_disable(). */
+ if (cpuc->enabled)
+ knc_pmu_enable_all(0);
return handled;
}
diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
index 37dae792dbbe..589b3193f102 100644
--- a/arch/x86/kernel/ioport.c
+++ b/arch/x86/kernel/ioport.c
@@ -96,9 +96,14 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
SYSCALL_DEFINE1(iopl, unsigned int, level)
{
struct pt_regs *regs = current_pt_regs();
- unsigned int old = (regs->flags >> 12) & 3;
struct thread_struct *t = &current->thread;
+ /*
+ * Careful: the IOPL bits in regs->flags are undefined under Xen PV
+ * and changing them has no effect.
+ */
+ unsigned int old = t->iopl >> X86_EFLAGS_IOPL_BIT;
+
if (level > 3)
return -EINVAL;
/* Trying to gain more privileges? */
@@ -106,8 +111,9 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
}
- regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
- t->iopl = level << 12;
+ regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) |
+ (level << X86_EFLAGS_IOPL_BIT);
+ t->iopl = level << X86_EFLAGS_IOPL_BIT;
set_iopl_mask(t->iopl);
return 0;
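
For reference, IOPL is the two-bit field at bits 12-13 of EFLAGS, so X86_EFLAGS_IOPL_BIT is 12 and the old open-coded '>> 12 & 3' extracted the same field. A tiny sketch of the read-modify-write on a flags word:

#include <stdio.h>

#define IOPL_BIT        12
#define IOPL_MASK       (3UL << IOPL_BIT)       /* X86_EFLAGS_IOPL */

static unsigned long set_iopl(unsigned long flags, unsigned int level)
{
        return (flags & ~IOPL_MASK) | ((unsigned long)level << IOPL_BIT);
}

int main(void)
{
        unsigned long flags = 0x246;    /* typical user EFLAGS, IOPL == 0 */

        flags = set_iopl(flags, 3);
        printf("flags = %#lx\n", flags);
        printf("iopl  = %lu\n", (flags >> IOPL_BIT) & 3);
        return 0;
}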
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index f8062aaf5df9..61521dc19c10 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -462,7 +462,7 @@ void fixup_irqs(void)
* non intr-remapping case, we can't wait till this interrupt
* arrives at this cpu before completing the irq move.
*/
- irq_force_complete_move(irq);
+ irq_force_complete_move(desc);
if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
break_affinity = 1;
@@ -470,6 +470,15 @@ void fixup_irqs(void)
}
chip = irq_data_get_irq_chip(data);
+ /*
+ * The interrupt descriptor might have been cleaned up
+		 * already, but it is not yet removed from the radix tree.
+ */
+ if (!chip) {
+ raw_spin_unlock(&desc->lock);
+ continue;
+ }
+
if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
chip->irq_mask(data);
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index e835d263a33b..4cbb60fbff3e 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -48,6 +48,7 @@
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
+#include <asm/xen/hypervisor.h>
asmlinkage extern void ret_from_fork(void);
@@ -411,6 +412,17 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
__switch_to_xtra(prev_p, next_p, tss);
+#ifdef CONFIG_XEN
+ /*
+ * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
+ * current_pt_regs()->flags may not match the current task's
+ * intended IOPL. We need to switch it manually.
+ */
+ if (unlikely(static_cpu_has(X86_FEATURE_XENPV) &&
+ prev->iopl != next->iopl))
+ xen_set_iopl_mask(next->iopl);
+#endif
+
if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
/*
* AMD CPUs have a misfeature: SYSRET sets the SS selector but
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 02693dd9a079..f660d63f40fe 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -182,6 +182,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
},
},
+ { /* Handle problems with rebooting on the iMac10,1. */
+ .callback = set_pci_reboot,
+ .ident = "Apple iMac10,1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "iMac10,1"),
+ },
+ },
/* ASRock */
{ /* Handle problems with rebooting on ASRock Q1900DC-ITX */
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 1505587d06e9..b9b09fec173b 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -650,10 +650,10 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
u16 sel;
la = seg_base(ctxt, addr.seg) + addr.ea;
- *linear = la;
*max_size = 0;
switch (mode) {
case X86EMUL_MODE_PROT64:
+ *linear = la;
if (is_noncanonical_address(la))
goto bad;
@@ -662,6 +662,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
goto bad;
break;
default:
+ *linear = la = (u32)la;
usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
addr.seg);
if (!usable)
@@ -689,7 +690,6 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
if (size > *max_size)
goto bad;
}
- la &= (u32)-1;
break;
}
if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index b0ea42b78ccd..ab5318727579 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -245,7 +245,7 @@ static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
* PIC is being reset. Handle it gracefully here
*/
atomic_inc(&ps->pending);
- else if (value > 0)
+ else if (value > 0 && ps->reinject)
/* in this case, we had multiple outstanding pit interrupts
* that we needed to inject. Reinject
*/
@@ -288,7 +288,9 @@ static void pit_do_work(struct kthread_work *work)
* last one has been acked.
*/
spin_lock(&ps->inject_lock);
- if (ps->irq_ack) {
+ if (!ps->reinject)
+ inject = 1;
+ else if (ps->irq_ack) {
ps->irq_ack = 0;
inject = 1;
}
@@ -317,10 +319,10 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
struct kvm_kpit_state *ps = container_of(data, struct kvm_kpit_state, timer);
struct kvm_pit *pt = ps->kvm->arch.vpit;
- if (ps->reinject || !atomic_read(&ps->pending)) {
+ if (ps->reinject)
atomic_inc(&ps->pending);
- queue_kthread_work(&pt->worker, &pt->expired);
- }
+
+ queue_kthread_work(&pt->worker, &pt->expired);
if (ps->is_periodic) {
hrtimer_add_expires_ns(&ps->timer, ps->period);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index e7c2c1428a69..8eb8a934b531 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3754,13 +3754,15 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
void
reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
{
+ bool uses_nx = context->nx || context->base_role.smep_andnot_wp;
+
/*
* Passing "true" to the last argument is okay; it adds a check
* on bit 8 of the SPTEs which KVM doesn't use anyway.
*/
__reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
boot_cpu_data.x86_phys_bits,
- context->shadow_root_level, context->nx,
+ context->shadow_root_level, uses_nx,
guest_cpuid_has_gbpages(vcpu), is_pse(vcpu),
true);
}
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 3058a22a658d..7be8a251363e 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -249,7 +249,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
return ret;
kvm_vcpu_mark_page_dirty(vcpu, table_gfn);
- walker->ptes[level] = pte;
+ walker->ptes[level - 1] = pte;
}
return 0;
}
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 120302511802..ab9ae67a80e4 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -268,7 +268,7 @@ TRACE_EVENT(kvm_inj_virq,
#define kvm_trace_sym_exc \
EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM), \
EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF), \
- EXS(MF), EXS(MC)
+ EXS(MF), EXS(AC), EXS(MC)
/*
* Tracepoint for kvm interrupt injection:
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 44976a596fa6..f34ab71dfd57 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -595,6 +595,8 @@ struct vcpu_vmx {
/* Support for PML */
#define PML_ENTITY_NUM 512
struct page *pml_pg;
+
+ u64 current_tsc_ratio;
};
enum segment_cache_field {
@@ -1746,6 +1748,13 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
return;
}
break;
+ case MSR_IA32_PEBS_ENABLE:
+ /* PEBS needs a quiescent period after being disabled (to write
+ * a record). Disabling PEBS through VMX MSR swapping doesn't
+ * provide that period, so a CPU could write host's record into
+ * guest's memory.
+ */
+ wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
}
for (i = 0; i < m->nr; ++i)
@@ -1783,26 +1792,31 @@ static void reload_tss(void)
static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
{
- u64 guest_efer;
- u64 ignore_bits;
+ u64 guest_efer = vmx->vcpu.arch.efer;
+ u64 ignore_bits = 0;
- guest_efer = vmx->vcpu.arch.efer;
+ if (!enable_ept) {
+ /*
+ * NX is needed to handle CR0.WP=1, CR4.SMEP=1. Testing
+ * host CPUID is more efficient than testing guest CPUID
+ * or CR4. Host SMEP is anyway a requirement for guest SMEP.
+ */
+ if (boot_cpu_has(X86_FEATURE_SMEP))
+ guest_efer |= EFER_NX;
+ else if (!(guest_efer & EFER_NX))
+ ignore_bits |= EFER_NX;
+ }
/*
- * NX is emulated; LMA and LME handled by hardware; SCE meaningless
- * outside long mode
+ * LMA and LME handled by hardware; SCE meaningless outside long mode.
*/
- ignore_bits = EFER_NX | EFER_SCE;
+ ignore_bits |= EFER_SCE;
#ifdef CONFIG_X86_64
ignore_bits |= EFER_LMA | EFER_LME;
/* SCE is meaningful only in long mode on Intel */
if (guest_efer & EFER_LMA)
ignore_bits &= ~(u64)EFER_SCE;
#endif
- guest_efer &= ~ignore_bits;
- guest_efer |= host_efer & ignore_bits;
- vmx->guest_msrs[efer_offset].data = guest_efer;
- vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
clear_atomic_switch_msr(vmx, MSR_EFER);
@@ -1813,16 +1827,21 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
*/
if (cpu_has_load_ia32_efer ||
(enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
- guest_efer = vmx->vcpu.arch.efer;
if (!(guest_efer & EFER_LMA))
guest_efer &= ~EFER_LME;
if (guest_efer != host_efer)
add_atomic_switch_msr(vmx, MSR_EFER,
guest_efer, host_efer);
return false;
- }
+ } else {
+ guest_efer &= ~ignore_bits;
+ guest_efer |= host_efer & ignore_bits;
- return true;
+ vmx->guest_msrs[efer_offset].data = guest_efer;
+ vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
+
+ return true;
+ }
}
static unsigned long segment_base(u16 selector)
@@ -2062,14 +2081,16 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
- /* Setup TSC multiplier */
- if (cpu_has_vmx_tsc_scaling())
- vmcs_write64(TSC_MULTIPLIER,
- vcpu->arch.tsc_scaling_ratio);
-
vmx->loaded_vmcs->cpu = cpu;
}
+ /* Setup TSC multiplier */
+ if (kvm_has_tsc_control &&
+ vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio) {
+ vmx->current_tsc_ratio = vcpu->arch.tsc_scaling_ratio;
+ vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
+ }
+
vmx_vcpu_pi_load(vcpu, cpu);
}
@@ -2616,8 +2637,15 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
} else
vmx->nested.nested_vmx_ept_caps = 0;
+ /*
+ * Old versions of KVM use the single-context version without
+ * checking for support, so declare that it is supported even
+ * though it is treated as global context. The alternative is
+ * not failing the single-context invvpid, and it is worse.
+ */
if (enable_vpid)
vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT |
+ VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |
VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
else
vmx->nested.nested_vmx_vpid_caps = 0;
@@ -7319,6 +7347,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
if (!(types & (1UL << type))) {
nested_vmx_failValid(vcpu,
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
+ skip_emulated_instruction(vcpu);
return 1;
}
@@ -7377,6 +7406,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
if (!(types & (1UL << type))) {
nested_vmx_failValid(vcpu,
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
+ skip_emulated_instruction(vcpu);
return 1;
}
@@ -7393,12 +7423,17 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
}
switch (type) {
+ case VMX_VPID_EXTENT_SINGLE_CONTEXT:
+ /*
+ * Old versions of KVM use the single-context version so we
+ * have to support it; just treat it the same as all-context.
+ */
case VMX_VPID_EXTENT_ALL_CONTEXT:
__vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02);
nested_vmx_succeed(vcpu);
break;
default:
- /* Trap single context invalidation invvpid calls */
+ /* Trap individual address invalidation invvpid calls */
BUG_ON(1);
break;
}
@@ -8932,7 +8967,8 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
best->ebx &= ~bit(X86_FEATURE_INVPCID);
}
- vmcs_set_secondary_exec_control(secondary_exec_ctl);
+ if (cpu_has_secondary_exec_ctrls())
+ vmcs_set_secondary_exec_control(secondary_exec_ctl);
if (static_cpu_has(X86_FEATURE_PCOMMIT) && nested) {
if (guest_cpuid_has_pcommit(vcpu))
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 97592e190413..7eb4ebd3ebea 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -951,7 +951,7 @@ static u32 msrs_to_save[] = {
MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
- MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS
+ MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
};
static unsigned num_msrs_to_save;
@@ -2736,6 +2736,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
}
kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
+ vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD;
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -4006,16 +4007,17 @@ static void kvm_init_msr_list(void)
/*
* Even MSRs that are valid in the host may not be exposed
- * to the guests in some cases. We could work around this
- * in VMX with the generic MSR save/load machinery, but it
- * is not really worthwhile since it will really only
- * happen with nested virtualization.
+ * to the guests in some cases.
*/
switch (msrs_to_save[i]) {
case MSR_IA32_BNDCFGS:
if (!kvm_x86_ops->mpx_supported())
continue;
break;
+ case MSR_TSC_AUX:
+ if (!kvm_x86_ops->rdtscp_supported())
+ continue;
+ break;
default:
break;
}
@@ -6022,12 +6024,10 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
}
/* try to inject new event if pending */
- if (vcpu->arch.nmi_pending) {
- if (kvm_x86_ops->nmi_allowed(vcpu)) {
- --vcpu->arch.nmi_pending;
- vcpu->arch.nmi_injected = true;
- kvm_x86_ops->set_nmi(vcpu);
- }
+ if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
+ --vcpu->arch.nmi_pending;
+ vcpu->arch.nmi_injected = true;
+ kvm_x86_ops->set_nmi(vcpu);
} else if (kvm_cpu_has_injectable_intr(vcpu)) {
/*
* Because interrupts can be injected asynchronously, we are
@@ -6472,10 +6472,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (inject_pending_event(vcpu, req_int_win) != 0)
req_immediate_exit = true;
/* enable NMI/IRQ window open exits if needed */
- else if (vcpu->arch.nmi_pending)
- kvm_x86_ops->enable_nmi_window(vcpu);
- else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
- kvm_x86_ops->enable_irq_window(vcpu);
+ else {
+ if (vcpu->arch.nmi_pending)
+ kvm_x86_ops->enable_nmi_window(vcpu);
+ if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
+ kvm_x86_ops->enable_irq_window(vcpu);
+ }
if (kvm_lapic_enabled(vcpu)) {
update_cr8_intercept(vcpu);
@@ -6543,12 +6545,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
* KVM_DEBUGREG_WONT_EXIT again.
*/
if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) {
- int i;
-
WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP);
kvm_x86_ops->sync_dirty_debug_regs(vcpu);
- for (i = 0; i < KVM_NR_DB_REGS; i++)
- vcpu->arch.eff_db[i] = vcpu->arch.db[i];
+ kvm_update_dr0123(vcpu);
+ kvm_update_dr6(vcpu);
+ kvm_update_dr7(vcpu);
+ vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
}
/*
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index 982ce34f4a9b..27f89c79a44b 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -232,17 +232,31 @@ ENDPROC(copy_user_enhanced_fast_string)
/*
* copy_user_nocache - Uncached memory copy with exception handling
- * This will force destination/source out of cache for more performance.
+ * This will force the destination out of cache for better performance.
+ *
+ * Note: Cached memory copy is used when the destination or size is not
+ * naturally aligned. That is:
+ *  - 8-byte alignment is required when size is 8 bytes or larger.
+ *  - 4-byte alignment is required when size is 4 bytes.
*/
ENTRY(__copy_user_nocache)
ASM_STAC
+
+ /* If size is less than 8 bytes, go to 4-byte copy */
cmpl $8,%edx
- jb 20f /* less then 8 bytes, go to byte copy loop */
+ jb .L_4b_nocache_copy_entry
+
+ /* If destination is not 8-byte aligned, "cache" copy to align it */
ALIGN_DESTINATION
+
+ /* Set 4x8-byte copy count and remainder */
movl %edx,%ecx
andl $63,%edx
shrl $6,%ecx
- jz 17f
+ jz .L_8b_nocache_copy_entry /* jump if count is 0 */
+
+ /* Perform 4x8-byte nocache loop-copy */
+.L_4x8b_nocache_copy_loop:
1: movq (%rsi),%r8
2: movq 1*8(%rsi),%r9
3: movq 2*8(%rsi),%r10
@@ -262,60 +276,106 @@ ENTRY(__copy_user_nocache)
leaq 64(%rsi),%rsi
leaq 64(%rdi),%rdi
decl %ecx
- jnz 1b
-17: movl %edx,%ecx
+ jnz .L_4x8b_nocache_copy_loop
+
+ /* Set 8-byte copy count and remainder */
+.L_8b_nocache_copy_entry:
+ movl %edx,%ecx
andl $7,%edx
shrl $3,%ecx
- jz 20f
-18: movq (%rsi),%r8
-19: movnti %r8,(%rdi)
+ jz .L_4b_nocache_copy_entry /* jump if count is 0 */
+
+ /* Perform 8-byte nocache loop-copy */
+.L_8b_nocache_copy_loop:
+20: movq (%rsi),%r8
+21: movnti %r8,(%rdi)
leaq 8(%rsi),%rsi
leaq 8(%rdi),%rdi
decl %ecx
- jnz 18b
-20: andl %edx,%edx
- jz 23f
+ jnz .L_8b_nocache_copy_loop
+
+	/* If no bytes left, we're done */
+.L_4b_nocache_copy_entry:
+ andl %edx,%edx
+ jz .L_finish_copy
+
+ /* If destination is not 4-byte aligned, go to byte copy: */
+ movl %edi,%ecx
+ andl $3,%ecx
+ jnz .L_1b_cache_copy_entry
+
+ /* Set 4-byte copy count (1 or 0) and remainder */
movl %edx,%ecx
-21: movb (%rsi),%al
-22: movb %al,(%rdi)
+ andl $3,%edx
+ shrl $2,%ecx
+ jz .L_1b_cache_copy_entry /* jump if count is 0 */
+
+ /* Perform 4-byte nocache copy: */
+30: movl (%rsi),%r8d
+31: movnti %r8d,(%rdi)
+ leaq 4(%rsi),%rsi
+ leaq 4(%rdi),%rdi
+
+ /* If no bytes left, we're done: */
+ andl %edx,%edx
+ jz .L_finish_copy
+
+ /* Perform byte "cache" loop-copy for the remainder */
+.L_1b_cache_copy_entry:
+ movl %edx,%ecx
+.L_1b_cache_copy_loop:
+40: movb (%rsi),%al
+41: movb %al,(%rdi)
incq %rsi
incq %rdi
decl %ecx
- jnz 21b
-23: xorl %eax,%eax
+ jnz .L_1b_cache_copy_loop
+
+ /* Finished copying; fence the prior stores */
+.L_finish_copy:
+ xorl %eax,%eax
ASM_CLAC
sfence
ret
.section .fixup,"ax"
-30: shll $6,%ecx
+.L_fixup_4x8b_copy:
+ shll $6,%ecx
addl %ecx,%edx
- jmp 60f
-40: lea (%rdx,%rcx,8),%rdx
- jmp 60f
-50: movl %ecx,%edx
-60: sfence
+ jmp .L_fixup_handle_tail
+.L_fixup_8b_copy:
+ lea (%rdx,%rcx,8),%rdx
+ jmp .L_fixup_handle_tail
+.L_fixup_4b_copy:
+ lea (%rdx,%rcx,4),%rdx
+ jmp .L_fixup_handle_tail
+.L_fixup_1b_copy:
+ movl %ecx,%edx
+.L_fixup_handle_tail:
+ sfence
jmp copy_user_handle_tail
.previous
- _ASM_EXTABLE(1b,30b)
- _ASM_EXTABLE(2b,30b)
- _ASM_EXTABLE(3b,30b)
- _ASM_EXTABLE(4b,30b)
- _ASM_EXTABLE(5b,30b)
- _ASM_EXTABLE(6b,30b)
- _ASM_EXTABLE(7b,30b)
- _ASM_EXTABLE(8b,30b)
- _ASM_EXTABLE(9b,30b)
- _ASM_EXTABLE(10b,30b)
- _ASM_EXTABLE(11b,30b)
- _ASM_EXTABLE(12b,30b)
- _ASM_EXTABLE(13b,30b)
- _ASM_EXTABLE(14b,30b)
- _ASM_EXTABLE(15b,30b)
- _ASM_EXTABLE(16b,30b)
- _ASM_EXTABLE(18b,40b)
- _ASM_EXTABLE(19b,40b)
- _ASM_EXTABLE(21b,50b)
- _ASM_EXTABLE(22b,50b)
+ _ASM_EXTABLE(1b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(2b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(3b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(4b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(5b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(6b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(7b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(8b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(9b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(10b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(11b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(12b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(13b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(14b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(15b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(16b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(20b,.L_fixup_8b_copy)
+ _ASM_EXTABLE(21b,.L_fixup_8b_copy)
+ _ASM_EXTABLE(30b,.L_fixup_4b_copy)
+ _ASM_EXTABLE(31b,.L_fixup_4b_copy)
+ _ASM_EXTABLE(40b,.L_fixup_1b_copy)
+ _ASM_EXTABLE(41b,.L_fixup_1b_copy)
ENDPROC(__copy_user_nocache)
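
The rewritten routine is easier to follow as C: byte-copy to an 8-byte destination boundary when at least 8 bytes remain, stream 8-byte (then 4-byte) non-temporal stores, and fall back to cached byte copies whenever the alignment rules above are not met. A rough dispatch-only sketch; memcpy stands in for the movnti stores, which plain C cannot express, and the fault fixups are omitted:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void nocache_copy_sketch(void *dst, const void *src, size_t len)
{
        unsigned char *d = dst;
        const unsigned char *s = src;

        if (len >= 8) {
                /* ALIGN_DESTINATION: cached byte copy to an 8-byte boundary. */
                while (((uintptr_t)d & 7) && len) {
                        *d++ = *s++;
                        len--;
                }
                /* The asm unrolls this 4x for 64-byte chunks first. */
                for (; len >= 8; len -= 8, d += 8, s += 8)
                        memcpy(d, s, 8);        /* movq+movnti in the asm */
        }
        if (len >= 4 && !((uintptr_t)d & 3)) {
                memcpy(d, s, 4);                /* movl+movnti in the asm */
                d += 4; s += 4; len -= 4;
        }
        while (len--)                           /* cached byte tail */
                *d++ = *s++;
}

int main(void)
{
        char src[27] = "abcdefghijklmnopqrstuvwxyz", dst[27] = { 0 };

        nocache_copy_sketch(dst, src + 1, 25);  /* misaligned source is fine */
        return dst[24] != 'z';
}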
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index eef44d9a3f77..e830c71a1323 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -287,6 +287,9 @@ static noinline int vmalloc_fault(unsigned long address)
if (!pmd_k)
return -1;
+ if (pmd_huge(*pmd_k))
+ return 0;
+
pte_k = pte_offset_kernel(pmd_k, address);
if (!pte_present(*pte_k))
return -1;
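
The pmd_huge() check here (and the pud_huge()/pmd_huge() checks in the 64-bit variant below) makes the walk stop one level early for large-page kernel mappings, where no PTE level exists underneath. The shape of that walk with a toy two-level table; every name here is hypothetical:

#include <stdbool.h>
#include <stddef.h>

struct pte { bool present; };
struct pmd { bool present; bool huge; struct pte *ptes; };

/* 0: mapped, -1: not mapped. A huge pmd has no PTE level beneath it. */
static int toy_vmalloc_fault(struct pmd *pmd)
{
        if (!pmd || !pmd->present)
                return -1;
        if (pmd->huge)                  /* the new early return */
                return 0;
        if (!pmd->ptes || !pmd->ptes->present)
                return -1;
        return 0;
}

int main(void)
{
        struct pmd huge = { .present = true, .huge = true, .ptes = NULL };

        /* Without the huge check, the walk would dereference a PTE level
         * that does not exist; with it, the mapping is accepted directly. */
        return toy_vmalloc_fault(&huge);        /* 0: mapped */
}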
@@ -360,8 +363,6 @@ void vmalloc_sync_all(void)
* 64-bit:
*
* Handle a fault on the vmalloc area
- *
- * This assumes no large pages in there.
*/
static noinline int vmalloc_fault(unsigned long address)
{
@@ -403,17 +404,23 @@ static noinline int vmalloc_fault(unsigned long address)
if (pud_none(*pud_ref))
return -1;
- if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
+ if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref))
BUG();
+ if (pud_huge(*pud))
+ return 0;
+
pmd = pmd_offset(pud, address);
pmd_ref = pmd_offset(pud_ref, address);
if (pmd_none(*pmd_ref))
return -1;
- if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
+ if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref))
BUG();
+ if (pmd_huge(*pmd))
+ return 0;
+
pte_ref = pte_offset_kernel(pmd_ref, address);
if (!pte_present(*pte_ref))
return -1;
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index b2fd67da1701..ef05755a1900 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -123,7 +123,7 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
break;
}
- if (regno > nr_registers) {
+ if (regno >= nr_registers) {
WARN_ONCE(1, "decoded an instruction with an invalid register");
return -EINVAL;
}
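
The one-character fix above is the classic fencepost error: a table of nr_registers entries has valid indices 0..nr_registers-1, so the reject test must be '>=', not '>'. A short demonstration:

#include <stdio.h>

int main(void)
{
        int table[16] = { 0 };          /* nr_registers == 16, indices 0..15 */
        int nr_registers = 16;
        int regno = 16;                 /* decoded value, just out of range */

        if (regno > nr_registers)       /* old check: 16 slips through */
                return 1;

        if (regno >= nr_registers) {    /* fixed check rejects index 16 */
                fprintf(stderr, "invalid register %d\n", regno);
                return 1;
        }
        return table[regno];            /* unreachable one-past-the-end read */
}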
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index a3137a4feed1..b599a780a5a9 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -33,7 +33,7 @@ struct cpa_data {
pgd_t *pgd;
pgprot_t mask_set;
pgprot_t mask_clr;
- int numpages;
+ unsigned long numpages;
int flags;
unsigned long pfn;
unsigned force_split : 1;
@@ -414,24 +414,30 @@ pmd_t *lookup_pmd_address(unsigned long address)
phys_addr_t slow_virt_to_phys(void *__virt_addr)
{
unsigned long virt_addr = (unsigned long)__virt_addr;
- unsigned long phys_addr, offset;
+ phys_addr_t phys_addr;
+ unsigned long offset;
enum pg_level level;
pte_t *pte;
pte = lookup_address(virt_addr, &level);
BUG_ON(!pte);
+ /*
+ * pXX_pfn() returns unsigned long, which must be cast to phys_addr_t
+ * before being left-shifted PAGE_SHIFT bits -- this cast is what
+ * makes 32-bit PAE kernels work correctly.
+ */
switch (level) {
case PG_LEVEL_1G:
- phys_addr = pud_pfn(*(pud_t *)pte) << PAGE_SHIFT;
+ phys_addr = (phys_addr_t)pud_pfn(*(pud_t *)pte) << PAGE_SHIFT;
offset = virt_addr & ~PUD_PAGE_MASK;
break;
case PG_LEVEL_2M:
- phys_addr = pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT;
+ phys_addr = (phys_addr_t)pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT;
offset = virt_addr & ~PMD_PAGE_MASK;
break;
default:
- phys_addr = pte_pfn(*pte) << PAGE_SHIFT;
+ phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
offset = virt_addr & ~PAGE_MASK;
}
@@ -1345,7 +1351,7 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
* CPA operation. Either a large page has been
* preserved or a single page update happened.
*/
- BUG_ON(cpa->numpages > numpages);
+ BUG_ON(cpa->numpages > numpages || !cpa->numpages);
numpages -= cpa->numpages;
if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
cpa->curpage++;
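
The slow_virt_to_phys() casts matter on 32-bit PAE, where unsigned long is 32 bits but physical addresses can exceed 4GB: shifting the pfn left by PAGE_SHIFT in 32-bit arithmetic silently drops the high bits unless the pfn is first widened to phys_addr_t. A small demonstration of the overflow, using fixed-width types to stand in for the PAE case:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        uint32_t pfn = 0x110000;    /* a page above 4GB: phys 0x110000000 */

        uint32_t truncated = pfn << PAGE_SHIFT;         /* high bits lost */
        uint64_t widened = (uint64_t)pfn << PAGE_SHIFT; /* cast first */

        printf("truncated 0x%08x, widened 0x%09llx\n",
               truncated, (unsigned long long)widened);
        /* truncated 0x10000000, widened 0x110000000 */
        return 0;
    }
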
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 8ddb5d0d66fb..5fb6adaaa796 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -106,8 +106,6 @@ static void flush_tlb_func(void *info)
if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
return;
- if (!f->flush_end)
- f->flush_end = f->flush_start + PAGE_SIZE;
count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
@@ -135,12 +133,20 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
unsigned long end)
{
struct flush_tlb_info info;
+
+ if (end == 0)
+ end = start + PAGE_SIZE;
info.flush_mm = mm;
info.flush_start = start;
info.flush_end = end;
count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
- trace_tlb_flush(TLB_REMOTE_SEND_IPI, end - start);
+ if (end == TLB_FLUSH_ALL)
+ trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
+ else
+ trace_tlb_flush(TLB_REMOTE_SEND_IPI,
+ (end - start) >> PAGE_SHIFT);
+
if (is_uv_system()) {
unsigned int cpu;
@@ -161,7 +167,10 @@ void flush_tlb_current_task(void)
preempt_disable();
count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+
+ /* This is an implicit full barrier that synchronizes with switch_mm. */
local_flush_tlb();
+
trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
@@ -188,17 +197,29 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
unsigned long base_pages_to_flush = TLB_FLUSH_ALL;
preempt_disable();
- if (current->active_mm != mm)
+ if (current->active_mm != mm) {
+ /* Synchronize with switch_mm. */
+ smp_mb();
+
goto out;
+ }
if (!current->mm) {
leave_mm(smp_processor_id());
+
+ /* Synchronize with switch_mm. */
+ smp_mb();
+
goto out;
}
if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
base_pages_to_flush = (end - start) >> PAGE_SHIFT;
+ /*
+ * Both branches below are implicit full barriers (MOV to CR or
+ * INVLPG) that synchronize with switch_mm.
+ */
if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
base_pages_to_flush = TLB_FLUSH_ALL;
count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
@@ -228,10 +249,18 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
preempt_disable();
if (current->active_mm == mm) {
- if (current->mm)
+ if (current->mm) {
+ /*
+ * Implicit full barrier (INVLPG) that synchronizes
+ * with switch_mm.
+ */
__flush_tlb_one(start);
- else
+ } else {
leave_mm(smp_processor_id());
+
+ /* Synchronize with switch_mm. */
+ smp_mb();
+ }
}
if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
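
Two things happen in the tlb.c hunks: flush_end is now normalized in native_flush_tlb_others() instead of inside the IPI handler, and the trace point reports a page count rather than a byte length, with TLB_FLUSH_ALL passed through untranslated since it is a sentinel rather than a real range. The added smp_mb() calls (and the comments about implicit barriers from MOV-to-CR and INVLPG) pair with switch_mm() so a CPU switching into the mm cannot miss a flush. A sketch of the trace-argument conversion, assuming the x86 definition of TLB_FLUSH_ALL as all-ones:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define TLB_FLUSH_ALL (~0UL)    /* sentinel, not a byte range */

    static unsigned long trace_arg(unsigned long start, unsigned long end)
    {
        if (end == TLB_FLUSH_ALL)
            return TLB_FLUSH_ALL;           /* pass through as-is */
        return (end - start) >> PAGE_SHIFT; /* bytes -> pages */
    }

    int main(void)
    {
        printf("%lu\n", trace_arg(0x1000, 0x5000));     /* 4 pages */
        return 0;
    }
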
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index eccd4d99e6a4..8fd6f44aee83 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -673,28 +673,22 @@ int pcibios_add_device(struct pci_dev *dev)
return 0;
}
-int pcibios_alloc_irq(struct pci_dev *dev)
+int pcibios_enable_device(struct pci_dev *dev, int mask)
{
- /*
- * If the PCI device was already claimed by core code and has
- * MSI enabled, probing of the pcibios IRQ will overwrite
- * dev->irq. So bail out if MSI is already enabled.
- */
- if (pci_dev_msi_enabled(dev))
- return -EBUSY;
+ int err;
- return pcibios_enable_irq(dev);
-}
+ if ((err = pci_enable_resources(dev, mask)) < 0)
+ return err;
-void pcibios_free_irq(struct pci_dev *dev)
-{
- if (pcibios_disable_irq)
- pcibios_disable_irq(dev);
+ if (!pci_dev_msi_enabled(dev))
+ return pcibios_enable_irq(dev);
+ return 0;
}
-int pcibios_enable_device(struct pci_dev *dev, int mask)
+void pcibios_disable_device(struct pci_dev *dev)
{
- return pci_enable_resources(dev, mask);
+ if (!pci_dev_msi_enabled(dev) && pcibios_disable_irq)
+ pcibios_disable_irq(dev);
}
int pci_ext_cfg_avail(void)
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index e58565556703..0ae7e9fa348d 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -540,3 +540,10 @@ static void twinhead_reserve_killing_zone(struct pci_dev *dev)
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x27B9, twinhead_reserve_killing_zone);
+
+static void pci_bdwep_bar(struct pci_dev *dev)
+{
+ dev->non_compliant_bars = 1;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_bdwep_bar);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_bdwep_bar);
diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
index 0d24e7c10145..8b93e634af84 100644
--- a/arch/x86/pci/intel_mid_pci.c
+++ b/arch/x86/pci/intel_mid_pci.c
@@ -215,7 +215,7 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
int polarity;
int ret;
- if (pci_has_managed_irq(dev))
+ if (dev->irq_managed && dev->irq > 0)
return 0;
switch (intel_mid_identify_cpu()) {
@@ -256,13 +256,10 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
static void intel_mid_pci_irq_disable(struct pci_dev *dev)
{
- if (pci_has_managed_irq(dev)) {
+ if (!mp_should_keep_irq(&dev->dev) && dev->irq_managed &&
+ dev->irq > 0) {
mp_unmap_irq(dev->irq);
dev->irq_managed = 0;
- /*
- * Don't reset dev->irq here, otherwise
- * intel_mid_pci_irq_enable() will fail on next call.
- */
}
}
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index 32e70343e6fd..9bd115484745 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -1202,7 +1202,7 @@ static int pirq_enable_irq(struct pci_dev *dev)
struct pci_dev *temp_dev;
int irq;
- if (pci_has_managed_irq(dev))
+ if (dev->irq_managed && dev->irq > 0)
return 0;
irq = IO_APIC_get_PCI_irq_vector(dev->bus->number,
@@ -1230,7 +1230,8 @@ static int pirq_enable_irq(struct pci_dev *dev)
}
dev = temp_dev;
if (irq >= 0) {
- pci_set_managed_irq(dev, irq);
+ dev->irq_managed = 1;
+ dev->irq = irq;
dev_info(&dev->dev, "PCI->APIC IRQ transform: "
"INT %c -> IRQ %d\n", 'A' + pin - 1, irq);
return 0;
@@ -1256,10 +1257,24 @@ static int pirq_enable_irq(struct pci_dev *dev)
return 0;
}
+bool mp_should_keep_irq(struct device *dev)
+{
+ if (dev->power.is_prepared)
+ return true;
+#ifdef CONFIG_PM
+ if (dev->power.runtime_status == RPM_SUSPENDING)
+ return true;
+#endif
+
+ return false;
+}
+
static void pirq_disable_irq(struct pci_dev *dev)
{
- if (io_apic_assign_pci_irqs && pci_has_managed_irq(dev)) {
+ if (io_apic_assign_pci_irqs && !mp_should_keep_irq(&dev->dev) &&
+ dev->irq_managed && dev->irq) {
mp_unmap_irq(dev->irq);
- pci_reset_managed_irq(dev);
+ dev->irq = 0;
+ dev->irq_managed = 0;
}
}
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index b7de78bdc09c..beab8c706ac9 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -961,7 +961,7 @@ static void xen_load_sp0(struct tss_struct *tss,
tss->x86_tss.sp0 = thread->sp0;
}
-static void xen_set_iopl_mask(unsigned mask)
+void xen_set_iopl_mask(unsigned mask)
{
struct physdev_set_iopl set_iopl;
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index df0c40559583..7f664c416faf 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -34,7 +34,8 @@ static void xen_hvm_post_suspend(int suspend_cancelled)
{
#ifdef CONFIG_XEN_PVHVM
int cpu;
- xen_hvm_init_shared_info();
+ if (!suspend_cancelled)
+ xen_hvm_init_shared_info();
xen_callback_vector();
xen_unplug_emulated_devices();
if (xen_feature(XENFEAT_hvm_safe_pvclock)) {
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index 9ed55649ac8e..05e1df943856 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -128,7 +128,7 @@ ENTRY(_startup)
wsr a0, icountlevel
.set _index, 0
- .rept XCHAL_NUM_DBREAK - 1
+ .rept XCHAL_NUM_DBREAK
wsr a0, SREG_DBREAKC + _index
.set _index, _index + 1
.endr
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
index d75aa1476da7..1a804a2f9a5b 100644
--- a/arch/xtensa/mm/cache.c
+++ b/arch/xtensa/mm/cache.c
@@ -97,11 +97,11 @@ void clear_user_highpage(struct page *page, unsigned long vaddr)
unsigned long paddr;
void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr);
- pagefault_disable();
+ preempt_disable();
kmap_invalidate_coherent(page, vaddr);
set_bit(PG_arch_1, &page->flags);
clear_page_alias(kvaddr, paddr);
- pagefault_enable();
+ preempt_enable();
}
void copy_user_highpage(struct page *dst, struct page *src,
@@ -113,11 +113,11 @@ void copy_user_highpage(struct page *dst, struct page *src,
void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr,
&src_paddr);
- pagefault_disable();
+ preempt_disable();
kmap_invalidate_coherent(dst, vaddr);
set_bit(PG_arch_1, &dst->flags);
copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
- pagefault_enable();
+ preempt_enable();
}
#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c
index 70cb408bc20d..92d785fefb6d 100644
--- a/arch/xtensa/platforms/iss/console.c
+++ b/arch/xtensa/platforms/iss/console.c
@@ -100,21 +100,23 @@ static void rs_poll(unsigned long priv)
{
struct tty_port *port = (struct tty_port *)priv;
int i = 0;
+ int rd = 1;
unsigned char c;
spin_lock(&timer_lock);
while (simc_poll(0)) {
- simc_read(0, &c, 1);
+ rd = simc_read(0, &c, 1);
+ if (rd <= 0)
+ break;
tty_insert_flip_char(port, c, TTY_NORMAL);
i++;
}
if (i)
tty_flip_buffer_push(port);
-
-
- mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
+ if (rd)
+ mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
spin_unlock(&timer_lock);
}
diff --git a/block/bio.c b/block/bio.c
index 4f184d938942..d4d144363250 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1090,9 +1090,12 @@ int bio_uncopy_user(struct bio *bio)
if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
/*
* if we're in a workqueue, the request is orphaned, so
- * don't copy into a random user address space, just free.
+ * don't copy into a random user address space, just free
+ * and return -EINTR so user space doesn't expect any data.
*/
- if (current->mm && bio_data_dir(bio) == READ)
+ if (!current->mm)
+ ret = -EINTR;
+ else if (bio_data_dir(bio) == READ)
ret = bio_copy_to_iter(bio, bmd->iter);
if (bmd->is_our_pages)
bio_free_pages(bio);
diff --git a/block/blk-core.c b/block/blk-core.c
index 33e2f62d5062..f8e64cac981a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2189,7 +2189,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
if (q->mq_ops) {
if (blk_queue_io_stat(q))
blk_account_io_start(rq, true);
- blk_mq_insert_request(rq, false, true, true);
+ blk_mq_insert_request(rq, false, true, false);
return 0;
}
diff --git a/block/blk-merge.c b/block/blk-merge.c
index e01405a3e8b3..b966db8f3556 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -68,6 +68,18 @@ static struct bio *blk_bio_write_same_split(struct request_queue *q,
return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}
+static inline unsigned get_max_io_size(struct request_queue *q,
+ struct bio *bio)
+{
+ unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
+ unsigned mask = queue_logical_block_size(q) - 1;
+
+ /* aligned to logical block size */
+ sectors &= ~(mask >> 9);
+
+ return sectors;
+}
+
static struct bio *blk_bio_segment_split(struct request_queue *q,
struct bio *bio,
struct bio_set *bs,
@@ -79,11 +91,9 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
unsigned front_seg_size = bio->bi_seg_front_size;
bool do_split = true;
struct bio *new = NULL;
+ const unsigned max_sectors = get_max_io_size(q, bio);
bio_for_each_segment(bv, bio, iter) {
- if (sectors + (bv.bv_len >> 9) > queue_max_sectors(q))
- goto split;
-
/*
* If the queue doesn't support SG gaps and adding this
* offset would create a gap, disallow it.
@@ -91,6 +101,21 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
goto split;
+ if (sectors + (bv.bv_len >> 9) > max_sectors) {
+ /*
+ * Consider this a new segment if we're splitting in
+ * the middle of this vector.
+ */
+ if (nsegs < queue_max_segments(q) &&
+ sectors < max_sectors) {
+ nsegs++;
+ sectors = max_sectors;
+ }
+ if (sectors)
+ goto split;
+ /* Make this single bvec the 1st segment */
+ }
+
if (bvprvp && blk_queue_cluster(q)) {
if (seg_size + bv.bv_len > queue_max_segment_size(q))
goto new_segment;
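
The new get_max_io_size() helper above works in 512-byte sectors while the logical block size is in bytes, so the byte mask is shifted down by 9 to become a sector mask before aligning: with a 4096-byte logical block, mask is 4095, mask >> 9 is 7, and sectors &= ~7 rounds the split point down to a multiple of 8 sectors so a bio is never split mid logical block. Worked numerically:

    #include <stdio.h>

    int main(void)
    {
        unsigned logical_block_size = 4096;     /* bytes */
        unsigned sectors = 261;                 /* candidate split, in 512B sectors */
        unsigned mask = logical_block_size - 1; /* 4095 */

        sectors &= ~(mask >> 9);                /* ~7: align to 8-sector blocks */
        printf("%u\n", sectors);                /* 256 */
        return 0;
    }
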
diff --git a/block/blk-settings.c b/block/blk-settings.c
index dd4973583978..c7bb666aafd1 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -91,8 +91,8 @@ void blk_set_default_limits(struct queue_limits *lim)
lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
lim->virt_boundary_mask = 0;
lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
- lim->max_sectors = lim->max_dev_sectors = lim->max_hw_sectors =
- BLK_SAFE_MAX_SECTORS;
+ lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
+ lim->max_dev_sectors = 0;
lim->chunk_sectors = 0;
lim->max_write_same_sectors = 0;
lim->max_discard_sectors = 0;
diff --git a/build.config b/build.config
new file mode 100644
index 000000000000..71031ae7abf3
--- /dev/null
+++ b/build.config
@@ -0,0 +1,13 @@
+ARCH=arm64
+BRANCH=mirror-aosp-android-hikey-linaro-4.4
+CROSS_COMPILE=aarch64-linux-android-
+DEFCONFIG=hikey_defconfig
+EXTRA_CMDS=''
+KERNEL_DIR=hikey-linaro
+LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin
+FILES="
+arch/arm64/boot/Image
+arch/arm64/boot/dts/hisilicon/hi6220-hikey.dtb
+vmlinux
+System.map
+"
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index a8e7aa3e257b..f5e18c2a4852 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -76,6 +76,8 @@ int af_alg_register_type(const struct af_alg_type *type)
goto unlock;
type->ops->owner = THIS_MODULE;
+ if (type->ops_nokey)
+ type->ops_nokey->owner = THIS_MODULE;
node->type = type;
list_add(&node->list, &alg_types);
err = 0;
@@ -125,6 +127,26 @@ int af_alg_release(struct socket *sock)
}
EXPORT_SYMBOL_GPL(af_alg_release);
+void af_alg_release_parent(struct sock *sk)
+{
+ struct alg_sock *ask = alg_sk(sk);
+ unsigned int nokey = ask->nokey_refcnt;
+ bool last = nokey && !ask->refcnt;
+
+ sk = ask->parent;
+ ask = alg_sk(sk);
+
+ lock_sock(sk);
+ ask->nokey_refcnt -= nokey;
+ if (!last)
+ last = !--ask->refcnt;
+ release_sock(sk);
+
+ if (last)
+ sock_put(sk);
+}
+EXPORT_SYMBOL_GPL(af_alg_release_parent);
+
static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
const u32 forbidden = CRYPTO_ALG_INTERNAL;
@@ -133,6 +155,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
struct sockaddr_alg *sa = (void *)uaddr;
const struct af_alg_type *type;
void *private;
+ int err;
if (sock->state == SS_CONNECTED)
return -EINVAL;
@@ -160,16 +183,22 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
return PTR_ERR(private);
}
+ err = -EBUSY;
lock_sock(sk);
+ if (ask->refcnt | ask->nokey_refcnt)
+ goto unlock;
swap(ask->type, type);
swap(ask->private, private);
+ err = 0;
+
+unlock:
release_sock(sk);
alg_do_release(type, private);
- return 0;
+ return err;
}
static int alg_setkey(struct sock *sk, char __user *ukey,
@@ -202,11 +231,15 @@ static int alg_setsockopt(struct socket *sock, int level, int optname,
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
const struct af_alg_type *type;
- int err = -ENOPROTOOPT;
+ int err = -EBUSY;
lock_sock(sk);
+ if (ask->refcnt)
+ goto unlock;
+
type = ask->type;
+ err = -ENOPROTOOPT;
if (level != SOL_ALG || !type)
goto unlock;
@@ -238,6 +271,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock)
struct alg_sock *ask = alg_sk(sk);
const struct af_alg_type *type;
struct sock *sk2;
+ unsigned int nokey;
int err;
lock_sock(sk);
@@ -257,20 +291,29 @@ int af_alg_accept(struct sock *sk, struct socket *newsock)
security_sk_clone(sk, sk2);
err = type->accept(ask->private, sk2);
- if (err) {
- sk_free(sk2);
+
+ nokey = err == -ENOKEY;
+ if (nokey && type->accept_nokey)
+ err = type->accept_nokey(ask->private, sk2);
+
+ if (err)
goto unlock;
- }
sk2->sk_family = PF_ALG;
- sock_hold(sk);
+ if (nokey || !ask->refcnt++)
+ sock_hold(sk);
+ ask->nokey_refcnt += nokey;
alg_sk(sk2)->parent = sk;
alg_sk(sk2)->type = type;
+ alg_sk(sk2)->nokey_refcnt = nokey;
newsock->ops = type->ops;
newsock->state = SS_CONNECTED;
+ if (nokey)
+ newsock->ops = type->ops_nokey;
+
err = 0;
unlock:
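
The af_alg refcounting above lets "nokey" child sockets (accepted before a key is set) pin the parent separately from keyed ones: every nokey child takes its own hold on the parent, while all keyed children share a single hold counted by refcnt; alg_bind refuses to rebind once any child exists and alg_setsockopt once a keyed child does. A simplified sketch of the accounting, with plain integers standing in for sock_hold()/sock_put():

    #include <stdio.h>

    struct parent { int holds; unsigned refcnt; unsigned nokey_refcnt; };

    static void accept_child(struct parent *p, unsigned nokey)
    {
        if (nokey || !p->refcnt++)      /* short-circuit mirrors the kernel */
            p->holds++;                 /* sock_hold(parent) */
        p->nokey_refcnt += nokey;
    }

    static void release_child(struct parent *p, unsigned was_nokey,
                              unsigned had_keyed_ref)
    {
        int last = was_nokey && !had_keyed_ref; /* purely nokey child */

        p->nokey_refcnt -= was_nokey;
        if (!last)
            last = !--p->refcnt;        /* last keyed child */
        if (last)
            p->holds--;                 /* sock_put(parent) */
    }

    int main(void)
    {
        struct parent p = { 0, 0, 0 };
        accept_child(&p, 1);            /* nokey child: its own hold */
        accept_child(&p, 0);            /* first keyed child: shared hold */
        release_child(&p, 1, 0);
        release_child(&p, 0, 1);
        printf("%d\n", p.holds);        /* 0: parent fully released */
        return 0;
    }
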
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 9c1dc8d6106a..d19b52324cf5 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -451,6 +451,7 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
struct ahash_alg *alg = crypto_ahash_alg(hash);
hash->setkey = ahash_nosetkey;
+ hash->has_setkey = false;
hash->export = ahash_no_export;
hash->import = ahash_no_import;
@@ -463,8 +464,10 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
hash->finup = alg->finup ?: ahash_def_finup;
hash->digest = alg->digest;
- if (alg->setkey)
+ if (alg->setkey) {
hash->setkey = alg->setkey;
+ hash->has_setkey = true;
+ }
if (alg->export)
hash->export = alg->export;
if (alg->import)
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index b4c24fe3dcfb..68a5ceaa04c8 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -34,6 +34,11 @@ struct hash_ctx {
struct ahash_request req;
};
+struct algif_hash_tfm {
+ struct crypto_ahash *hash;
+ bool has_key;
+};
+
static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
size_t ignored)
{
@@ -49,7 +54,8 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
lock_sock(sk);
if (!ctx->more) {
- err = crypto_ahash_init(&ctx->req);
+ err = af_alg_wait_for_completion(crypto_ahash_init(&ctx->req),
+ &ctx->completion);
if (err)
goto unlock;
}
@@ -120,6 +126,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
} else {
if (!ctx->more) {
err = crypto_ahash_init(&ctx->req);
+ err = af_alg_wait_for_completion(err, &ctx->completion);
if (err)
goto unlock;
}
@@ -235,19 +242,151 @@ static struct proto_ops algif_hash_ops = {
.accept = hash_accept,
};
+static int hash_check_key(struct socket *sock)
+{
+ int err = 0;
+ struct sock *psk;
+ struct alg_sock *pask;
+ struct algif_hash_tfm *tfm;
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+
+ lock_sock(sk);
+ if (ask->refcnt)
+ goto unlock_child;
+
+ psk = ask->parent;
+ pask = alg_sk(ask->parent);
+ tfm = pask->private;
+
+ err = -ENOKEY;
+ lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
+ if (!tfm->has_key)
+ goto unlock;
+
+ if (!pask->refcnt++)
+ sock_hold(psk);
+
+ ask->refcnt = 1;
+ sock_put(psk);
+
+ err = 0;
+
+unlock:
+ release_sock(psk);
+unlock_child:
+ release_sock(sk);
+
+ return err;
+}
+
+static int hash_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
+ size_t size)
+{
+ int err;
+
+ err = hash_check_key(sock);
+ if (err)
+ return err;
+
+ return hash_sendmsg(sock, msg, size);
+}
+
+static ssize_t hash_sendpage_nokey(struct socket *sock, struct page *page,
+ int offset, size_t size, int flags)
+{
+ int err;
+
+ err = hash_check_key(sock);
+ if (err)
+ return err;
+
+ return hash_sendpage(sock, page, offset, size, flags);
+}
+
+static int hash_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
+ size_t ignored, int flags)
+{
+ int err;
+
+ err = hash_check_key(sock);
+ if (err)
+ return err;
+
+ return hash_recvmsg(sock, msg, ignored, flags);
+}
+
+static int hash_accept_nokey(struct socket *sock, struct socket *newsock,
+ int flags)
+{
+ int err;
+
+ err = hash_check_key(sock);
+ if (err)
+ return err;
+
+ return hash_accept(sock, newsock, flags);
+}
+
+static struct proto_ops algif_hash_ops_nokey = {
+ .family = PF_ALG,
+
+ .connect = sock_no_connect,
+ .socketpair = sock_no_socketpair,
+ .getname = sock_no_getname,
+ .ioctl = sock_no_ioctl,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .getsockopt = sock_no_getsockopt,
+ .mmap = sock_no_mmap,
+ .bind = sock_no_bind,
+ .setsockopt = sock_no_setsockopt,
+ .poll = sock_no_poll,
+
+ .release = af_alg_release,
+ .sendmsg = hash_sendmsg_nokey,
+ .sendpage = hash_sendpage_nokey,
+ .recvmsg = hash_recvmsg_nokey,
+ .accept = hash_accept_nokey,
+};
+
static void *hash_bind(const char *name, u32 type, u32 mask)
{
- return crypto_alloc_ahash(name, type, mask);
+ struct algif_hash_tfm *tfm;
+ struct crypto_ahash *hash;
+
+ tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
+ if (!tfm)
+ return ERR_PTR(-ENOMEM);
+
+ hash = crypto_alloc_ahash(name, type, mask);
+ if (IS_ERR(hash)) {
+ kfree(tfm);
+ return ERR_CAST(hash);
+ }
+
+ tfm->hash = hash;
+
+ return tfm;
}
static void hash_release(void *private)
{
- crypto_free_ahash(private);
+ struct algif_hash_tfm *tfm = private;
+
+ crypto_free_ahash(tfm->hash);
+ kfree(tfm);
}
static int hash_setkey(void *private, const u8 *key, unsigned int keylen)
{
- return crypto_ahash_setkey(private, key, keylen);
+ struct algif_hash_tfm *tfm = private;
+ int err;
+
+ err = crypto_ahash_setkey(tfm->hash, key, keylen);
+ tfm->has_key = !err;
+
+ return err;
}
static void hash_sock_destruct(struct sock *sk)
@@ -261,12 +400,14 @@ static void hash_sock_destruct(struct sock *sk)
af_alg_release_parent(sk);
}
-static int hash_accept_parent(void *private, struct sock *sk)
+static int hash_accept_parent_nokey(void *private, struct sock *sk)
{
struct hash_ctx *ctx;
struct alg_sock *ask = alg_sk(sk);
- unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(private);
- unsigned ds = crypto_ahash_digestsize(private);
+ struct algif_hash_tfm *tfm = private;
+ struct crypto_ahash *hash = tfm->hash;
+ unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(hash);
+ unsigned ds = crypto_ahash_digestsize(hash);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
@@ -286,7 +427,7 @@ static int hash_accept_parent(void *private, struct sock *sk)
ask->private = ctx;
- ahash_request_set_tfm(&ctx->req, private);
+ ahash_request_set_tfm(&ctx->req, hash);
ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
af_alg_complete, &ctx->completion);
@@ -295,12 +436,24 @@ static int hash_accept_parent(void *private, struct sock *sk)
return 0;
}
+static int hash_accept_parent(void *private, struct sock *sk)
+{
+ struct algif_hash_tfm *tfm = private;
+
+ if (!tfm->has_key && crypto_ahash_has_setkey(tfm->hash))
+ return -ENOKEY;
+
+ return hash_accept_parent_nokey(private, sk);
+}
+
static const struct af_alg_type algif_type_hash = {
.bind = hash_bind,
.release = hash_release,
.setkey = hash_setkey,
.accept = hash_accept_parent,
+ .accept_nokey = hash_accept_parent_nokey,
.ops = &algif_hash_ops,
+ .ops_nokey = &algif_hash_ops_nokey,
.name = "hash",
.owner = THIS_MODULE
};
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 634b4d1ab681..f5e9f9310b48 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -31,6 +31,11 @@ struct skcipher_sg_list {
struct scatterlist sg[0];
};
+struct skcipher_tfm {
+ struct crypto_skcipher *skcipher;
+ bool has_key;
+};
+
struct skcipher_ctx {
struct list_head tsgl;
struct af_alg_sgl rsgl;
@@ -60,18 +65,10 @@ struct skcipher_async_req {
struct skcipher_async_rsgl first_sgl;
struct list_head list;
struct scatterlist *tsg;
- char iv[];
+ atomic_t *inflight;
+ struct skcipher_request req;
};
-#define GET_SREQ(areq, ctx) (struct skcipher_async_req *)((char *)areq + \
- crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req)))
-
-#define GET_REQ_SIZE(ctx) \
- crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req))
-
-#define GET_IV_SIZE(ctx) \
- crypto_skcipher_ivsize(crypto_skcipher_reqtfm(&ctx->req))
-
#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
sizeof(struct scatterlist) - 1)
@@ -97,15 +94,12 @@ static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
static void skcipher_async_cb(struct crypto_async_request *req, int err)
{
- struct sock *sk = req->data;
- struct alg_sock *ask = alg_sk(sk);
- struct skcipher_ctx *ctx = ask->private;
- struct skcipher_async_req *sreq = GET_SREQ(req, ctx);
+ struct skcipher_async_req *sreq = req->data;
struct kiocb *iocb = sreq->iocb;
- atomic_dec(&ctx->inflight);
+ atomic_dec(sreq->inflight);
skcipher_free_async_sgls(sreq);
- kfree(req);
+ kzfree(sreq);
iocb->ki_complete(iocb, err, err);
}
@@ -301,8 +295,11 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
+ struct sock *psk = ask->parent;
+ struct alg_sock *pask = alg_sk(psk);
struct skcipher_ctx *ctx = ask->private;
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req);
+ struct skcipher_tfm *skc = pask->private;
+ struct crypto_skcipher *tfm = skc->skcipher;
unsigned ivsize = crypto_skcipher_ivsize(tfm);
struct skcipher_sg_list *sgl;
struct af_alg_control con = {};
@@ -387,7 +384,8 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
sg = sgl->sg;
- sg_unmark_end(sg + sgl->cur);
+ if (sgl->cur)
+ sg_unmark_end(sg + sgl->cur - 1);
do {
i = sgl->cur;
plen = min_t(int, len, PAGE_SIZE);
@@ -503,37 +501,43 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
+ struct sock *psk = ask->parent;
+ struct alg_sock *pask = alg_sk(psk);
struct skcipher_ctx *ctx = ask->private;
+ struct skcipher_tfm *skc = pask->private;
+ struct crypto_skcipher *tfm = skc->skcipher;
struct skcipher_sg_list *sgl;
struct scatterlist *sg;
struct skcipher_async_req *sreq;
struct skcipher_request *req;
struct skcipher_async_rsgl *last_rsgl = NULL;
- unsigned int txbufs = 0, len = 0, tx_nents = skcipher_all_sg_nents(ctx);
- unsigned int reqlen = sizeof(struct skcipher_async_req) +
- GET_REQ_SIZE(ctx) + GET_IV_SIZE(ctx);
+ unsigned int txbufs = 0, len = 0, tx_nents;
+ unsigned int reqsize = crypto_skcipher_reqsize(tfm);
+ unsigned int ivsize = crypto_skcipher_ivsize(tfm);
int err = -ENOMEM;
bool mark = false;
+ char *iv;
- lock_sock(sk);
- req = kmalloc(reqlen, GFP_KERNEL);
- if (unlikely(!req))
- goto unlock;
+ sreq = kzalloc(sizeof(*sreq) + reqsize + ivsize, GFP_KERNEL);
+ if (unlikely(!sreq))
+ goto out;
- sreq = GET_SREQ(req, ctx);
+ req = &sreq->req;
+ iv = (char *)(req + 1) + reqsize;
sreq->iocb = msg->msg_iocb;
- memset(&sreq->first_sgl, '\0', sizeof(struct skcipher_async_rsgl));
INIT_LIST_HEAD(&sreq->list);
+ sreq->inflight = &ctx->inflight;
+
+ lock_sock(sk);
+ tx_nents = skcipher_all_sg_nents(ctx);
sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
- if (unlikely(!sreq->tsg)) {
- kfree(req);
+ if (unlikely(!sreq->tsg))
goto unlock;
- }
sg_init_table(sreq->tsg, tx_nents);
- memcpy(sreq->iv, ctx->iv, GET_IV_SIZE(ctx));
- skcipher_request_set_tfm(req, crypto_skcipher_reqtfm(&ctx->req));
- skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- skcipher_async_cb, sk);
+ memcpy(iv, ctx->iv, ivsize);
+ skcipher_request_set_tfm(req, tfm);
+ skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
+ skcipher_async_cb, sreq);
while (iov_iter_count(&msg->msg_iter)) {
struct skcipher_async_rsgl *rsgl;
@@ -609,20 +613,22 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
sg_mark_end(sreq->tsg + txbufs - 1);
skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
- len, sreq->iv);
+ len, iv);
err = ctx->enc ? crypto_skcipher_encrypt(req) :
crypto_skcipher_decrypt(req);
if (err == -EINPROGRESS) {
atomic_inc(&ctx->inflight);
err = -EIOCBQUEUED;
+ sreq = NULL;
goto unlock;
}
free:
skcipher_free_async_sgls(sreq);
- kfree(req);
unlock:
skcipher_wmem_wakeup(sk);
release_sock(sk);
+ kzfree(sreq);
+out:
return err;
}
@@ -631,9 +637,12 @@ static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
+ struct sock *psk = ask->parent;
+ struct alg_sock *pask = alg_sk(psk);
struct skcipher_ctx *ctx = ask->private;
- unsigned bs = crypto_skcipher_blocksize(crypto_skcipher_reqtfm(
- &ctx->req));
+ struct skcipher_tfm *skc = pask->private;
+ struct crypto_skcipher *tfm = skc->skcipher;
+ unsigned bs = crypto_skcipher_blocksize(tfm);
struct skcipher_sg_list *sgl;
struct scatterlist *sg;
int err = -EAGAIN;
@@ -642,13 +651,6 @@ static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
lock_sock(sk);
while (msg_data_left(msg)) {
- sgl = list_first_entry(&ctx->tsgl,
- struct skcipher_sg_list, list);
- sg = sgl->sg;
-
- while (!sg->length)
- sg++;
-
if (!ctx->used) {
err = skcipher_wait_for_data(sk, flags);
if (err)
@@ -669,6 +671,13 @@ static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
if (!used)
goto free;
+ sgl = list_first_entry(&ctx->tsgl,
+ struct skcipher_sg_list, list);
+ sg = sgl->sg;
+
+ while (!sg->length)
+ sg++;
+
skcipher_request_set_crypt(&ctx->req, sg, ctx->rsgl.sg, used,
ctx->iv);
@@ -748,19 +757,139 @@ static struct proto_ops algif_skcipher_ops = {
.poll = skcipher_poll,
};
+static int skcipher_check_key(struct socket *sock)
+{
+ int err = 0;
+ struct sock *psk;
+ struct alg_sock *pask;
+ struct skcipher_tfm *tfm;
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+
+ lock_sock(sk);
+ if (ask->refcnt)
+ goto unlock_child;
+
+ psk = ask->parent;
+ pask = alg_sk(ask->parent);
+ tfm = pask->private;
+
+ err = -ENOKEY;
+ lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
+ if (!tfm->has_key)
+ goto unlock;
+
+ if (!pask->refcnt++)
+ sock_hold(psk);
+
+ ask->refcnt = 1;
+ sock_put(psk);
+
+ err = 0;
+
+unlock:
+ release_sock(psk);
+unlock_child:
+ release_sock(sk);
+
+ return err;
+}
+
+static int skcipher_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
+ size_t size)
+{
+ int err;
+
+ err = skcipher_check_key(sock);
+ if (err)
+ return err;
+
+ return skcipher_sendmsg(sock, msg, size);
+}
+
+static ssize_t skcipher_sendpage_nokey(struct socket *sock, struct page *page,
+ int offset, size_t size, int flags)
+{
+ int err;
+
+ err = skcipher_check_key(sock);
+ if (err)
+ return err;
+
+ return skcipher_sendpage(sock, page, offset, size, flags);
+}
+
+static int skcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
+ size_t ignored, int flags)
+{
+ int err;
+
+ err = skcipher_check_key(sock);
+ if (err)
+ return err;
+
+ return skcipher_recvmsg(sock, msg, ignored, flags);
+}
+
+static struct proto_ops algif_skcipher_ops_nokey = {
+ .family = PF_ALG,
+
+ .connect = sock_no_connect,
+ .socketpair = sock_no_socketpair,
+ .getname = sock_no_getname,
+ .ioctl = sock_no_ioctl,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .getsockopt = sock_no_getsockopt,
+ .mmap = sock_no_mmap,
+ .bind = sock_no_bind,
+ .accept = sock_no_accept,
+ .setsockopt = sock_no_setsockopt,
+
+ .release = af_alg_release,
+ .sendmsg = skcipher_sendmsg_nokey,
+ .sendpage = skcipher_sendpage_nokey,
+ .recvmsg = skcipher_recvmsg_nokey,
+ .poll = skcipher_poll,
+};
+
static void *skcipher_bind(const char *name, u32 type, u32 mask)
{
- return crypto_alloc_skcipher(name, type, mask);
+ struct skcipher_tfm *tfm;
+ struct crypto_skcipher *skcipher;
+
+ tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
+ if (!tfm)
+ return ERR_PTR(-ENOMEM);
+
+ skcipher = crypto_alloc_skcipher(name, type, mask);
+ if (IS_ERR(skcipher)) {
+ kfree(tfm);
+ return ERR_CAST(skcipher);
+ }
+
+ tfm->skcipher = skcipher;
+
+ return tfm;
}
static void skcipher_release(void *private)
{
- crypto_free_skcipher(private);
+ struct skcipher_tfm *tfm = private;
+
+ crypto_free_skcipher(tfm->skcipher);
+ kfree(tfm);
}
static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
{
- return crypto_skcipher_setkey(private, key, keylen);
+ struct skcipher_tfm *tfm = private;
+ int err;
+
+ err = crypto_skcipher_setkey(tfm->skcipher, key, keylen);
+ tfm->has_key = !err;
+
+ return err;
}
static void skcipher_wait(struct sock *sk)
@@ -788,24 +917,26 @@ static void skcipher_sock_destruct(struct sock *sk)
af_alg_release_parent(sk);
}
-static int skcipher_accept_parent(void *private, struct sock *sk)
+static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
{
struct skcipher_ctx *ctx;
struct alg_sock *ask = alg_sk(sk);
- unsigned int len = sizeof(*ctx) + crypto_skcipher_reqsize(private);
+ struct skcipher_tfm *tfm = private;
+ struct crypto_skcipher *skcipher = tfm->skcipher;
+ unsigned int len = sizeof(*ctx) + crypto_skcipher_reqsize(skcipher);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
return -ENOMEM;
- ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(private),
+ ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(skcipher),
GFP_KERNEL);
if (!ctx->iv) {
sock_kfree_s(sk, ctx, len);
return -ENOMEM;
}
- memset(ctx->iv, 0, crypto_skcipher_ivsize(private));
+ memset(ctx->iv, 0, crypto_skcipher_ivsize(skcipher));
INIT_LIST_HEAD(&ctx->tsgl);
ctx->len = len;
@@ -818,8 +949,9 @@ static int skcipher_accept_parent(void *private, struct sock *sk)
ask->private = ctx;
- skcipher_request_set_tfm(&ctx->req, private);
- skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ skcipher_request_set_tfm(&ctx->req, skcipher);
+ skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_SLEEP |
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
af_alg_complete, &ctx->completion);
sk->sk_destruct = skcipher_sock_destruct;
@@ -827,12 +959,24 @@ static int skcipher_accept_parent(void *private, struct sock *sk)
return 0;
}
+static int skcipher_accept_parent(void *private, struct sock *sk)
+{
+ struct skcipher_tfm *tfm = private;
+
+ if (!tfm->has_key && crypto_skcipher_has_setkey(tfm->skcipher))
+ return -ENOKEY;
+
+ return skcipher_accept_parent_nokey(private, sk);
+}
+
static const struct af_alg_type algif_type_skcipher = {
.bind = skcipher_bind,
.release = skcipher_release,
.setkey = skcipher_setkey,
.accept = skcipher_accept_parent,
+ .accept_nokey = skcipher_accept_parent_nokey,
.ops = &algif_skcipher_ops,
+ .ops_nokey = &algif_skcipher_ops_nokey,
.name = "skcipher",
.owner = THIS_MODULE
};
diff --git a/crypto/asymmetric_keys/pkcs7_trust.c b/crypto/asymmetric_keys/pkcs7_trust.c
index 90d6d47965b0..ecdb5a2ce085 100644
--- a/crypto/asymmetric_keys/pkcs7_trust.c
+++ b/crypto/asymmetric_keys/pkcs7_trust.c
@@ -178,6 +178,8 @@ int pkcs7_validate_trust(struct pkcs7_message *pkcs7,
int cached_ret = -ENOKEY;
int ret;
+ *_trusted = false;
+
for (p = pkcs7->certs; p; p = p->next)
p->seen = false;
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index 021d39c0ba75..13c4e5a5fe8c 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -494,7 +494,7 @@ int x509_decode_time(time64_t *_t, size_t hdrlen,
unsigned char tag,
const unsigned char *value, size_t vlen)
{
- static const unsigned char month_lengths[] = { 31, 29, 31, 30, 31, 30,
+ static const unsigned char month_lengths[] = { 31, 28, 31, 30, 31, 30,
31, 31, 30, 31, 30, 31 };
const unsigned char *p = value;
unsigned year, mon, day, hour, min, sec, mon_len;
@@ -540,9 +540,9 @@ int x509_decode_time(time64_t *_t, size_t hdrlen,
if (year % 4 == 0) {
mon_len = 29;
if (year % 100 == 0) {
- year /= 100;
- if (year % 4 != 0)
- mon_len = 28;
+ mon_len = 28;
+ if (year % 400 == 0)
+ mon_len = 29;
}
}
}
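
The x509_decode_time() fix restores the full Gregorian rule: the month table now defaults February to 28 days (the old table baked in 29, so ordinary non-leap years accepted Feb 29), and the century check no longer clobbers year with year /= 100 before the value is used to compute the timestamp. The rule in isolation:

    #include <stdbool.h>
    #include <stdio.h>

    static bool is_leap(unsigned year)
    {
        return (year % 4 == 0 && year % 100 != 0) || year % 400 == 0;
    }

    int main(void)
    {
        /* 2016: leap; 1900: century, not leap; 2000: divisible by 400 */
        printf("%d %d %d\n", is_leap(2016), is_leap(1900), is_leap(2000));
        return 0;
    }
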
diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c
index 06f1b60f02b2..4c0a0e271876 100644
--- a/crypto/crc32c_generic.c
+++ b/crypto/crc32c_generic.c
@@ -172,4 +172,3 @@ MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations wrapper for lib/crc32c");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("crc32c");
MODULE_ALIAS_CRYPTO("crc32c-generic");
-MODULE_SOFTDEP("pre: crc32c");
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index 237f3795cfaa..43fe85f20d57 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -499,6 +499,7 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
if (link->dump == NULL)
return -EINVAL;
+ down_read(&crypto_alg_sem);
list_for_each_entry(alg, &crypto_alg_list, cra_list)
dump_alloc += CRYPTO_REPORT_MAXSIZE;
@@ -508,8 +509,11 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
.done = link->done,
.min_dump_alloc = dump_alloc,
};
- return netlink_dump_start(crypto_nlsk, skb, nlh, &c);
+ err = netlink_dump_start(crypto_nlsk, skb, nlh, &c);
}
+ up_read(&crypto_alg_sem);
+
+ return err;
}
err = nlmsg_parse(nlh, crypto_msg_min[type], attrs, CRYPTOCFGA_MAX,
diff --git a/crypto/keywrap.c b/crypto/keywrap.c
index b1d106ce55f3..72014f963ba7 100644
--- a/crypto/keywrap.c
+++ b/crypto/keywrap.c
@@ -212,7 +212,7 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
SEMIBSIZE))
ret = -EBADMSG;
- memzero_explicit(&block, sizeof(struct crypto_kw_block));
+ memzero_explicit(block, sizeof(struct crypto_kw_block));
return ret;
}
@@ -297,7 +297,7 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,
/* establish the IV for the caller to pick up */
memcpy(desc->info, block->A, SEMIBSIZE);
- memzero_explicit(&block, sizeof(struct crypto_kw_block));
+ memzero_explicit(block, sizeof(struct crypto_kw_block));
return 0;
}
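
The keywrap fix is a pointer-level bug: block already points at the scratch crypto_kw_block, so memzero_explicit(&block, sizeof(struct crypto_kw_block)) wiped sizeof(struct crypto_kw_block) bytes starting at the address of the pointer variable itself, scribbling over unrelated stack while leaving the sensitive block intact. Illustration:

    #include <stdio.h>
    #include <string.h>

    struct blk { unsigned char a[8], r[8]; };

    int main(void)
    {
        struct blk scratch = { "secret!", "secret!" };
        struct blk *block = &scratch;

        /* correct: zeroes the block the pointer refers to */
        memset(block, 0, sizeof(struct blk));

        /* the old form, memset(&block, 0, sizeof(struct blk)), would
         * instead overwrite the pointer variable and whatever follows
         * it on the stack, and never touch *block */

        printf("%d\n", scratch.a[0]);   /* 0 */
        return 0;
    }
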
diff --git a/crypto/shash.c b/crypto/shash.c
index ecb1e3d39bf0..359754591653 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -354,9 +354,10 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
crt->final = shash_async_final;
crt->finup = shash_async_finup;
crt->digest = shash_async_digest;
+ crt->setkey = shash_async_setkey;
+
+ crt->has_setkey = alg->setkey != shash_no_setkey;
- if (alg->setkey)
- crt->setkey = shash_async_setkey;
if (alg->export)
crt->export = shash_async_export;
if (alg->import)
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 7591928be7ca..d199c0b1751c 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -118,6 +118,7 @@ static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
skcipher->decrypt = skcipher_decrypt_blkcipher;
skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
+ skcipher->has_setkey = calg->cra_blkcipher.max_keysize;
return 0;
}
@@ -210,6 +211,7 @@ static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
sizeof(struct ablkcipher_request);
+ skcipher->has_setkey = calg->cra_ablkcipher.max_keysize;
return 0;
}
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 3405f7a41e25..5fdac394207a 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -465,6 +465,15 @@ static struct dmi_system_id video_dmi_table[] = {
* as brightness control does not work.
*/
{
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
+ .callback = video_disable_backlight_sysfs_if,
+ .ident = "Toshiba Portege R700",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R700"),
+ },
+ },
+ {
/* https://bugs.freedesktop.org/show_bug.cgi?id=82634 */
.callback = video_disable_backlight_sysfs_if,
.ident = "Toshiba Portege R830",
@@ -473,6 +482,15 @@ static struct dmi_system_id video_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R830"),
},
},
+ {
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
+ .callback = video_disable_backlight_sysfs_if,
+ .ident = "Toshiba Satellite R830",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE R830"),
+ },
+ },
/*
* Some machine's _DOD IDs don't have bit 31(Device ID Scheme) set
* but the IDs actually follow the Device ID Scheme.
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index aa45d4802707..11d8209e6e5d 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -468,37 +468,16 @@ static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
nfit_mem->bdw = NULL;
}
-static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
+static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{
u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
struct nfit_memdev *nfit_memdev;
struct nfit_flush *nfit_flush;
- struct nfit_dcr *nfit_dcr;
struct nfit_bdw *nfit_bdw;
struct nfit_idt *nfit_idt;
u16 idt_idx, range_index;
- list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
- if (nfit_dcr->dcr->region_index != dcr)
- continue;
- nfit_mem->dcr = nfit_dcr->dcr;
- break;
- }
-
- if (!nfit_mem->dcr) {
- dev_dbg(acpi_desc->dev, "SPA %d missing:%s%s\n",
- spa->range_index, __to_nfit_memdev(nfit_mem)
- ? "" : " MEMDEV", nfit_mem->dcr ? "" : " DCR");
- return -ENODEV;
- }
-
- /*
- * We've found enough to create an nvdimm, optionally
- * find an associated BDW
- */
- list_add(&nfit_mem->list, &acpi_desc->dimms);
-
list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
if (nfit_bdw->bdw->region_index != dcr)
continue;
@@ -507,12 +486,12 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
}
if (!nfit_mem->bdw)
- return 0;
+ return;
nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);
if (!nfit_mem->spa_bdw)
- return 0;
+ return;
range_index = nfit_mem->spa_bdw->range_index;
list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
@@ -537,8 +516,6 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
}
break;
}
-
- return 0;
}
static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
@@ -547,7 +524,6 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
struct nfit_mem *nfit_mem, *found;
struct nfit_memdev *nfit_memdev;
int type = nfit_spa_type(spa);
- u16 dcr;
switch (type) {
case NFIT_SPA_DCR:
@@ -558,14 +534,18 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
}
list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
- int rc;
+ struct nfit_dcr *nfit_dcr;
+ u32 device_handle;
+ u16 dcr;
if (nfit_memdev->memdev->range_index != spa->range_index)
continue;
found = NULL;
dcr = nfit_memdev->memdev->region_index;
+ device_handle = nfit_memdev->memdev->device_handle;
list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
- if (__to_nfit_memdev(nfit_mem)->region_index == dcr) {
+ if (__to_nfit_memdev(nfit_mem)->device_handle
+ == device_handle) {
found = nfit_mem;
break;
}
@@ -578,6 +558,31 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
if (!nfit_mem)
return -ENOMEM;
INIT_LIST_HEAD(&nfit_mem->list);
+ list_add(&nfit_mem->list, &acpi_desc->dimms);
+ }
+
+ list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
+ if (nfit_dcr->dcr->region_index != dcr)
+ continue;
+ /*
+ * Record the control region for the dimm. For
+ * the ACPI 6.1 case, where there are separate
+ * control regions for the pmem vs blk
+ * interfaces, be sure to record the extended
+ * blk details.
+ */
+ if (!nfit_mem->dcr)
+ nfit_mem->dcr = nfit_dcr->dcr;
+ else if (nfit_mem->dcr->windows == 0
+ && nfit_dcr->dcr->windows)
+ nfit_mem->dcr = nfit_dcr->dcr;
+ break;
+ }
+
+ if (dcr && !nfit_mem->dcr) {
+ dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
+ spa->range_index, dcr);
+ return -ENODEV;
}
if (type == NFIT_SPA_DCR) {
@@ -594,6 +599,7 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
nfit_mem->idt_dcr = nfit_idt->idt;
break;
}
+ nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
} else {
/*
* A single dimm may belong to multiple SPA-PM
@@ -602,13 +608,6 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
*/
nfit_mem->memdev_pmem = nfit_memdev->memdev;
}
-
- if (found)
- continue;
-
- rc = nfit_mem_add(acpi_desc, nfit_mem, spa);
- if (rc)
- return rc;
}
return 0;
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index c9336751e5e3..8a10a7ae6a8a 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -409,7 +409,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
return 0;
}
- if (pci_has_managed_irq(dev))
+ if (dev->irq_managed && dev->irq > 0)
return 0;
entry = acpi_pci_irq_lookup(dev, pin);
@@ -454,7 +454,8 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
kfree(entry);
return rc;
}
- pci_set_managed_irq(dev, rc);
+ dev->irq = rc;
+ dev->irq_managed = 1;
if (link)
snprintf(link_desc, sizeof(link_desc), " -> Link[%s]", link);
@@ -477,9 +478,17 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
u8 pin;
pin = dev->pin;
- if (!pin || !pci_has_managed_irq(dev))
+ if (!pin || !dev->irq_managed || dev->irq <= 0)
return;
+ /* Keep IOAPIC pin configuration when suspending */
+ if (dev->dev.power.is_prepared)
+ return;
+#ifdef CONFIG_PM
+ if (dev->dev.power.runtime_status == RPM_SUSPENDING)
+ return;
+#endif
+
entry = acpi_pci_irq_lookup(dev, pin);
if (!entry)
return;
@@ -499,6 +508,6 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin));
if (gsi >= 0) {
acpi_unregister_gsi(gsi);
- pci_reset_managed_irq(dev);
+ dev->irq_managed = 0;
}
}
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index cdc5c2599beb..627f8fbb5e9a 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -26,8 +26,20 @@
#ifdef CONFIG_X86
#define valid_IRQ(i) (((i) != 0) && ((i) != 2))
+static inline bool acpi_iospace_resource_valid(struct resource *res)
+{
+ /* On X86 IO space is limited to the [0 - 64K] IO port range */
+ return res->end < 0x10003;
+}
#else
#define valid_IRQ(i) (true)
+/*
+ * On arches other than X86, ACPI IO descriptors contain MMIO CPU physical
+ * addresses that map IO space into the CPU physical address space, so IO
+ * space resources can be placed anywhere in the 64-bit physical address
+ * space.
+ */
+static inline bool
+acpi_iospace_resource_valid(struct resource *res) { return true; }
#endif
static bool acpi_dev_resource_len_valid(u64 start, u64 end, u64 len, bool io)
@@ -126,7 +138,7 @@ static void acpi_dev_ioresource_flags(struct resource *res, u64 len,
if (!acpi_dev_resource_len_valid(res->start, res->end, len, true))
res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;
- if (res->end >= 0x10003)
+ if (!acpi_iospace_resource_valid(res))
res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;
if (io_decode == ACPI_DECODE_16)
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 0d94621dc856..e3322adaaae0 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -714,6 +714,7 @@ static int acpi_hibernation_enter(void)
static void acpi_hibernation_leave(void)
{
+ pm_set_resume_via_firmware();
/*
* If ACPI is not enabled by the BIOS and the boot kernel, we need to
* enable it here.
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index daaf1c4e1e0f..80e55cb0827b 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -135,14 +135,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
},
},
- {
- .callback = video_detect_force_vendor,
- .ident = "Dell Inspiron 5737",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5737"),
- },
- },
/*
* These models have a working acpi_video backlight control, and using
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index f0ce9959d14d..57f52a2afa35 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2081,7 +2081,7 @@ static int binder_thread_write(struct binder_proc *proc,
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
- ptr += sizeof(void *);
+ ptr += sizeof(cookie);
list_for_each_entry(w, &proc->delivered_death, entry) {
struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
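
In the binder change, cookie is a binder_uintptr_t, a fixed-width type in the userspace ABI, so the parse cursor must advance by sizeof(cookie) rather than sizeof(void *): the two only coincide when the kernel's pointer size happens to match the ABI width. A minimal illustration, assuming the 64-bit binder ABI where binder_uintptr_t is a u64:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t binder_uintptr_t;  /* fixed ABI width (assumed here) */

    int main(void)
    {
        /* on a 32-bit build sizeof(void *) is 4, so advancing by it
         * would desynchronize the cursor from the 8-byte ABI field */
        printf("sizeof(binder_uintptr_t)=%zu sizeof(void *)=%zu\n",
               sizeof(binder_uintptr_t), sizeof(void *));
        return 0;
    }
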
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index cdfbcc54821f..60a15831c009 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -264,6 +264,26 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
{ PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
{ PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b2), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b3), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b4), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b5), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b6), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b7), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19bE), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19bF), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c0), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c1), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c2), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c3), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c4), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c5), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c6), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c7), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
{ PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */
{ PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
@@ -347,15 +367,21 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Lewisburg AHCI*/
{ PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
{ PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa1d2), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa1d6), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
{ PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 4665512dae44..998c6a85ad89 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -495,8 +495,8 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
}
}
- /* fabricate port_map from cap.nr_ports */
- if (!port_map) {
+ /* fabricate port_map from cap.nr_ports for < AHCI 1.3 */
+ if (!port_map && vers < 0x10300) {
port_map = (1 << ahci_nr_ports(cap)) - 1;
dev_warn(dev, "forcing PORTS_IMPL to 0x%x\n", port_map);
@@ -1142,8 +1142,7 @@ static void ahci_port_init(struct device *dev, struct ata_port *ap,
/* mark esata ports */
tmp = readl(port_mmio + PORT_CMD);
- if ((tmp & PORT_CMD_HPCP) ||
- ((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS)))
+ if ((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS))
ap->pflags |= ATA_PFLAG_EXTERNAL;
}
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 7e959f90c020..e417e1a1d02c 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -675,19 +675,18 @@ static int ata_ioc32(struct ata_port *ap)
int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev,
int cmd, void __user *arg)
{
- int val = -EINVAL, rc = -EINVAL;
+ unsigned long val;
+ int rc = -EINVAL;
unsigned long flags;
switch (cmd) {
- case ATA_IOC_GET_IO32:
+ case HDIO_GET_32BIT:
spin_lock_irqsave(ap->lock, flags);
val = ata_ioc32(ap);
spin_unlock_irqrestore(ap->lock, flags);
- if (copy_to_user(arg, &val, 1))
- return -EFAULT;
- return 0;
+ return put_user(val, (unsigned long __user *)arg);
- case ATA_IOC_SET_IO32:
+ case HDIO_SET_32BIT:
val = (unsigned long) arg;
rc = 0;
spin_lock_irqsave(ap->lock, flags);
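
The HDIO_GET_32BIT path above had two problems: val was an int while userspace expects an unsigned long, and copy_to_user(arg, &val, 1) copied exactly one byte of it; put_user() through an unsigned long __user pointer writes the whole object. The shape of the bug in userspace terms:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned long val = 1;          /* ioctl result */
        unsigned long user = 0xdeadbeefUL;

        memcpy(&user, &val, 1);         /* old: low byte only (little-endian) */
        printf("0x%lx\n", user);        /* 0xdeadbe01 on little-endian */

        memcpy(&user, &val, sizeof(val));   /* fix's effect: the whole long */
        printf("0x%lx\n", user);            /* 0x1 */
        return 0;
    }
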
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index cdf6215a9a22..7dbba387d12a 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -997,12 +997,9 @@ static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
struct ata_port *ap = qc->ap;
- unsigned long flags;
if (ap->ops->error_handler) {
if (in_wq) {
- spin_lock_irqsave(ap->lock, flags);
-
/* EH might have kicked in while host lock is
* released.
*/
@@ -1014,8 +1011,6 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
} else
ata_port_freeze(ap);
}
-
- spin_unlock_irqrestore(ap->lock, flags);
} else {
if (likely(!(qc->err_mask & AC_ERR_HSM)))
ata_qc_complete(qc);
@@ -1024,10 +1019,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
}
} else {
if (in_wq) {
- spin_lock_irqsave(ap->lock, flags);
ata_sff_irq_on(ap);
ata_qc_complete(qc);
- spin_unlock_irqrestore(ap->lock, flags);
} else
ata_qc_complete(qc);
}
@@ -1048,9 +1041,10 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
{
struct ata_link *link = qc->dev->link;
struct ata_eh_info *ehi = &link->eh_info;
- unsigned long flags = 0;
int poll_next;
+ lockdep_assert_held(ap->lock);
+
WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
/* Make sure ata_sff_qc_issue() does not throw things
@@ -1112,14 +1106,6 @@ fsm_start:
}
}
- /* Send the CDB (atapi) or the first data block (ata pio out).
- * During the state transition, interrupt handler shouldn't
- * be invoked before the data transfer is complete and
- * hsm_task_state is changed. Hence, the following locking.
- */
- if (in_wq)
- spin_lock_irqsave(ap->lock, flags);
-
if (qc->tf.protocol == ATA_PROT_PIO) {
/* PIO data out protocol.
* send first data block.
@@ -1135,9 +1121,6 @@ fsm_start:
/* send CDB */
atapi_send_cdb(ap, qc);
- if (in_wq)
- spin_unlock_irqrestore(ap->lock, flags);
-
/* if polling, ata_sff_pio_task() handles the rest.
* otherwise, interrupt handler takes over from here.
*/
@@ -1361,12 +1344,14 @@ static void ata_sff_pio_task(struct work_struct *work)
u8 status;
int poll_next;
+ spin_lock_irq(ap->lock);
+
BUG_ON(ap->sff_pio_task_link == NULL);
/* qc can be NULL if timeout occurred */
qc = ata_qc_from_tag(ap, link->active_tag);
if (!qc) {
ap->sff_pio_task_link = NULL;
- return;
+ goto out_unlock;
}
fsm_start:
@@ -1381,11 +1366,14 @@ fsm_start:
*/
status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
if (status & ATA_BUSY) {
+ spin_unlock_irq(ap->lock);
ata_msleep(ap, 2);
+ spin_lock_irq(ap->lock);
+
status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
if (status & ATA_BUSY) {
ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
- return;
+ goto out_unlock;
}
}
@@ -1402,6 +1390,8 @@ fsm_start:
*/
if (poll_next)
goto fsm_start;
+out_unlock:
+ spin_unlock_irq(ap->lock);
}
/**
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index 12fe0f3bb7e9..c8b6a780a290 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -32,6 +32,8 @@
#include <linux/libata.h>
#include <scsi/scsi_host.h>
+#include <asm/mach-rc32434/rb.h>
+
#define DRV_NAME "pata-rb532-cf"
#define DRV_VERSION "0.1.0"
#define DRV_DESC "PATA driver for RouterBOARD 532 Compact Flash"
@@ -107,6 +109,7 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
int gpio;
struct resource *res;
struct ata_host *ah;
+ struct cf_device *pdata;
struct rb532_cf_info *info;
int ret;
@@ -122,7 +125,13 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
return -ENOENT;
}
- gpio = irq_to_gpio(irq);
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data specified\n");
+ return -EINVAL;
+ }
+
+ gpio = pdata->gpio_pin;
if (gpio < 0) {
dev_err(&pdev->dev, "no GPIO found for irq%d\n", irq);
return -ENOENT;
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 1dd6d3bf1098..176b59f5bc47 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -513,10 +513,15 @@ static int platform_drv_probe(struct device *_dev)
return ret;
ret = dev_pm_domain_attach(_dev, true);
- if (ret != -EPROBE_DEFER && drv->probe) {
- ret = drv->probe(dev);
- if (ret)
- dev_pm_domain_detach(_dev, true);
+ if (ret != -EPROBE_DEFER) {
+ if (drv->probe) {
+ ret = drv->probe(dev);
+ if (ret)
+ dev_pm_domain_detach(_dev, true);
+ } else {
+ /* don't fail if just dev_pm_domain_attach failed */
+ ret = 0;
+ }
}
if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index a5880f4ab40e..1914c63ca8b1 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -338,7 +338,7 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
if (unlikely(bio->bi_rw & REQ_DISCARD)) {
if (sector & ((PAGE_SIZE >> SECTOR_SHIFT) - 1) ||
- bio->bi_iter.bi_size & PAGE_MASK)
+ bio->bi_iter.bi_size & ~PAGE_MASK)
goto io_error;
discard_from_brd(brd, sector, bio->bi_iter.bi_size);
goto out;
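The brd fix above is easy to misread: PAGE_MASK is ~(PAGE_SIZE - 1), i.e. it keeps the high bits, so bi_size & PAGE_MASK is nonzero for any transfer of a page or more and says nothing about alignment. Testing alignment needs the low bits. A worked example with 4 KiB pages (PAGE_MASK = 0xfffff000 on 32-bit):

    unsigned int size = 8192;   /* two whole pages, aligned               */
    size & PAGE_MASK;           /* 0x2000: nonzero — the old test would
                                 * wrongly reject this aligned bio        */
    size & ~PAGE_MASK;          /* 0x0: correctly accepted                */

    size = 8200;                /* 8 stray bytes, misaligned              */
    size & ~PAGE_MASK;          /* 0x8: nonzero, correctly rejected       */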
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 3457ac8c03e2..55d3d1da72de 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -173,7 +173,13 @@ static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd)
{
struct request *rq;
+ if (mtip_check_surprise_removal(dd->pdev))
+ return NULL;
+
rq = blk_mq_alloc_request(dd->queue, 0, __GFP_RECLAIM, true);
+ if (IS_ERR(rq))
+ return NULL;
+
return blk_mq_rq_to_pdu(rq);
}
@@ -233,15 +239,9 @@ static void mtip_async_complete(struct mtip_port *port,
"Command tag %d failed due to TFE\n", tag);
}
- /* Unmap the DMA scatter list entries */
- dma_unmap_sg(&dd->pdev->dev, cmd->sg, cmd->scatter_ents, cmd->direction);
-
rq = mtip_rq_from_tag(dd, tag);
- if (unlikely(cmd->unaligned))
- up(&port->cmd_slot_unal);
-
- blk_mq_end_request(rq, status ? -EIO : 0);
+ blk_mq_complete_request(rq, status);
}
/*
@@ -581,6 +581,8 @@ static void mtip_completion(struct mtip_port *port,
dev_warn(&port->dd->pdev->dev,
"Internal command %d completed with TFE\n", tag);
+ command->comp_func = NULL;
+ command->comp_data = NULL;
complete(waiting);
}
@@ -618,8 +620,6 @@ static void mtip_handle_tfe(struct driver_data *dd)
port = dd->port;
- set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
-
if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n");
@@ -628,7 +628,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
cmd->comp_func(port, MTIP_TAG_INTERNAL,
cmd, PORT_IRQ_TF_ERR);
}
- goto handle_tfe_exit;
+ return;
}
/* clear the tag accumulator */
@@ -701,7 +701,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
fail_reason = "thermal shutdown";
}
if (buf[288] == 0xBF) {
- set_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag);
+ set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag);
dev_info(&dd->pdev->dev,
"Drive indicates rebuild has failed. Secure erase required.\n");
fail_all_ncq_cmds = 1;
@@ -771,11 +771,6 @@ static void mtip_handle_tfe(struct driver_data *dd)
}
}
print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);
-
-handle_tfe_exit:
- /* clear eh_active */
- clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
- wake_up_interruptible(&port->svc_wait);
}
/*
@@ -1007,6 +1002,7 @@ static bool mtip_pause_ncq(struct mtip_port *port,
(fis->features == 0x27 || fis->features == 0x72 ||
fis->features == 0x62 || fis->features == 0x26))) {
clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
+ clear_bit(MTIP_DDF_REBUILD_FAILED_BIT, &port->dd->dd_flag);
/* Com reset after secure erase or lowlevel format */
mtip_restart_port(port);
clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
@@ -1021,12 +1017,14 @@ static bool mtip_pause_ncq(struct mtip_port *port,
*
* @port Pointer to port data structure
* @timeout Max duration to wait (ms)
+ * @atomic gfp_t flag: GFP_KERNEL if the caller may block, otherwise busy-wait
*
* return value
* 0 Success
* -EBUSY Commands still active
*/
-static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
+static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout,
+ gfp_t atomic)
{
unsigned long to;
unsigned int n;
@@ -1037,16 +1035,21 @@ static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
to = jiffies + msecs_to_jiffies(timeout);
do {
if (test_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags) &&
- test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
+ test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags) &&
+ atomic == GFP_KERNEL) {
msleep(20);
continue; /* svc thd is actively issuing commands */
}
- msleep(100);
+ if (atomic == GFP_KERNEL) {
+ msleep(100);
+ } else {
+ cpu_relax();
+ udelay(100);
+ }
+
if (mtip_check_surprise_removal(port->dd->pdev))
goto err_fault;
- if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
- goto err_fault;
/*
* Ignore s_active bit 0 of array element 0.
@@ -1099,6 +1102,7 @@ static int mtip_exec_internal_command(struct mtip_port *port,
struct mtip_cmd *int_cmd;
struct driver_data *dd = port->dd;
int rv = 0;
+ unsigned long start;
/* Make sure the buffer is 8 byte aligned. This is asic specific. */
if (buffer & 0x00000007) {
@@ -1107,6 +1111,10 @@ static int mtip_exec_internal_command(struct mtip_port *port,
}
int_cmd = mtip_get_int_command(dd);
+ if (!int_cmd) {
+ dbg_printk(MTIP_DRV_NAME "Unable to allocate tag for PIO cmd\n");
+ return -EFAULT;
+ }
set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
@@ -1119,7 +1127,7 @@ static int mtip_exec_internal_command(struct mtip_port *port,
if (fis->command != ATA_CMD_STANDBYNOW1) {
/* wait for io to complete if non atomic */
if (mtip_quiesce_io(port,
- MTIP_QUIESCE_IO_TIMEOUT_MS) < 0) {
+ MTIP_QUIESCE_IO_TIMEOUT_MS, atomic) < 0) {
dev_warn(&dd->pdev->dev,
"Failed to quiesce IO\n");
mtip_put_int_command(dd, int_cmd);
@@ -1162,6 +1170,8 @@ static int mtip_exec_internal_command(struct mtip_port *port,
/* Populate the command header */
int_cmd->command_header->byte_count = 0;
+ start = jiffies;
+
/* Issue the command to the hardware */
mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL);
@@ -1170,10 +1180,12 @@ static int mtip_exec_internal_command(struct mtip_port *port,
if ((rv = wait_for_completion_interruptible_timeout(
&wait,
msecs_to_jiffies(timeout))) <= 0) {
+
if (rv == -ERESTARTSYS) { /* interrupted */
dev_err(&dd->pdev->dev,
- "Internal command [%02X] was interrupted after %lu ms\n",
- fis->command, timeout);
+ "Internal command [%02X] was interrupted after %u ms\n",
+ fis->command,
+ jiffies_to_msecs(jiffies - start));
rv = -EINTR;
goto exec_ic_exit;
} else if (rv == 0) /* timeout */
@@ -2897,6 +2909,42 @@ static int mtip_ftl_rebuild_poll(struct driver_data *dd)
return -EFAULT;
}
+static void mtip_softirq_done_fn(struct request *rq)
+{
+ struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ struct driver_data *dd = rq->q->queuedata;
+
+ /* Unmap the DMA scatter list entries */
+ dma_unmap_sg(&dd->pdev->dev, cmd->sg, cmd->scatter_ents,
+ cmd->direction);
+
+ if (unlikely(cmd->unaligned))
+ up(&dd->port->cmd_slot_unal);
+
+ blk_mq_end_request(rq, rq->errors);
+}
+
+static void mtip_abort_cmd(struct request *req, void *data,
+ bool reserved)
+{
+ struct driver_data *dd = data;
+
+ dbg_printk(MTIP_DRV_NAME " Aborting request, tag = %d\n", req->tag);
+
+ clear_bit(req->tag, dd->port->cmds_to_issue);
+ req->errors = -EIO;
+ mtip_softirq_done_fn(req);
+}
+
+static void mtip_queue_cmd(struct request *req, void *data,
+ bool reserved)
+{
+ struct driver_data *dd = data;
+
+ set_bit(req->tag, dd->port->cmds_to_issue);
+ blk_abort_request(req);
+}
+
/*
* service thread to issue queued commands
*
@@ -2909,7 +2957,7 @@ static int mtip_ftl_rebuild_poll(struct driver_data *dd)
static int mtip_service_thread(void *data)
{
struct driver_data *dd = (struct driver_data *)data;
- unsigned long slot, slot_start, slot_wrap;
+ unsigned long slot, slot_start, slot_wrap, to;
unsigned int num_cmd_slots = dd->slot_groups * 32;
struct mtip_port *port = dd->port;
@@ -2924,9 +2972,7 @@ static int mtip_service_thread(void *data)
* is in progress nor error handling is active
*/
wait_event_interruptible(port->svc_wait, (port->flags) &&
- !(port->flags & MTIP_PF_PAUSE_IO));
-
- set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
+ (port->flags & MTIP_PF_SVC_THD_WORK));
if (kthread_should_stop() ||
test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
@@ -2936,6 +2982,8 @@ static int mtip_service_thread(void *data)
&dd->dd_flag)))
goto st_out;
+ set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
+
restart_eh:
/* Demux bits: start with error handling */
if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags)) {
@@ -2946,6 +2994,32 @@ restart_eh:
if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags))
goto restart_eh;
+ if (test_bit(MTIP_PF_TO_ACTIVE_BIT, &port->flags)) {
+ to = jiffies + msecs_to_jiffies(5000);
+
+ do {
+ mdelay(100);
+ } while (atomic_read(&dd->irq_workers_active) != 0 &&
+ time_before(jiffies, to));
+
+ if (atomic_read(&dd->irq_workers_active) != 0)
+ dev_warn(&dd->pdev->dev,
+ "Completion workers still active!");
+
+ spin_lock(dd->queue->queue_lock);
+ blk_mq_all_tag_busy_iter(*dd->tags.tags,
+ mtip_queue_cmd, dd);
+ spin_unlock(dd->queue->queue_lock);
+
+ set_bit(MTIP_PF_ISSUE_CMDS_BIT, &dd->port->flags);
+
+ if (mtip_device_reset(dd))
+ blk_mq_all_tag_busy_iter(*dd->tags.tags,
+ mtip_abort_cmd, dd);
+
+ clear_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags);
+ }
+
if (test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
slot = 1;
/* used to restrict the loop to one iteration */
@@ -2978,10 +3052,8 @@ restart_eh:
}
if (test_bit(MTIP_PF_REBUILD_BIT, &port->flags)) {
- if (mtip_ftl_rebuild_poll(dd) < 0)
- set_bit(MTIP_DDF_REBUILD_FAILED_BIT,
- &dd->dd_flag);
- clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
+ if (mtip_ftl_rebuild_poll(dd) == 0)
+ clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
}
}
@@ -3096,7 +3168,7 @@ static int mtip_hw_get_identify(struct driver_data *dd)
if (buf[288] == 0xBF) {
dev_info(&dd->pdev->dev,
"Drive indicates rebuild has failed.\n");
- /* TODO */
+ set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag);
}
}
@@ -3270,20 +3342,25 @@ out1:
return rv;
}
-static void mtip_standby_drive(struct driver_data *dd)
+static int mtip_standby_drive(struct driver_data *dd)
{
- if (dd->sr)
- return;
+ int rv = 0;
+ if (dd->sr || !dd->port)
+ return -ENODEV;
/*
* Send standby immediate (E0h) to the drive so that it
* saves its state.
*/
if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags) &&
- !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))
- if (mtip_standby_immediate(dd->port))
+ !test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag) &&
+ !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)) {
+ rv = mtip_standby_immediate(dd->port);
+ if (rv)
dev_warn(&dd->pdev->dev,
"STANDBY IMMEDIATE failed\n");
+ }
+ return rv;
}
/*
@@ -3296,10 +3373,6 @@ static void mtip_standby_drive(struct driver_data *dd)
*/
static int mtip_hw_exit(struct driver_data *dd)
{
- /*
- * Send standby immediate (E0h) to the drive so that it
- * saves its state.
- */
if (!dd->sr) {
/* de-initialize the port. */
mtip_deinit_port(dd->port);
@@ -3341,8 +3414,7 @@ static int mtip_hw_shutdown(struct driver_data *dd)
* Send standby immediate (E0h) to the drive so that it
* saves its state.
*/
- if (!dd->sr && dd->port)
- mtip_standby_immediate(dd->port);
+ mtip_standby_drive(dd);
return 0;
}
@@ -3365,7 +3437,7 @@ static int mtip_hw_suspend(struct driver_data *dd)
* Send standby immediate (E0h) to the drive
* so that it saves its state.
*/
- if (mtip_standby_immediate(dd->port) != 0) {
+ if (mtip_standby_drive(dd) != 0) {
dev_err(&dd->pdev->dev,
"Failed standby-immediate command\n");
return -EFAULT;
@@ -3603,6 +3675,28 @@ static int mtip_block_getgeo(struct block_device *dev,
return 0;
}
+static int mtip_block_open(struct block_device *dev, fmode_t mode)
+{
+ struct driver_data *dd;
+
+ if (dev && dev->bd_disk) {
+ dd = (struct driver_data *) dev->bd_disk->private_data;
+
+ if (dd) {
+ if (test_bit(MTIP_DDF_REMOVAL_BIT,
+ &dd->dd_flag)) {
+ return -ENODEV;
+ }
+ return 0;
+ }
+ }
+ return -ENODEV;
+}
+
+void mtip_block_release(struct gendisk *disk, fmode_t mode)
+{
+}
+
/*
* Block device operation function.
*
@@ -3610,6 +3704,8 @@ static int mtip_block_getgeo(struct block_device *dev,
* layer.
*/
static const struct block_device_operations mtip_block_ops = {
+ .open = mtip_block_open,
+ .release = mtip_block_release,
.ioctl = mtip_block_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = mtip_block_compat_ioctl,
@@ -3671,10 +3767,9 @@ static int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
rq_data_dir(rq))) {
return -ENODATA;
}
- if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)))
+ if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag) ||
+ test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)))
return -ENODATA;
- if (test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag))
- return -ENXIO;
}
if (rq->cmd_flags & REQ_DISCARD) {
@@ -3786,11 +3881,33 @@ static int mtip_init_cmd(void *data, struct request *rq, unsigned int hctx_idx,
return 0;
}
+static enum blk_eh_timer_return mtip_cmd_timeout(struct request *req,
+ bool reserved)
+{
+ struct driver_data *dd = req->q->queuedata;
+ int ret = BLK_EH_RESET_TIMER;
+
+ if (reserved)
+ goto exit_handler;
+
+ if (test_bit(req->tag, dd->port->cmds_to_issue))
+ goto exit_handler;
+
+ if (test_and_set_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags))
+ goto exit_handler;
+
+ wake_up_interruptible(&dd->port->svc_wait);
+exit_handler:
+ return ret;
+}
+
static struct blk_mq_ops mtip_mq_ops = {
.queue_rq = mtip_queue_rq,
.map_queue = blk_mq_map_queue,
.init_request = mtip_init_cmd,
.exit_request = mtip_free_cmd,
+ .complete = mtip_softirq_done_fn,
+ .timeout = mtip_cmd_timeout,
};
/*
@@ -3857,7 +3974,6 @@ static int mtip_block_initialize(struct driver_data *dd)
mtip_hw_debugfs_init(dd);
-skip_create_disk:
memset(&dd->tags, 0, sizeof(dd->tags));
dd->tags.ops = &mtip_mq_ops;
dd->tags.nr_hw_queues = 1;
@@ -3867,12 +3983,13 @@ skip_create_disk:
dd->tags.numa_node = dd->numa_node;
dd->tags.flags = BLK_MQ_F_SHOULD_MERGE;
dd->tags.driver_data = dd;
+ dd->tags.timeout = MTIP_NCQ_CMD_TIMEOUT_MS;
rv = blk_mq_alloc_tag_set(&dd->tags);
if (rv) {
dev_err(&dd->pdev->dev,
"Unable to allocate request queue\n");
- goto block_queue_alloc_init_error;
+ goto block_queue_alloc_tag_error;
}
/* Allocate the request queue. */
@@ -3887,6 +4004,7 @@ skip_create_disk:
dd->disk->queue = dd->queue;
dd->queue->queuedata = dd;
+skip_create_disk:
/* Initialize the protocol layer. */
wait_for_rebuild = mtip_hw_get_identify(dd);
if (wait_for_rebuild < 0) {
@@ -3983,8 +4101,9 @@ kthread_run_error:
read_capacity_error:
init_hw_cmds_error:
blk_cleanup_queue(dd->queue);
- blk_mq_free_tag_set(&dd->tags);
block_queue_alloc_init_error:
+ blk_mq_free_tag_set(&dd->tags);
+block_queue_alloc_tag_error:
mtip_hw_debugfs_exit(dd);
disk_index_error:
spin_lock(&rssd_index_lock);
@@ -4001,6 +4120,22 @@ protocol_init_error:
return rv;
}
+static void mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv)
+{
+ struct driver_data *dd = (struct driver_data *)data;
+ struct mtip_cmd *cmd;
+
+ if (likely(!reserv))
+ blk_mq_complete_request(rq, -ENODEV);
+ else if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &dd->port->flags)) {
+
+ cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
+ if (cmd->comp_func)
+ cmd->comp_func(dd->port, MTIP_TAG_INTERNAL,
+ cmd, -ENODEV);
+ }
+}
+
/*
* Block layer deinitialization function.
*
@@ -4032,12 +4167,23 @@ static int mtip_block_remove(struct driver_data *dd)
}
}
- if (!dd->sr)
- mtip_standby_drive(dd);
+ if (!dd->sr) {
+ /*
+ * Explicitly wait here for IOs to quiesce,
+ * as mtip_standby_drive usually won't wait for IOs.
+ */
+ if (!mtip_quiesce_io(dd->port, MTIP_QUIESCE_IO_TIMEOUT_MS,
+ GFP_KERNEL))
+ mtip_standby_drive(dd);
+ }
else
dev_info(&dd->pdev->dev, "device %s surprise removal\n",
dd->disk->disk_name);
+ blk_mq_freeze_queue_start(dd->queue);
+ blk_mq_stop_hw_queues(dd->queue);
+ blk_mq_all_tag_busy_iter(dd->tags.tags[0], mtip_no_dev_cleanup, dd);
+
/*
* Delete our gendisk structure. This also removes the device
* from /dev
@@ -4047,7 +4193,8 @@ static int mtip_block_remove(struct driver_data *dd)
dd->bdev = NULL;
}
if (dd->disk) {
- del_gendisk(dd->disk);
+ if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
+ del_gendisk(dd->disk);
if (dd->disk->queue) {
blk_cleanup_queue(dd->queue);
blk_mq_free_tag_set(&dd->tags);
@@ -4088,7 +4235,8 @@ static int mtip_block_shutdown(struct driver_data *dd)
dev_info(&dd->pdev->dev,
"Shutting down %s ...\n", dd->disk->disk_name);
- del_gendisk(dd->disk);
+ if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
+ del_gendisk(dd->disk);
if (dd->disk->queue) {
blk_cleanup_queue(dd->queue);
blk_mq_free_tag_set(&dd->tags);
@@ -4433,7 +4581,7 @@ static void mtip_pci_remove(struct pci_dev *pdev)
struct driver_data *dd = pci_get_drvdata(pdev);
unsigned long flags, to;
- set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
+ set_bit(MTIP_DDF_REMOVAL_BIT, &dd->dd_flag);
spin_lock_irqsave(&dev_lock, flags);
list_del_init(&dd->online_list);
@@ -4450,12 +4598,17 @@ static void mtip_pci_remove(struct pci_dev *pdev)
} while (atomic_read(&dd->irq_workers_active) != 0 &&
time_before(jiffies, to));
+ if (!dd->sr)
+ fsync_bdev(dd->bdev);
+
if (atomic_read(&dd->irq_workers_active) != 0) {
dev_warn(&dd->pdev->dev,
"Completion workers still active!\n");
}
- blk_mq_stop_hw_queues(dd->queue);
+ blk_set_queue_dying(dd->queue);
+ set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
+
/* Clean up the block layer. */
mtip_block_remove(dd);
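The mtip32xx rework hangs off two blk-mq hooks: .timeout fires per-request once tags.timeout expires, and .complete runs the heavy completion work (DMA unmap, blk_mq_end_request()) in softirq context after blk_mq_complete_request(). A minimal sketch of the shape, against the blk-mq API of this kernel series; the my_* names are hypothetical:

    static void my_complete(struct request *rq)
    {
            /* softirq context: safe place for dma_unmap_sg() etc.;
             * reached via blk_mq_complete_request()                */
            blk_mq_end_request(rq, rq->errors);
    }

    static enum blk_eh_timer_return my_timeout(struct request *rq, bool reserved)
    {
            /* defer the real recovery to a service thread, re-arm timer */
            wake_up_interruptible(&my_port->svc_wait);  /* hypothetical */
            return BLK_EH_RESET_TIMER;
    }

    static struct blk_mq_ops my_mq_ops = {
            .queue_rq  = my_queue_rq,                   /* hypothetical */
            .map_queue = blk_mq_map_queue,
            .complete  = my_complete,
            .timeout   = my_timeout,
    };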
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index 3274784008eb..7617888f7944 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -134,16 +134,24 @@ enum {
MTIP_PF_EH_ACTIVE_BIT = 1, /* error handling */
MTIP_PF_SE_ACTIVE_BIT = 2, /* secure erase */
MTIP_PF_DM_ACTIVE_BIT = 3, /* download microcode */
+ MTIP_PF_TO_ACTIVE_BIT = 9, /* timeout handling */
MTIP_PF_PAUSE_IO = ((1 << MTIP_PF_IC_ACTIVE_BIT) |
(1 << MTIP_PF_EH_ACTIVE_BIT) |
(1 << MTIP_PF_SE_ACTIVE_BIT) |
- (1 << MTIP_PF_DM_ACTIVE_BIT)),
+ (1 << MTIP_PF_DM_ACTIVE_BIT) |
+ (1 << MTIP_PF_TO_ACTIVE_BIT)),
MTIP_PF_SVC_THD_ACTIVE_BIT = 4,
MTIP_PF_ISSUE_CMDS_BIT = 5,
MTIP_PF_REBUILD_BIT = 6,
MTIP_PF_SVC_THD_STOP_BIT = 8,
+ MTIP_PF_SVC_THD_WORK = ((1 << MTIP_PF_EH_ACTIVE_BIT) |
+ (1 << MTIP_PF_ISSUE_CMDS_BIT) |
+ (1 << MTIP_PF_REBUILD_BIT) |
+ (1 << MTIP_PF_SVC_THD_STOP_BIT) |
+ (1 << MTIP_PF_TO_ACTIVE_BIT)),
+
/* below are bit numbers in 'dd_flag' defined in driver_data */
MTIP_DDF_SEC_LOCK_BIT = 0,
MTIP_DDF_REMOVE_PENDING_BIT = 1,
@@ -153,6 +161,7 @@ enum {
MTIP_DDF_RESUME_BIT = 6,
MTIP_DDF_INIT_DONE_BIT = 7,
MTIP_DDF_REBUILD_FAILED_BIT = 8,
+ MTIP_DDF_REMOVAL_BIT = 9,
MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) |
(1 << MTIP_DDF_SEC_LOCK_BIT) |
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 81ea69fee7ca..fbdddd6f94b8 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1955,7 +1955,7 @@ static struct ceph_osd_request *rbd_osd_req_create(
osdc = &rbd_dev->rbd_client->client->osdc;
osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
- GFP_ATOMIC);
+ GFP_NOIO);
if (!osd_req)
return NULL; /* ENOMEM */
@@ -2004,7 +2004,7 @@ rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
rbd_dev = img_request->rbd_dev;
osdc = &rbd_dev->rbd_client->client->osdc;
osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
- false, GFP_ATOMIC);
+ false, GFP_NOIO);
if (!osd_req)
return NULL; /* ENOMEM */
@@ -2506,7 +2506,7 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
bio_chain_clone_range(&bio_list,
&bio_offset,
clone_size,
- GFP_ATOMIC);
+ GFP_NOIO);
if (!obj_request->bio_list)
goto out_unwind;
} else if (type == OBJ_REQUEST_PAGES) {
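The rbd hunks swap GFP_ATOMIC for GFP_NOIO. These allocations happen in a blockable context on the I/O path, so the real requirement is not "never sleep" but "never recurse back into block I/O while reclaiming" — exactly what GFP_NOIO expresses, and it fails far less often than GFP_ATOMIC. Roughly:

    /* I/O submission path: sleeping is fine, re-entering I/O is not */
    req = kmalloc(len, GFP_NOIO);

    /* true atomic context (IRQ handler, spinlock held): no sleeping */
    req = kmalloc(len, GFP_ATOMIC);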
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index 5cb13ca3a3ac..c53617752b93 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -76,7 +76,7 @@ static void zcomp_strm_free(struct zcomp *comp, struct zcomp_strm *zstrm)
*/
static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp)
{
- struct zcomp_strm *zstrm = kmalloc(sizeof(*zstrm), GFP_KERNEL);
+ struct zcomp_strm *zstrm = kmalloc(sizeof(*zstrm), GFP_NOIO);
if (!zstrm)
return NULL;
@@ -85,7 +85,7 @@ static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp)
* allocate 2 pages. 1 for compressed data, plus 1 extra for the
* case when compressed size is larger than the original one
*/
- zstrm->buffer = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
+ zstrm->buffer = (void *)__get_free_pages(GFP_NOIO | __GFP_ZERO, 1);
if (!zstrm->private || !zstrm->buffer) {
zcomp_strm_free(comp, zstrm);
zstrm = NULL;
diff --git a/drivers/block/zram/zcomp_lz4.c b/drivers/block/zram/zcomp_lz4.c
index f2afb7e988c3..dd6083124276 100644
--- a/drivers/block/zram/zcomp_lz4.c
+++ b/drivers/block/zram/zcomp_lz4.c
@@ -10,17 +10,36 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/lz4.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
#include "zcomp_lz4.h"
static void *zcomp_lz4_create(void)
{
- return kzalloc(LZ4_MEM_COMPRESS, GFP_KERNEL);
+ void *ret;
+
+ /*
+ * This function can be called from the swapout/fs write path,
+ * so we can't allow __GFP_FS/__GFP_IO. We also assume zram
+ * initialization has already allocated at least one stream, so
+ * we make no heroic effort to allocate more streams here: the
+ * default stream alone works fine. Hence NORETRY | NOWARN.
+ */
+ ret = kzalloc(LZ4_MEM_COMPRESS, GFP_NOIO | __GFP_NORETRY |
+ __GFP_NOWARN);
+ if (!ret)
+ ret = __vmalloc(LZ4_MEM_COMPRESS,
+ GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN |
+ __GFP_ZERO | __GFP_HIGHMEM,
+ PAGE_KERNEL);
+ return ret;
}
static void zcomp_lz4_destroy(void *private)
{
- kfree(private);
+ kvfree(private);
}
static int zcomp_lz4_compress(const unsigned char *src, unsigned char *dst,
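Both compression backends now use the same two-step allocation: try kzalloc() cheaply and quietly (NORETRY keeps the allocator from thrashing, NOWARN silences the expected failure), then fall back to __vmalloc(), which only needs page-sized chunks. A condensed sketch with alloc_workmem() as a hypothetical helper; kvfree() in the destroy path frees either kind:

    static void *alloc_workmem(size_t size)             /* hypothetical */
    {
            void *p = kzalloc(size, GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);

            if (!p)                         /* contiguous pages scarce? */
                    p = __vmalloc(size, GFP_NOIO | __GFP_NORETRY |
                                  __GFP_NOWARN | __GFP_ZERO | __GFP_HIGHMEM,
                                  PAGE_KERNEL);
            return p;                       /* free with kvfree() */
    }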
diff --git a/drivers/block/zram/zcomp_lzo.c b/drivers/block/zram/zcomp_lzo.c
index da1bc47d588e..edc549920fa0 100644
--- a/drivers/block/zram/zcomp_lzo.c
+++ b/drivers/block/zram/zcomp_lzo.c
@@ -10,17 +10,36 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/lzo.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
#include "zcomp_lzo.h"
static void *lzo_create(void)
{
- return kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
+ void *ret;
+
+ /*
+ * This function can be called from the swapout/fs write path,
+ * so we can't allow __GFP_FS/__GFP_IO. We also assume zram
+ * initialization has already allocated at least one stream, so
+ * we make no heroic effort to allocate more streams here: the
+ * default stream alone works fine. Hence NORETRY | NOWARN.
+ */
+ ret = kzalloc(LZO1X_MEM_COMPRESS, GFP_NOIO | __GFP_NORETRY |
+ __GFP_NOWARN);
+ if (!ret)
+ ret = __vmalloc(LZO1X_MEM_COMPRESS,
+ GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN |
+ __GFP_ZERO | __GFP_HIGHMEM,
+ PAGE_KERNEL);
+ return ret;
}
static void lzo_destroy(void *private)
{
- kfree(private);
+ kvfree(private);
}
static int lzo_compress(const unsigned char *src, unsigned char *dst,
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 47915d736f8d..370c2f76016d 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1325,7 +1325,6 @@ static int zram_remove(struct zram *zram)
pr_info("Removed device: %s\n", zram->disk->disk_name);
- idr_remove(&zram_index_idr, zram->disk->first_minor);
blk_cleanup_queue(zram->disk->queue);
del_gendisk(zram->disk);
put_disk(zram->disk);
@@ -1367,10 +1366,12 @@ static ssize_t hot_remove_store(struct class *class,
mutex_lock(&zram_index_mutex);
zram = idr_find(&zram_index_idr, dev_id);
- if (zram)
+ if (zram) {
ret = zram_remove(zram);
- else
+ idr_remove(&zram_index_idr, dev_id);
+ } else {
ret = -ENODEV;
+ }
mutex_unlock(&zram_index_mutex);
return ret ? ret : count;
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index fa893c3ec408..0beaa52df66b 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -82,6 +82,7 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x0489, 0xe05f) },
{ USB_DEVICE(0x0489, 0xe076) },
{ USB_DEVICE(0x0489, 0xe078) },
+ { USB_DEVICE(0x0489, 0xe095) },
{ USB_DEVICE(0x04c5, 0x1330) },
{ USB_DEVICE(0x04CA, 0x3004) },
{ USB_DEVICE(0x04CA, 0x3005) },
@@ -92,6 +93,7 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x04CA, 0x300d) },
{ USB_DEVICE(0x04CA, 0x300f) },
{ USB_DEVICE(0x04CA, 0x3010) },
+ { USB_DEVICE(0x04CA, 0x3014) },
{ USB_DEVICE(0x0930, 0x0219) },
{ USB_DEVICE(0x0930, 0x021c) },
{ USB_DEVICE(0x0930, 0x0220) },
@@ -113,10 +115,12 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x13d3, 0x3362) },
{ USB_DEVICE(0x13d3, 0x3375) },
{ USB_DEVICE(0x13d3, 0x3393) },
+ { USB_DEVICE(0x13d3, 0x3395) },
{ USB_DEVICE(0x13d3, 0x3402) },
{ USB_DEVICE(0x13d3, 0x3408) },
{ USB_DEVICE(0x13d3, 0x3423) },
{ USB_DEVICE(0x13d3, 0x3432) },
+ { USB_DEVICE(0x13d3, 0x3472) },
{ USB_DEVICE(0x13d3, 0x3474) },
/* Atheros AR5BBU12 with sflash firmware */
@@ -144,6 +148,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe095), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
@@ -154,6 +159,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
@@ -175,10 +181,12 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3395), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU22 with sflash firmware */
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 92f0ee388f9e..79107597a594 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -153,6 +153,10 @@ static const struct usb_device_id btusb_table[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01),
.driver_info = BTUSB_BCM_PATCHRAM },
+ /* Toshiba Corp - Broadcom based */
+ { USB_VENDOR_AND_INTERFACE_INFO(0x0930, 0xff, 0x01, 0x01),
+ .driver_info = BTUSB_BCM_PATCHRAM },
+
/* Intel Bluetooth USB Bootloader (RAM module) */
{ USB_DEVICE(0x8087, 0x0a5a),
.driver_info = BTUSB_INTEL_BOOT | BTUSB_BROKEN_ISOC },
@@ -192,6 +196,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe095), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
@@ -202,6 +207,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
@@ -223,10 +229,12 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3395), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU12 with sflash firmware */
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
index 57eb935aedc7..f03792e89ae1 100644
--- a/drivers/bluetooth/btwilink.c
+++ b/drivers/bluetooth/btwilink.c
@@ -30,6 +30,7 @@
#include <linux/ti_wilink_st.h>
#include <linux/module.h>
+#include <linux/of.h>
/* Bluetooth Driver Version */
#define VERSION "1.0"
@@ -273,6 +274,14 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
return 0;
}
+static const struct of_device_id btwilink_of_match[] = {
+	{
+		.compatible = "btwilink",
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, btwilink_of_match);
+
static int bt_ti_probe(struct platform_device *pdev)
{
static struct ti_st *hst;
@@ -336,6 +345,8 @@ static struct platform_driver btwilink_driver = {
.remove = bt_ti_remove,
.driver = {
.name = "btwilink",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(btwilink_of_match),
},
};
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 45cc39aabeee..252142524ff2 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -136,11 +136,13 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev,
chip->cdev.owner = chip->pdev->driver->owner;
chip->cdev.kobj.parent = &chip->dev.kobj;
+ devm_add_action(dev, (void (*)(void *)) put_device, &chip->dev);
+
return chip;
}
EXPORT_SYMBOL_GPL(tpmm_chip_alloc);
-static int tpm_dev_add_device(struct tpm_chip *chip)
+static int tpm_add_char_device(struct tpm_chip *chip)
{
int rc;
@@ -151,7 +153,6 @@ static int tpm_dev_add_device(struct tpm_chip *chip)
chip->devname, MAJOR(chip->dev.devt),
MINOR(chip->dev.devt), rc);
- device_unregister(&chip->dev);
return rc;
}
@@ -162,16 +163,17 @@ static int tpm_dev_add_device(struct tpm_chip *chip)
chip->devname, MAJOR(chip->dev.devt),
MINOR(chip->dev.devt), rc);
+ cdev_del(&chip->cdev);
return rc;
}
return rc;
}
-static void tpm_dev_del_device(struct tpm_chip *chip)
+static void tpm_del_char_device(struct tpm_chip *chip)
{
cdev_del(&chip->cdev);
- device_unregister(&chip->dev);
+ device_del(&chip->dev);
}
static int tpm1_chip_register(struct tpm_chip *chip)
@@ -222,7 +224,7 @@ int tpm_chip_register(struct tpm_chip *chip)
tpm_add_ppi(chip);
- rc = tpm_dev_add_device(chip);
+ rc = tpm_add_char_device(chip);
if (rc)
goto out_err;
@@ -274,6 +276,6 @@ void tpm_chip_unregister(struct tpm_chip *chip)
sysfs_remove_link(&chip->pdev->kobj, "ppi");
tpm1_chip_unregister(chip);
- tpm_dev_del_device(chip);
+ tpm_del_char_device(chip);
}
EXPORT_SYMBOL_GPL(tpm_chip_unregister);
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index 4bb9727c1047..61e64293b765 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -310,11 +310,11 @@ static int crb_acpi_remove(struct acpi_device *device)
struct device *dev = &device->dev;
struct tpm_chip *chip = dev_get_drvdata(dev);
- tpm_chip_unregister(chip);
-
if (chip->flags & TPM_CHIP_FLAG_TPM2)
tpm2_shutdown(chip, TPM2_SU_CLEAR);
+ tpm_chip_unregister(chip);
+
return 0;
}
diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
index bd72fb04225e..4e6940acf639 100644
--- a/drivers/char/tpm/tpm_eventlog.c
+++ b/drivers/char/tpm/tpm_eventlog.c
@@ -232,7 +232,7 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
{
struct tcpa_event *event = v;
struct tcpa_event temp_event;
- char *tempPtr;
+ char *temp_ptr;
int i;
memcpy(&temp_event, event, sizeof(struct tcpa_event));
@@ -242,10 +242,16 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
temp_event.event_type = do_endian_conversion(event->event_type);
temp_event.event_size = do_endian_conversion(event->event_size);
- tempPtr = (char *)&temp_event;
+ temp_ptr = (char *) &temp_event;
- for (i = 0; i < sizeof(struct tcpa_event) + temp_event.event_size; i++)
- seq_putc(m, tempPtr[i]);
+ for (i = 0; i < (sizeof(struct tcpa_event) - 1) ; i++)
+ seq_putc(m, temp_ptr[i]);
+
+ temp_ptr = (char *) v;
+
+ for (i = (sizeof(struct tcpa_event) - 1);
+ i < (sizeof(struct tcpa_event) + temp_event.event_size); i++)
+ seq_putc(m, temp_ptr[i]);
return 0;
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index 39bf5820297e..4f9830c1b121 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -1097,13 +1097,15 @@ static int bcm2835_pll_divider_set_rate(struct clk_hw *hw,
struct bcm2835_pll_divider *divider = bcm2835_pll_divider_from_hw(hw);
struct bcm2835_cprman *cprman = divider->cprman;
const struct bcm2835_pll_divider_data *data = divider->data;
- u32 cm;
- int ret;
+ u32 cm, div, max_div = 1 << A2W_PLL_DIV_BITS;
- ret = clk_divider_ops.set_rate(hw, rate, parent_rate);
- if (ret)
- return ret;
+ div = DIV_ROUND_UP_ULL(parent_rate, rate);
+
+ div = min(div, max_div);
+ if (div == max_div)
+ div = 0;
+ cprman_write(cprman, data->a2w_reg, div);
cm = cprman_read(cprman, data->cm_reg);
cprman_write(cprman, data->cm_reg, cm | data->load_mask);
cprman_write(cprman, data->cm_reg, cm & ~data->load_mask);
diff --git a/drivers/clk/hisilicon/clk-hi6220.c b/drivers/clk/hisilicon/clk-hi6220.c
index 4563343b6420..effe2ed40733 100644
--- a/drivers/clk/hisilicon/clk-hi6220.c
+++ b/drivers/clk/hisilicon/clk-hi6220.c
@@ -11,9 +11,12 @@
*/
#include <linux/kernel.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
+#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
@@ -34,8 +37,8 @@ static struct hisi_fixed_rate_clock hi6220_fixed_rate_clks[] __initdata = {
{ HI6220_PLL_BBP, "bbppll0", NULL, CLK_IS_ROOT, 245760000, },
{ HI6220_PLL_GPU, "gpupll", NULL, CLK_IS_ROOT, 1000000000,},
{ HI6220_PLL1_DDR, "ddrpll1", NULL, CLK_IS_ROOT, 1066000000,},
- { HI6220_PLL_SYS, "syspll", NULL, CLK_IS_ROOT, 1200000000,},
- { HI6220_PLL_SYS_MEDIA, "media_syspll", NULL, CLK_IS_ROOT, 1200000000,},
+ { HI6220_PLL_SYS, "syspll", NULL, CLK_IS_ROOT, 1190494208,},
+ { HI6220_PLL_SYS_MEDIA, "media_syspll", NULL, CLK_IS_ROOT, 1190494208,},
{ HI6220_DDR_SRC, "ddr_sel_src", NULL, CLK_IS_ROOT, 1200000000,},
{ HI6220_PLL_MEDIA, "media_pll", NULL, CLK_IS_ROOT, 1440000000,},
{ HI6220_PLL_DDR, "ddrpll0", NULL, CLK_IS_ROOT, 1600000000,},
@@ -68,24 +71,75 @@ static struct hisi_gate_clock hi6220_separated_gate_clks_ao[] __initdata = {
{ HI6220_TIMER7_PCLK, "timer7_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 22, 0, },
{ HI6220_TIMER8_PCLK, "timer8_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 23, 0, },
{ HI6220_UART0_PCLK, "uart0_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 24, 0, },
+ { HI6220_RTC0_PCLK, "rtc0_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 25, 0, },
};
+#define SOC_PERI_SCTRL_BASE_ADDR 0xF7030000 /* peri ctrl base addr */
+#define SC_PERIPH_CTRL14 0x02C
+#define SC_PERIPH_STAT1 0x094
+#define SOC_PMCTRL_BASE_ADDR 0xF7032000 /* pm ctrl base addr */
+#define SC_PM_DDRPLL_STAT 0x18
+#define SC_PM_SYSPLL_STAT 0x28
+#define SC_PM_MEDPLL_STAT 0x38
+
+static struct hisi_clock_data *clk_data_ao;
+
static void __init hi6220_clk_ao_init(struct device_node *np)
{
- struct hisi_clock_data *clk_data_ao;
+ void __iomem *peri_base, *pm_base;
+ unsigned int freq_u, freq_l, freq, pll_stat;
+ int i;
clk_data_ao = hisi_clk_init(np, HI6220_AO_NR_CLKS);
if (!clk_data_ao)
return;
+ peri_base = ioremap(SOC_PERI_SCTRL_BASE_ADDR, 0x1000);
+ pm_base = ioremap(SOC_PMCTRL_BASE_ADDR, 0x1000);
+ /* SYSPLL is set by bootloader. Read it */
+ /* check syspll enablement status */
+ pll_stat = readl(pm_base + SC_PM_SYSPLL_STAT);
+ pr_info("SYSPLL: syspll PM status: 0x%x\n", pll_stat);
+ /* writing 0x2101 triggers a measurement of clk_sys_pll */
+ writew(0x2101, peri_base + SC_PERIPH_CTRL14);
+ mdelay(1);
+ /* read back the calculated value */
+ freq_l = readw(peri_base + SC_PERIPH_STAT1);
+ freq_u = readw(peri_base + SC_PERIPH_STAT1 + 2);
+ mdelay(1);
+ freq = freq_u << 16 | freq_l;
+ pr_info("SYSPLL: syspll is read: l: 0x%04X, u: 0x%04X\n",
+ freq_l, freq_u);
+ pr_info("SYSPLL: syspll is read: 0x%X, %d\n", freq, freq);
+ if (freq == 0x00020000 || freq == 0) {
+ pr_info("SYSPLL: ERROR: read returned a mysterious value.\n");
+ freq = 1200000000;
+ }
+
+ /* round the measured frequency down to a 100 kHz boundary */
+ freq -= (freq % 100000);
+ pr_info("SYSPLL: set syspll medpll: %d\n", freq);
+
+ for (i = 0; i < ARRAY_SIZE(hi6220_fixed_rate_clks); i++) {
+ if (hi6220_fixed_rate_clks[i].id == HI6220_PLL_SYS ||
+ hi6220_fixed_rate_clks[i].id == HI6220_PLL_SYS_MEDIA) {
+ hi6220_fixed_rate_clks[i].fixed_rate = freq;
+ pr_info("SYSPLL: modified fix_rate[%d], id=%d, f=%d\n",
+ i, hi6220_fixed_rate_clks[i].id, freq);
+ }
+ }
+
hisi_clk_register_fixed_rate(hi6220_fixed_rate_clks,
- ARRAY_SIZE(hi6220_fixed_rate_clks), clk_data_ao);
+ ARRAY_SIZE(hi6220_fixed_rate_clks),
+ clk_data_ao);
hisi_clk_register_fixed_factor(hi6220_fixed_factor_clks,
- ARRAY_SIZE(hi6220_fixed_factor_clks), clk_data_ao);
+ ARRAY_SIZE(hi6220_fixed_factor_clks),
+ clk_data_ao);
hisi_clk_register_gate_sep(hi6220_separated_gate_clks_ao,
- ARRAY_SIZE(hi6220_separated_gate_clks_ao), clk_data_ao);
+ ARRAY_SIZE(hi6220_separated_gate_clks_ao),
+ clk_data_ao);
}
CLK_OF_DECLARE(hi6220_clk_ao, "hisilicon,hi6220-aoctrl", hi6220_clk_ao_init);
@@ -110,11 +164,11 @@ static const char *uart4_src[] __initdata = { "clk_tcxo", "clk_150m", };
static const char *hifi_src[] __initdata = { "syspll", "pll_media_gate", };
static struct hisi_gate_clock hi6220_separated_gate_clks_sys[] __initdata = {
- { HI6220_MMC0_CLK, "mmc0_clk", "mmc0_src", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x200, 0, 0, },
+ { HI6220_MMC0_CLK, "mmc0_clk", "mmc0_rst_clk", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x200, 0, 0, },
{ HI6220_MMC0_CIUCLK, "mmc0_ciuclk", "mmc0_smp_in", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x200, 0, 0, },
- { HI6220_MMC1_CLK, "mmc1_clk", "mmc1_src", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x200, 1, 0, },
+ { HI6220_MMC1_CLK, "mmc1_clk", "mmc1_rst_clk", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x200, 1, 0, },
{ HI6220_MMC1_CIUCLK, "mmc1_ciuclk", "mmc1_smp_in", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x200, 1, 0, },
- { HI6220_MMC2_CLK, "mmc2_clk", "mmc2_src", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x200, 2, 0, },
+ { HI6220_MMC2_CLK, "mmc2_clk", "mmc2_rst_clk", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x200, 2, 0, },
{ HI6220_MMC2_CIUCLK, "mmc2_ciuclk", "mmc2_smp_in", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x200, 2, 0, },
{ HI6220_USBOTG_HCLK, "usbotg_hclk", "clk_bus", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x200, 4, 0, },
{ HI6220_CLK_PICOPHY, "clk_picophy", "cs_dapb", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x200, 5, 0, },
@@ -145,6 +199,12 @@ static struct hisi_gate_clock hi6220_separated_gate_clks_sys[] __initdata = {
{ HI6220_CS_ATB_SYSPLL, "cs_atb_syspll", "syspll", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 12, 0, },
};
+static struct hisi_gate_clock hi6220_reset_clks[] __initdata = {
+ { 0, "mmc0_rst_clk", "mmc0_src", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x304, 0, 0, },
+ { 0, "mmc1_rst_clk", "mmc1_src", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x304, 1, 0, },
+ { 0, "mmc2_rst_clk", "mmc2_src", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x304, 2, 0, },
+};
+
static struct hisi_mux_clock hi6220_mux_clks_sys[] __initdata = {
{ HI6220_MMC0_SRC, "mmc0_src", mmc0_src_p, ARRAY_SIZE(mmc0_src_p), CLK_SET_RATE_PARENT, 0x4, 0, 1, 0, },
{ HI6220_MMC0_SMP_IN, "mmc0_smp_in", mmc0_sample_in, ARRAY_SIZE(mmc0_sample_in), CLK_SET_RATE_PARENT, 0x4, 0, 1, 0, },
@@ -184,6 +244,9 @@ static void __init hi6220_clk_sys_init(struct device_node *np)
if (!clk_data)
return;
+ hisi_clk_register_gate(hi6220_reset_clks,
+ ARRAY_SIZE(hi6220_reset_clks), clk_data);
+
hisi_clk_register_gate_sep(hi6220_separated_gate_clks_sys,
ARRAY_SIZE(hi6220_separated_gate_clks_sys), clk_data);
@@ -192,6 +255,13 @@ static void __init hi6220_clk_sys_init(struct device_node *np)
hi6220_clk_register_divider(hi6220_div_clks_sys,
ARRAY_SIZE(hi6220_div_clks_sys), clk_data);
+
+ if (!clk_data_ao)
+ return;
+
+ /* enable high speed clock on UART1 mux */
+ clk_set_parent(clk_data->clk_data.clks[HI6220_UART1_SRC],
+ clk_data_ao->clk_data.clks[HI6220_150M]);
}
CLK_OF_DECLARE(hi6220_clk_sys, "hisilicon,hi6220-sysctrl", hi6220_clk_sys_init);
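The ao-clock init above measures SYSPLL instead of trusting a hard-coded 1.2 GHz: the 32-bit result is read back as two 16-bit halves and rounded down to a 100 kHz boundary. A worked example of that arithmetic:

    freq_l = 0x8000;                    /* low half from SC_PERIPH_STAT1   */
    freq_u = 0x46f5;                    /* high half, two bytes further on */
    freq = freq_u << 16 | freq_l;       /* 0x46f58000 = 1190494208 Hz      */
    freq -= freq % 100000;              /* -> 1190400000 Hz                */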
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index abb47608713b..fe728f8dcbe4 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -718,6 +718,7 @@ static const char *const rk3188_critical_clocks[] __initconst = {
"hclk_peri",
"pclk_cpu",
"pclk_peri",
+ "hclk_cpubus"
};
static void __init rk3188_common_clk_init(struct device_node *np)
diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c
index 7e6b783e6eee..1b148694b633 100644
--- a/drivers/clk/rockchip/clk-rk3368.c
+++ b/drivers/clk/rockchip/clk-rk3368.c
@@ -165,7 +165,7 @@ static const struct rockchip_cpuclk_reg_data rk3368_cpuclkb_data = {
.core_reg = RK3368_CLKSEL_CON(0),
.div_core_shift = 0,
.div_core_mask = 0x1f,
- .mux_core_shift = 15,
+ .mux_core_shift = 7,
};
static const struct rockchip_cpuclk_reg_data rk3368_cpuclkl_data = {
@@ -218,29 +218,29 @@ static const struct rockchip_cpuclk_reg_data rk3368_cpuclkl_data = {
}
static struct rockchip_cpuclk_rate_table rk3368_cpuclkb_rates[] __initdata = {
- RK3368_CPUCLKB_RATE(1512000000, 2, 6, 6),
- RK3368_CPUCLKB_RATE(1488000000, 2, 5, 5),
- RK3368_CPUCLKB_RATE(1416000000, 2, 5, 5),
- RK3368_CPUCLKB_RATE(1200000000, 2, 4, 4),
- RK3368_CPUCLKB_RATE(1008000000, 2, 4, 4),
- RK3368_CPUCLKB_RATE( 816000000, 2, 3, 3),
- RK3368_CPUCLKB_RATE( 696000000, 2, 3, 3),
- RK3368_CPUCLKB_RATE( 600000000, 2, 2, 2),
- RK3368_CPUCLKB_RATE( 408000000, 2, 2, 2),
- RK3368_CPUCLKB_RATE( 312000000, 2, 2, 2),
+ RK3368_CPUCLKB_RATE(1512000000, 1, 5, 5),
+ RK3368_CPUCLKB_RATE(1488000000, 1, 4, 4),
+ RK3368_CPUCLKB_RATE(1416000000, 1, 4, 4),
+ RK3368_CPUCLKB_RATE(1200000000, 1, 3, 3),
+ RK3368_CPUCLKB_RATE(1008000000, 1, 3, 3),
+ RK3368_CPUCLKB_RATE( 816000000, 1, 2, 2),
+ RK3368_CPUCLKB_RATE( 696000000, 1, 2, 2),
+ RK3368_CPUCLKB_RATE( 600000000, 1, 1, 1),
+ RK3368_CPUCLKB_RATE( 408000000, 1, 1, 1),
+ RK3368_CPUCLKB_RATE( 312000000, 1, 1, 1),
};
static struct rockchip_cpuclk_rate_table rk3368_cpuclkl_rates[] __initdata = {
- RK3368_CPUCLKL_RATE(1512000000, 2, 7, 7),
- RK3368_CPUCLKL_RATE(1488000000, 2, 6, 6),
- RK3368_CPUCLKL_RATE(1416000000, 2, 6, 6),
- RK3368_CPUCLKL_RATE(1200000000, 2, 5, 5),
- RK3368_CPUCLKL_RATE(1008000000, 2, 5, 5),
- RK3368_CPUCLKL_RATE( 816000000, 2, 4, 4),
- RK3368_CPUCLKL_RATE( 696000000, 2, 3, 3),
- RK3368_CPUCLKL_RATE( 600000000, 2, 3, 3),
- RK3368_CPUCLKL_RATE( 408000000, 2, 2, 2),
- RK3368_CPUCLKL_RATE( 312000000, 2, 2, 2),
+ RK3368_CPUCLKL_RATE(1512000000, 1, 6, 6),
+ RK3368_CPUCLKL_RATE(1488000000, 1, 5, 5),
+ RK3368_CPUCLKL_RATE(1416000000, 1, 5, 5),
+ RK3368_CPUCLKL_RATE(1200000000, 1, 4, 4),
+ RK3368_CPUCLKL_RATE(1008000000, 1, 4, 4),
+ RK3368_CPUCLKL_RATE( 816000000, 1, 3, 3),
+ RK3368_CPUCLKL_RATE( 696000000, 1, 2, 2),
+ RK3368_CPUCLKL_RATE( 600000000, 1, 2, 2),
+ RK3368_CPUCLKL_RATE( 408000000, 1, 1, 1),
+ RK3368_CPUCLKL_RATE( 312000000, 1, 1, 1),
};
static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
@@ -384,10 +384,10 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
* Clock-Architecture Diagram 3
*/
- COMPOSITE(0, "aclk_vepu", mux_pll_src_cpll_gpll_usb_p, 0,
+ COMPOSITE(0, "aclk_vepu", mux_pll_src_cpll_gpll_npll_usb_p, 0,
RK3368_CLKSEL_CON(15), 6, 2, MFLAGS, 0, 5, DFLAGS,
RK3368_CLKGATE_CON(4), 6, GFLAGS),
- COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_usb_p, 0,
+ COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_npll_usb_p, 0,
RK3368_CLKSEL_CON(15), 14, 2, MFLAGS, 8, 5, DFLAGS,
RK3368_CLKGATE_CON(4), 7, GFLAGS),
@@ -442,7 +442,7 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
GATE(SCLK_HDMI_HDCP, "sclk_hdmi_hdcp", "xin24m", 0,
RK3368_CLKGATE_CON(4), 13, GFLAGS),
GATE(SCLK_HDMI_CEC, "sclk_hdmi_cec", "xin32k", 0,
- RK3368_CLKGATE_CON(5), 12, GFLAGS),
+ RK3368_CLKGATE_CON(4), 12, GFLAGS),
COMPOSITE_NODIV(0, "vip_src", mux_pll_src_cpll_gpll_p, 0,
RK3368_CLKSEL_CON(21), 15, 1, MFLAGS,
diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c
index 2fe37f708dc7..813003d6ce09 100644
--- a/drivers/clk/samsung/clk-cpu.c
+++ b/drivers/clk/samsung/clk-cpu.c
@@ -148,6 +148,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
unsigned long alt_prate = clk_get_rate(cpuclk->alt_parent);
unsigned long alt_div = 0, alt_div_mask = DIV_MASK;
unsigned long div0, div1 = 0, mux_reg;
+ unsigned long flags;
/* find out the divider values to use for clock data */
while ((cfg_data->prate * 1000) != ndata->new_rate) {
@@ -156,7 +157,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
cfg_data++;
}
- spin_lock(cpuclk->lock);
+ spin_lock_irqsave(cpuclk->lock, flags);
/*
* For the selected PLL clock frequency, get the pre-defined divider
@@ -212,7 +213,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
DIV_MASK_ALL);
}
- spin_unlock(cpuclk->lock);
+ spin_unlock_irqrestore(cpuclk->lock, flags);
return 0;
}
@@ -223,6 +224,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
unsigned long div = 0, div_mask = DIV_MASK;
unsigned long mux_reg;
+ unsigned long flags;
/* find out the divider values to use for clock data */
if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
@@ -233,7 +235,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
}
}
- spin_lock(cpuclk->lock);
+ spin_lock_irqsave(cpuclk->lock, flags);
/* select mout_apll as the alternate parent */
mux_reg = readl(base + E4210_SRC_CPU);
@@ -246,7 +248,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
}
exynos_set_safe_div(base, div, div_mask);
- spin_unlock(cpuclk->lock);
+ spin_unlock_irqrestore(cpuclk->lock, flags);
return 0;
}
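The Samsung cpuclk notifiers switch from spin_lock() to spin_lock_irqsave(). The irqsave variant is required whenever the same lock can also be taken from interrupt context: it disables local interrupts and saves their previous state, so an IRQ handler cannot preempt the holder and deadlock on the lock. The canonical shape:

    unsigned long flags;

    spin_lock_irqsave(cpuclk->lock, flags);         /* IRQs off, state saved */
    /* ... reprogram mux and divider registers atomically ... */
    spin_unlock_irqrestore(cpuclk->lock, flags);    /* previous state back   */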
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 6ee91401918e..4da2af9694a2 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -98,7 +98,8 @@ static int tc_shutdown(struct clock_event_device *d)
__raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
__raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
- clk_disable(tcd->clk);
+ if (!clockevent_state_detached(d))
+ clk_disable(tcd->clk);
return 0;
}
diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c
index a92e94b40b5b..dfc3bb410b00 100644
--- a/drivers/clocksource/vt8500_timer.c
+++ b/drivers/clocksource/vt8500_timer.c
@@ -50,6 +50,8 @@
#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
+#define MIN_OSCR_DELTA 16
+
static void __iomem *regbase;
static cycle_t vt8500_timer_read(struct clocksource *cs)
@@ -80,7 +82,7 @@ static int vt8500_timer_set_next_event(unsigned long cycles,
cpu_relax();
writel((unsigned long)alarm, regbase + TIMER_MATCH_VAL);
- if ((signed)(alarm - clocksource.read(&clocksource)) <= 16)
+ if ((signed)(alarm - clocksource.read(&clocksource)) <= MIN_OSCR_DELTA)
return -ETIME;
writel(1, regbase + TIMER_IER_VAL);
@@ -151,7 +153,7 @@ static void __init vt8500_timer_init(struct device_node *np)
pr_err("%s: setup_irq failed for %s\n", __func__,
clockevent.name);
clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ,
- 4, 0xf0000000);
+ MIN_OSCR_DELTA * 2, 0xf0000000);
}
CLOCKSOURCE_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init);
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index b1f8a73e5a94..eb315b6ea7fd 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -6,6 +6,8 @@
config ARM_BIG_LITTLE_CPUFREQ
tristate "Generic ARM big LITTLE CPUfreq driver"
depends on (ARM_CPU_TOPOLOGY || ARM64) && HAVE_CLK
+ # if CPU_THERMAL is on and THERMAL=m, ARM_BIG_LITTLE_CPUFREQ cannot be =y
+ depends on !CPU_THERMAL || THERMAL
select PM_OPP
help
This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index c5d256caa664..c251247ae661 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -23,6 +23,7 @@
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
+#include <linux/cpu_cooling.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -55,6 +56,7 @@ static bool bL_switching_enabled;
#define ACTUAL_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq << 1 : freq)
#define VIRT_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq >> 1 : freq)
+static struct thermal_cooling_device *cdev[MAX_CLUSTERS];
static struct cpufreq_arm_bL_ops *arm_bL_ops;
static struct clk *clk[MAX_CLUSTERS];
static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1];
@@ -493,6 +495,12 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
static int bL_cpufreq_exit(struct cpufreq_policy *policy)
{
struct device *cpu_dev;
+ int cur_cluster = cpu_to_cluster(policy->cpu);
+
+ if (cur_cluster < MAX_CLUSTERS) {
+ cpufreq_cooling_unregister(cdev[cur_cluster]);
+ cdev[cur_cluster] = NULL;
+ }
cpu_dev = get_cpu_device(policy->cpu);
if (!cpu_dev) {
@@ -507,6 +515,38 @@ static int bL_cpufreq_exit(struct cpufreq_policy *policy)
return 0;
}
+static void bL_cpufreq_ready(struct cpufreq_policy *policy)
+{
+ struct device *cpu_dev = get_cpu_device(policy->cpu);
+ int cur_cluster = cpu_to_cluster(policy->cpu);
+ struct device_node *np;
+
+ /* Do not register a cpu_cooling device if we are in IKS mode */
+ if (cur_cluster >= MAX_CLUSTERS)
+ return;
+
+ np = of_node_get(cpu_dev->of_node);
+ if (WARN_ON(!np))
+ return;
+
+ if (of_find_property(np, "#cooling-cells", NULL)) {
+ u32 power_coefficient = 0;
+
+ of_property_read_u32(np, "dynamic-power-coefficient",
+ &power_coefficient);
+
+ cdev[cur_cluster] = of_cpufreq_power_cooling_register(np,
+ policy->related_cpus, power_coefficient, NULL);
+ if (IS_ERR(cdev[cur_cluster])) {
+ dev_err(cpu_dev,
+ "running cpufreq without cooling device: %ld\n",
+ PTR_ERR(cdev[cur_cluster]));
+ cdev[cur_cluster] = NULL;
+ }
+ }
+ of_node_put(np);
+}
+
static struct cpufreq_driver bL_cpufreq_driver = {
.name = "arm-big-little",
.flags = CPUFREQ_STICKY |
@@ -517,6 +557,7 @@ static struct cpufreq_driver bL_cpufreq_driver = {
.get = bL_cpufreq_get_rate,
.init = bL_cpufreq_init,
.exit = bL_cpufreq_exit,
+ .ready = bL_cpufreq_ready,
.attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 90d64081ddb3..1ceece9d6711 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -407,8 +407,13 @@ static void cpufreq_ready(struct cpufreq_policy *policy)
* thermal DT code takes care of matching them.
*/
if (of_find_property(np, "#cooling-cells", NULL)) {
- priv->cdev = of_cpufreq_cooling_register(np,
- policy->related_cpus);
+ u32 power_coefficient = 0;
+
+ of_property_read_u32(np, "dynamic-power-coefficient",
+ &power_coefficient);
+
+ priv->cdev = of_cpufreq_power_cooling_register(np,
+ policy->related_cpus, power_coefficient, NULL);
if (IS_ERR(priv->cdev)) {
dev_err(priv->cpu_dev,
"running cpufreq without cooling device: %ld\n",
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index b260576ddb12..d994b0f652d3 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -356,16 +356,18 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy,
if (!have_governor_per_policy())
cdata->gdbs_data = dbs_data;
+ policy->governor_data = dbs_data;
+
ret = sysfs_create_group(get_governor_parent_kobj(policy),
get_sysfs_attr(dbs_data));
if (ret)
goto reset_gdbs_data;
- policy->governor_data = dbs_data;
-
return 0;
reset_gdbs_data:
+ policy->governor_data = NULL;
+
if (!have_governor_per_policy())
cdata->gdbs_data = NULL;
cdata->exit(dbs_data, !policy->governor->initialized);
@@ -386,16 +388,19 @@ static int cpufreq_governor_exit(struct cpufreq_policy *policy,
if (!cdbs->shared || cdbs->shared->policy)
return -EBUSY;
- policy->governor_data = NULL;
if (!--dbs_data->usage_count) {
sysfs_remove_group(get_governor_parent_kobj(policy),
get_sysfs_attr(dbs_data));
+ policy->governor_data = NULL;
+
if (!have_governor_per_policy())
cdata->gdbs_data = NULL;
cdata->exit(dbs_data, policy->governor->initialized == 1);
kfree(dbs_data);
+ } else {
+ policy->governor_data = NULL;
}
free_common_dbs_info(policy, cdata);
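The governor fix is an ordering rule: sysfs attributes can be read the instant sysfs_create_group() returns, so any pointer they dereference (here policy->governor_data) must be published first — and, symmetrically, must stay valid until sysfs_remove_group() has taken the attributes away. In outline (kobj/attrs are stand-ins):

    /* init: publish, then expose */
    policy->governor_data = dbs_data;
    ret = sysfs_create_group(kobj, attrs);
    if (ret)
            policy->governor_data = NULL;   /* unwind on failure */

    /* exit: hide, then retract */
    sysfs_remove_group(kobj, attrs);
    policy->governor_data = NULL;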
diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
index 1d99c97defa9..096377232747 100644
--- a/drivers/cpufreq/pxa2xx-cpufreq.c
+++ b/drivers/cpufreq/pxa2xx-cpufreq.c
@@ -202,7 +202,7 @@ static void __init pxa_cpufreq_init_voltages(void)
}
}
#else
-static int pxa_cpufreq_change_voltage(struct pxa_freqs *pxa_freq)
+static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
{
return 0;
}
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index fb16d812c8f5..1dffb13e5c2f 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -1396,9 +1396,9 @@ static int atmel_aes_probe(struct platform_device *pdev)
}
aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res);
- if (!aes_dd->io_base) {
+ if (IS_ERR(aes_dd->io_base)) {
dev_err(dev, "can't ioremap\n");
- err = -ENOMEM;
+ err = PTR_ERR(aes_dd->io_base);
goto res_err;
}
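The three Atmel crypto drivers in this series (AES here, SHA and TDES below) shared the same latent bug: devm_ioremap_resource() never returns NULL — it returns an ERR_PTR-encoded error — so the old !io_base check could never fire and probe continued with an invalid mapping. The correct idiom:

    base = devm_ioremap_resource(&pdev->dev, res);
    if (IS_ERR(base))                   /* never NULL on failure  */
            return PTR_ERR(base);       /* e.g. -EBUSY or -ENOMEM */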
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 660d8c06540b..0dadb6332f0e 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -783,7 +783,7 @@ static void atmel_sha_finish_req(struct ahash_request *req, int err)
dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY);
- clk_disable_unprepare(dd->iclk);
+ clk_disable(dd->iclk);
if (req->base.complete)
req->base.complete(&req->base, err);
@@ -796,7 +796,7 @@ static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
{
int err;
- err = clk_prepare_enable(dd->iclk);
+ err = clk_enable(dd->iclk);
if (err)
return err;
@@ -823,7 +823,7 @@ static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
dev_info(dd->dev,
"version: 0x%x\n", dd->hw_version);
- clk_disable_unprepare(dd->iclk);
+ clk_disable(dd->iclk);
}
static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
@@ -1405,12 +1405,16 @@ static int atmel_sha_probe(struct platform_device *pdev)
}
sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res);
- if (!sha_dd->io_base) {
+ if (IS_ERR(sha_dd->io_base)) {
dev_err(dev, "can't ioremap\n");
- err = -ENOMEM;
+ err = PTR_ERR(sha_dd->io_base);
goto res_err;
}
+ err = clk_prepare(sha_dd->iclk);
+ if (err)
+ goto res_err;
+
atmel_sha_hw_version_init(sha_dd);
atmel_sha_get_cap(sha_dd);
@@ -1422,12 +1426,12 @@ static int atmel_sha_probe(struct platform_device *pdev)
if (IS_ERR(pdata)) {
dev_err(&pdev->dev, "platform data not available\n");
err = PTR_ERR(pdata);
- goto res_err;
+ goto iclk_unprepare;
}
}
if (!pdata->dma_slave) {
err = -ENXIO;
- goto res_err;
+ goto iclk_unprepare;
}
err = atmel_sha_dma_init(sha_dd, pdata);
if (err)
@@ -1458,6 +1462,8 @@ err_algs:
if (sha_dd->caps.has_dma)
atmel_sha_dma_cleanup(sha_dd);
err_sha_dma:
+iclk_unprepare:
+ clk_unprepare(sha_dd->iclk);
res_err:
tasklet_kill(&sha_dd->done_task);
sha_dd_err:
@@ -1484,12 +1490,7 @@ static int atmel_sha_remove(struct platform_device *pdev)
if (sha_dd->caps.has_dma)
atmel_sha_dma_cleanup(sha_dd);
- iounmap(sha_dd->io_base);
-
- clk_put(sha_dd->iclk);
-
- if (sha_dd->irq >= 0)
- free_irq(sha_dd->irq, sha_dd);
+ clk_unprepare(sha_dd->iclk);
return 0;
}
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 2c7a628d0375..bf467d7be35c 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -1417,9 +1417,9 @@ static int atmel_tdes_probe(struct platform_device *pdev)
}
tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res);
- if (!tdes_dd->io_base) {
+ if (IS_ERR(tdes_dd->io_base)) {
dev_err(dev, "can't ioremap\n");
- err = -ENOMEM;
+ err = PTR_ERR(tdes_dd->io_base);
goto res_err;
}
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 8abb4bc548cc..69d4a1326fee 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -534,8 +534,8 @@ static int caam_probe(struct platform_device *pdev)
* long pointers in master configuration register
*/
clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK, MCFGR_AWCACHE_CACH |
- MCFGR_WDENABLE | (sizeof(dma_addr_t) == sizeof(u64) ?
- MCFGR_LONG_PTR : 0));
+ MCFGR_AWCACHE_BUFF | MCFGR_WDENABLE |
+ (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
/*
* Read the Compile Time paramters and SCFGR to determine
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index d89f20c04266..3d9acc53d247 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -220,6 +220,39 @@ static int ccp_aes_cmac_digest(struct ahash_request *req)
return ccp_aes_cmac_finup(req);
}
+static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
+{
+ struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_aes_cmac_exp_ctx state;
+
+ state.null_msg = rctx->null_msg;
+ memcpy(state.iv, rctx->iv, sizeof(state.iv));
+ state.buf_count = rctx->buf_count;
+ memcpy(state.buf, rctx->buf, sizeof(state.buf));
+
+ /* 'out' may not be aligned so memcpy from local variable */
+ memcpy(out, &state, sizeof(state));
+
+ return 0;
+}
+
+static int ccp_aes_cmac_import(struct ahash_request *req, const void *in)
+{
+ struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_aes_cmac_exp_ctx state;
+
+ /* 'in' may not be aligned so memcpy to local variable */
+ memcpy(&state, in, sizeof(state));
+
+ memset(rctx, 0, sizeof(*rctx));
+ rctx->null_msg = state.null_msg;
+ memcpy(rctx->iv, state.iv, sizeof(rctx->iv));
+ rctx->buf_count = state.buf_count;
+ memcpy(rctx->buf, state.buf, sizeof(rctx->buf));
+
+ return 0;
+}
+
static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int key_len)
{
@@ -352,10 +385,13 @@ int ccp_register_aes_cmac_algs(struct list_head *head)
alg->final = ccp_aes_cmac_final;
alg->finup = ccp_aes_cmac_finup;
alg->digest = ccp_aes_cmac_digest;
+ alg->export = ccp_aes_cmac_export;
+ alg->import = ccp_aes_cmac_import;
alg->setkey = ccp_aes_cmac_setkey;
halg = &alg->halg;
halg->digestsize = AES_BLOCK_SIZE;
+ halg->statesize = sizeof(struct ccp_aes_cmac_exp_ctx);
base = &halg->base;
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)");
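The export/import pair added above follows a standard ahash pattern: the crypto API may pass an unaligned state buffer, so the driver stages the state in a properly aligned local struct and moves it with memcpy(). A minimal sketch of the idea, with an illustrative struct standing in for ccp_aes_cmac_exp_ctx (not the real CCP layout):

struct exp_state {				/* illustrative stand-in */
	unsigned int buf_count;
	u8 buf[16];
};

static int export_state(const struct exp_state *rctx, void *out)
{
	struct exp_state state;

	state.buf_count = rctx->buf_count;
	memcpy(state.buf, rctx->buf, sizeof(state.buf));

	/* 'out' may be unaligned; copying from the aligned local is safe */
	memcpy(out, &state, sizeof(state));
	return 0;
}

Setting halg->statesize to the size of the exported struct, as the registration hunk above does, is what lets callers allocate a correctly sized state buffer.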
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index d14b3f28e010..8ef06fad8b14 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -207,6 +207,43 @@ static int ccp_sha_digest(struct ahash_request *req)
return ccp_sha_finup(req);
}
+static int ccp_sha_export(struct ahash_request *req, void *out)
+{
+ struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_sha_exp_ctx state;
+
+ state.type = rctx->type;
+ state.msg_bits = rctx->msg_bits;
+ state.first = rctx->first;
+ memcpy(state.ctx, rctx->ctx, sizeof(state.ctx));
+ state.buf_count = rctx->buf_count;
+ memcpy(state.buf, rctx->buf, sizeof(state.buf));
+
+ /* 'out' may not be aligned so memcpy from local variable */
+ memcpy(out, &state, sizeof(state));
+
+ return 0;
+}
+
+static int ccp_sha_import(struct ahash_request *req, const void *in)
+{
+ struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_sha_exp_ctx state;
+
+ /* 'in' may not be aligned so memcpy to local variable */
+ memcpy(&state, in, sizeof(state));
+
+ memset(rctx, 0, sizeof(*rctx));
+ rctx->type = state.type;
+ rctx->msg_bits = state.msg_bits;
+ rctx->first = state.first;
+ memcpy(rctx->ctx, state.ctx, sizeof(rctx->ctx));
+ rctx->buf_count = state.buf_count;
+ memcpy(rctx->buf, state.buf, sizeof(rctx->buf));
+
+ return 0;
+}
+
static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int key_len)
{
@@ -403,9 +440,12 @@ static int ccp_register_sha_alg(struct list_head *head,
alg->final = ccp_sha_final;
alg->finup = ccp_sha_finup;
alg->digest = ccp_sha_digest;
+ alg->export = ccp_sha_export;
+ alg->import = ccp_sha_import;
halg = &alg->halg;
halg->digestsize = def->digest_size;
+ halg->statesize = sizeof(struct ccp_sha_exp_ctx);
base = &halg->base;
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index 76a96f0f44c6..a326ec20bfa8 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -129,6 +129,15 @@ struct ccp_aes_cmac_req_ctx {
struct ccp_cmd cmd;
};
+struct ccp_aes_cmac_exp_ctx {
+ unsigned int null_msg;
+
+ u8 iv[AES_BLOCK_SIZE];
+
+ unsigned int buf_count;
+ u8 buf[AES_BLOCK_SIZE];
+};
+
/***** SHA related defines *****/
#define MAX_SHA_CONTEXT_SIZE SHA256_DIGEST_SIZE
#define MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
@@ -171,6 +180,19 @@ struct ccp_sha_req_ctx {
struct ccp_cmd cmd;
};
+struct ccp_sha_exp_ctx {
+ enum ccp_sha_type type;
+
+ u64 msg_bits;
+
+ unsigned int first;
+
+ u8 ctx[MAX_SHA_CONTEXT_SIZE];
+
+ unsigned int buf_count;
+ u8 buf[MAX_SHA_BLOCK_SIZE];
+};
+
/***** Common Context Structure *****/
struct ccp_ctx {
int (*complete)(struct crypto_async_request *req, int ret);
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index 0643e3366e33..80239ae69527 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -306,7 +306,7 @@ static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
return -ENOMEM;
dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0);
- if (!dma->cache_pool)
+ if (!dma->padding_pool)
return -ENOMEM;
cesa->dma = dma;
@@ -420,7 +420,7 @@ static int mv_cesa_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
cesa->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(cesa->regs))
- return -ENOMEM;
+ return PTR_ERR(cesa->regs);
ret = mv_cesa_dev_dma_init(cesa);
if (ret)
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
index eab6fe227fa0..107cd2a41cae 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
@@ -39,6 +39,7 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.import = sun4i_hash_import_md5,
.halg = {
.digestsize = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct md5_state),
.base = {
.cra_name = "md5",
.cra_driver_name = "md5-sun4i-ss",
@@ -66,6 +67,7 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.import = sun4i_hash_import_sha1,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct sha1_state),
.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-sun4i-ss",
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 4c243c1ffc7f..790f7cadc1ed 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1440,9 +1440,9 @@ static int ux500_cryp_probe(struct platform_device *pdev)
device_data->phybase = res->start;
device_data->base = devm_ioremap_resource(dev, res);
- if (!device_data->base) {
+ if (IS_ERR(device_data->base)) {
dev_err(dev, "[%s]: ioremap failed!", __func__);
- ret = -ENOMEM;
+ ret = PTR_ERR(device_data->base);
goto out;
}
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index f47d112041b2..66b1c3313e2e 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -1675,9 +1675,9 @@ static int ux500_hash_probe(struct platform_device *pdev)
device_data->phybase = res->start;
device_data->base = devm_ioremap_resource(dev, res);
- if (!device_data->base) {
+ if (IS_ERR(device_data->base)) {
dev_err(dev, "%s: ioremap() failed!\n", __func__);
- ret = -ENOMEM;
+ ret = PTR_ERR(device_data->base);
goto out;
}
spin_lock_init(&device_data->ctx_lock);
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 370c661c7d7b..02f9aa4ebe05 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -176,6 +176,7 @@
#define AT_XDMAC_MAX_CHAN 0x20
#define AT_XDMAC_MAX_CSIZE 16 /* 16 data */
#define AT_XDMAC_MAX_DWIDTH 8 /* 64 bits */
+#define AT_XDMAC_RESIDUE_MAX_RETRIES 5
#define AT_XDMAC_DMA_BUSWIDTHS\
(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
@@ -1383,8 +1384,8 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
struct at_xdmac_desc *desc, *_desc;
struct list_head *descs_list;
enum dma_status ret;
- int residue;
- u32 cur_nda, mask, value;
+ int residue, retry;
+ u32 cur_nda, check_nda, cur_ubc, mask, value;
u8 dwidth = 0;
unsigned long flags;
@@ -1421,7 +1422,42 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
cpu_relax();
}
+ /*
+ * When processing the residue, we need to read two registers, but we
+ * can't do so atomically. AT_XDMAC_CNDA tells us where we stand in the
+ * descriptor list, and AT_XDMAC_CUBC tells us how much data remains for
+ * the current descriptor.
+ * Since the DMA channel is not paused (so that no data is lost), the
+ * current descriptor may change between the AT_XDMAC_CNDA and
+ * AT_XDMAC_CUBC reads.
+ * For that reason, after reading AT_XDMAC_CUBC we check whether we are
+ * still on the same descriptor by reading AT_XDMAC_CNDA a second time.
+ * If AT_XDMAC_CNDA has changed, AT_XDMAC_CUBC must be read again.
+ * Memory barriers are used to ensure the read order of the registers.
+ * A maximum number of retries is set because, although unlikely, the
+ * loop could otherwise never end when transferring a lot of data with
+ * small buffers.
+ */
cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+ rmb();
+ cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
+ for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
+ rmb();
+ check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+
+ if (likely(cur_nda == check_nda))
+ break;
+
+ cur_nda = check_nda;
+ rmb();
+ cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
+ }
+
+ if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
+ ret = DMA_ERROR;
+ goto spin_unlock;
+ }
+
/*
* Remove size of all microblocks already transferred and the current
* one. Then add the remaining size to transfer of the current
@@ -1434,7 +1470,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
break;
}
- residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth;
+ residue += cur_ubc << dwidth;
dma_set_residue(txstate, residue);
@@ -1688,6 +1724,7 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
at_xdmac_remove_xfer(atchan, desc);
+ clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
spin_unlock_irqrestore(&atchan->lock, flags);
@@ -1820,6 +1857,8 @@ static int atmel_xdmac_resume(struct device *dev)
atchan = to_at_xdmac_chan(chan);
at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
if (at_xdmac_chan_is_cyclic(atchan)) {
+ if (at_xdmac_chan_is_paused(atchan))
+ at_xdmac_device_resume(chan);
at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
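The residue hunk above implements a read-until-consistent loop: because CNDA (current descriptor) and CUBC (remaining bytes) cannot be sampled atomically, CUBC is only trusted once two successive CNDA reads agree. A minimal standalone sketch of the same pattern, assuming a hypothetical read_reg() accessor and REG_* names in place of at_xdmac_chan_read() (not the driver's real API):

/* Sketch: sample two coupled registers until they describe the same
 * descriptor. read_reg(), REG_CNDA and REG_CUBC are illustrative
 * stand-ins; rmb() is the real kernel read barrier used above. */
#define MAX_RETRIES 5

static int read_consistent_pair(u32 *nda, u32 *ubc)
{
	u32 check;
	int retry;

	*nda = read_reg(REG_CNDA) & 0xfffffffc;
	for (retry = 0; retry < MAX_RETRIES; retry++) {
		rmb();			/* CNDA read completes before CUBC */
		*ubc = read_reg(REG_CUBC);
		rmb();			/* CUBC read completes before re-check */
		check = read_reg(REG_CNDA) & 0xfffffffc;
		if (check == *nda)	/* same descriptor: values are coherent */
			return 0;
		*nda = check;		/* descriptor changed: sample again */
	}
	return -EIO;			/* caller maps this to DMA_ERROR */
}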
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 7067b6ddc1db..4f099ea29f83 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -536,16 +536,17 @@ EXPORT_SYMBOL(dw_dma_get_dst_addr);
/* Called with dwc->lock held and all DMAC interrupts disabled */
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
- u32 status_err, u32 status_xfer)
+ u32 status_block, u32 status_err, u32 status_xfer)
{
unsigned long flags;
- if (dwc->mask) {
+ if (status_block & dwc->mask) {
void (*callback)(void *param);
void *callback_param;
dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
channel_readl(dwc, LLP));
+ dma_writel(dw, CLEAR.BLOCK, dwc->mask);
callback = dwc->cdesc->period_callback;
callback_param = dwc->cdesc->period_callback_param;
@@ -577,6 +578,7 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
channel_writel(dwc, CTL_LO, 0);
channel_writel(dwc, CTL_HI, 0);
+ dma_writel(dw, CLEAR.BLOCK, dwc->mask);
dma_writel(dw, CLEAR.ERROR, dwc->mask);
dma_writel(dw, CLEAR.XFER, dwc->mask);
@@ -585,6 +587,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
spin_unlock_irqrestore(&dwc->lock, flags);
}
+
+ /* Re-enable interrupts */
+ channel_set_bit(dw, MASK.BLOCK, dwc->mask);
}
/* ------------------------------------------------------------------------- */
@@ -593,10 +598,12 @@ static void dw_dma_tasklet(unsigned long data)
{
struct dw_dma *dw = (struct dw_dma *)data;
struct dw_dma_chan *dwc;
+ u32 status_block;
u32 status_xfer;
u32 status_err;
int i;
+ status_block = dma_readl(dw, RAW.BLOCK);
status_xfer = dma_readl(dw, RAW.XFER);
status_err = dma_readl(dw, RAW.ERROR);
@@ -605,16 +612,15 @@ static void dw_dma_tasklet(unsigned long data)
for (i = 0; i < dw->dma.chancnt; i++) {
dwc = &dw->chan[i];
if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
- dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
+ dwc_handle_cyclic(dw, dwc, status_block, status_err,
+ status_xfer);
else if (status_err & (1 << i))
dwc_handle_error(dw, dwc);
else if (status_xfer & (1 << i))
dwc_scan_descriptors(dw, dwc);
}
- /*
- * Re-enable interrupts.
- */
+ /* Re-enable interrupts */
channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}
@@ -635,6 +641,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
* softirq handler.
*/
channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
+ channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
status = dma_readl(dw, STATUS_INT);
@@ -645,6 +652,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
/* Try to recover */
channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
+ channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
@@ -1111,6 +1119,7 @@ static void dw_dma_off(struct dw_dma *dw)
dma_writel(dw, CFG, 0);
channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
+ channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
@@ -1216,6 +1225,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
/* Disable interrupts */
channel_clear_bit(dw, MASK.XFER, dwc->mask);
+ channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
channel_clear_bit(dw, MASK.ERROR, dwc->mask);
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -1245,7 +1255,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
int dw_dma_cyclic_start(struct dma_chan *chan)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
- struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+ struct dw_dma *dw = to_dw_dma(chan->device);
unsigned long flags;
if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
@@ -1255,25 +1265,10 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
spin_lock_irqsave(&dwc->lock, flags);
- /* Assert channel is idle */
- if (dma_readl(dw, CH_EN) & dwc->mask) {
- dev_err(chan2dev(&dwc->chan),
- "%s: BUG: Attempted to start non-idle channel\n",
- __func__);
- dwc_dump_chan_regs(dwc);
- spin_unlock_irqrestore(&dwc->lock, flags);
- return -EBUSY;
- }
-
- dma_writel(dw, CLEAR.ERROR, dwc->mask);
- dma_writel(dw, CLEAR.XFER, dwc->mask);
+ /* Enable interrupts to perform cyclic transfer */
+ channel_set_bit(dw, MASK.BLOCK, dwc->mask);
- /* Setup DMAC channel registers */
- channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
- channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
- channel_writel(dwc, CTL_HI, 0);
-
- channel_set_bit(dw, CH_EN, dwc->mask);
+ dwc_dostart(dwc, dwc->cdesc->desc[0]);
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -1479,6 +1474,7 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
dwc_chan_disable(dw, dwc);
+ dma_writel(dw, CLEAR.BLOCK, dwc->mask);
dma_writel(dw, CLEAR.ERROR, dwc->mask);
dma_writel(dw, CLEAR.XFER, dwc->mask);
@@ -1567,9 +1563,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
/* Force dma off, just in case */
dw_dma_off(dw);
- /* Disable BLOCK interrupts as well */
- channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
-
/* Create a pool of consistent memory blocks for hardware descriptors */
dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
sizeof(struct dw_desc), 4, 0);
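The net effect of the BLOCK-interrupt changes above is a per-period notification path for cyclic transfers: the tasklet reads the raw BLOCK status, acks it per channel, invokes the period callback, then re-arms the per-channel BLOCK mask. A hedged sketch of that flow, with illustrative helper names rather than the real dw_dmac accessors:

/* Sketch of the cyclic period path; the helpers stand in for the
 * dma_readl()/dma_writel()/channel_set_bit() calls in the patch. */
struct dw_chan_sketch {
	u32 mask;
	void (*period_callback)(void *param);
	void *period_callback_param;
};

static void handle_cyclic_period(struct dw_chan_sketch *c)
{
	if (!(read_raw_block_status() & c->mask))
		return;			/* no period boundary on this channel */

	clear_block_status(c->mask);	/* ack: CLEAR.BLOCK */
	if (c->period_callback)		/* one callback per completed period */
		c->period_callback(c->period_callback_param);
	enable_block_mask(c->mask);	/* re-arm: MASK.BLOCK */
}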
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index fc4156afa070..a59061e4221a 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -583,6 +583,8 @@ static void set_updater_desc(struct pxad_desc_sw *sw_desc,
(PXA_DCMD_LENGTH & sizeof(u32));
if (flags & DMA_PREP_INTERRUPT)
updater->dcmd |= PXA_DCMD_ENDIRQEN;
+ if (sw_desc->cyclic)
+ sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr = sw_desc->first;
}
static bool is_desc_completed(struct virt_dma_desc *vd)
@@ -673,6 +675,10 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
dev_dbg(&chan->vc.chan.dev->device,
"%s(): checking txd %p[%x]: completed=%d\n",
__func__, vd, vd->tx.cookie, is_desc_completed(vd));
+ if (to_pxad_sw_desc(vd)->cyclic) {
+ vchan_cyclic_callback(vd);
+ break;
+ }
if (is_desc_completed(vd)) {
list_del(&vd->node);
vchan_cookie_complete(vd);
@@ -1080,7 +1086,7 @@ pxad_prep_dma_cyclic(struct dma_chan *dchan,
return NULL;
pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
- dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH | period_len);
+ dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH & period_len);
dev_dbg(&chan->vc.chan.dev->device,
"%s(): buf_addr=0x%lx len=%zu period=%zu dir=%d flags=%lx\n",
__func__, (unsigned long)buf_addr, len, period_len, dir, flags);
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 9eee13ef83a5..d87a47547ba5 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -1452,7 +1452,7 @@ static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
u64 chan_off;
u64 dram_base = get_dram_base(pvt, range);
u64 hole_off = f10_dhar_offset(pvt);
- u64 dct_sel_base_off = (pvt->dct_sel_hi & 0xFFFFFC00) << 16;
+ u64 dct_sel_base_off = (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;
if (hi_rng) {
/*
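The one-character fix above is a classic promotion bug: dct_sel_hi is 32 bits wide, so without the cast the left shift is performed in 32-bit arithmetic and the top bits are discarded before the widening assignment. A hedged illustration:

u32 hi = 0xFFFFFC00;
u64 wrong = (hi & 0xFFFFFC00) << 16;		/* 32-bit shift: 0x00000000FC000000 */
u64 right = (u64)(hi & 0xFFFFFC00) << 16;	/* 64-bit shift: 0x0000FFFFFC000000 */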
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index 592af5f0cf39..53587377e672 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -435,16 +435,13 @@ void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
*/
void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
{
- int status;
-
if (!edac_dev->edac_check)
return;
- status = cancel_delayed_work(&edac_dev->work);
- if (status == 0) {
- /* workq instance might be running, wait for it */
- flush_workqueue(edac_workqueue);
- }
+ edac_dev->op_state = OP_OFFLINE;
+
+ cancel_delayed_work_sync(&edac_dev->work);
+ flush_workqueue(edac_workqueue);
}
/*
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 77ecd6a4179a..1b2c2187b347 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -586,18 +586,10 @@ static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec,
*/
static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
{
- int status;
-
- if (mci->op_state != OP_RUNNING_POLL)
- return;
-
- status = cancel_delayed_work(&mci->work);
- if (status == 0) {
- edac_dbg(0, "not canceled, flush the queue\n");
+ mci->op_state = OP_OFFLINE;
- /* workq instance might be running, wait for it */
- flush_workqueue(edac_workqueue);
- }
+ cancel_delayed_work_sync(&mci->work);
+ flush_workqueue(edac_workqueue);
}
/*
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index a75acea0f674..58aed67b7eba 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -880,21 +880,26 @@ static struct device_type mci_attr_type = {
int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
const struct attribute_group **groups)
{
+ char *name;
int i, err;
/*
* The memory controller needs its own bus, in order to avoid
* namespace conflicts at /sys/bus/edac.
*/
- mci->bus->name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
- if (!mci->bus->name)
+ name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
+ if (!name)
return -ENOMEM;
+ mci->bus->name = name;
+
edac_dbg(0, "creating bus %s\n", mci->bus->name);
err = bus_register(mci->bus);
- if (err < 0)
- goto fail_free_name;
+ if (err < 0) {
+ kfree(name);
+ return err;
+ }
/* get the /sys/devices/system/edac subsys reference */
mci->dev.type = &mci_attr_type;
@@ -961,8 +966,8 @@ fail_unregister_dimm:
device_unregister(&mci->dev);
fail_unregister_bus:
bus_unregister(mci->bus);
-fail_free_name:
- kfree(mci->bus->name);
+ kfree(name);
+
return err;
}
@@ -993,10 +998,12 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
void edac_unregister_sysfs(struct mem_ctl_info *mci)
{
+ const char *name = mci->bus->name;
+
edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
device_unregister(&mci->dev);
bus_unregister(mci->bus);
- kfree(mci->bus->name);
+ kfree(name);
}
static void mc_attr_release(struct device *dev)
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
index 2cf44b4db80c..b4b38603b804 100644
--- a/drivers/edac/edac_pci.c
+++ b/drivers/edac/edac_pci.c
@@ -274,13 +274,12 @@ static void edac_pci_workq_setup(struct edac_pci_ctl_info *pci,
*/
static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci)
{
- int status;
-
edac_dbg(0, "\n");
- status = cancel_delayed_work(&pci->work);
- if (status == 0)
- flush_workqueue(edac_workqueue);
+ pci->op_state = OP_OFFLINE;
+
+ cancel_delayed_work_sync(&pci->work);
+ flush_workqueue(edac_workqueue);
}
/*
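All three EDAC teardown fixes in this series share one idiom: flip the state to OP_OFFLINE first, so a work function that is already executing sees the flag and stops re-arming itself, then use the _sync cancel to wait for any in-flight run instead of the old racy cancel-then-maybe-flush sequence. A minimal sketch, with an illustrative control struct standing in for the real EDAC ones:

struct ctl_sketch {
	int op_state;			/* e.g. OP_RUNNING_POLL / OP_OFFLINE */
	struct delayed_work work;
};

static void workq_teardown_sketch(struct ctl_sketch *ctl)
{
	ctl->op_state = OP_OFFLINE;		/* running work sees this and stops requeueing */
	cancel_delayed_work_sync(&ctl->work);	/* waits if the work is mid-execution */
	flush_workqueue(edac_workqueue);	/* drain anything still queued behind it */
}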
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 429309c62699..cbee3179ec08 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -1117,8 +1117,8 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
n_tads, gb, (mb*1000)/1024,
((u64)tmp_mb) << 20L,
- (u32)TAD_SOCK(reg),
- (u32)TAD_CH(reg),
+ (u32)(1 << TAD_SOCK(reg)),
+ (u32)TAD_CH(reg) + 1,
(u32)TAD_TGT0(reg),
(u32)TAD_TGT1(reg),
(u32)TAD_TGT2(reg),
@@ -1396,7 +1396,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
}
ch_way = TAD_CH(reg) + 1;
- sck_way = TAD_SOCK(reg) + 1;
+ sck_way = 1 << TAD_SOCK(reg);
if (ch_way == 3)
idx = addr >> 6;
@@ -1453,7 +1453,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
n_tads,
addr,
limit,
- (u32)TAD_SOCK(reg),
+ sck_way,
ch_way,
offset,
idx,
@@ -1468,18 +1468,12 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
offset, addr);
return -EINVAL;
}
- addr -= offset;
- /* Store the low bits [0:6] of the addr */
- ch_addr = addr & 0x7f;
- /* Remove socket wayness and remove 6 bits */
- addr >>= 6;
- addr = div_u64(addr, sck_xch);
-#if 0
- /* Divide by channel way */
- addr = addr / ch_way;
-#endif
- /* Recover the last 6 bits */
- ch_addr |= addr << 6;
+
+ ch_addr = addr - offset;
+ ch_addr >>= (6 + shiftup);
+ ch_addr /= ch_way * sck_way;
+ ch_addr <<= (6 + shiftup);
+ ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
/*
* Step 3) Decode rank
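The replacement arithmetic above reads as: strip the interleave offset, drop the low 6+shiftup bits that are not interleaved, divide out the combined channel and socket wayness, then splice the low bits back in. A hedged helper capturing just that math (shiftup is computed elsewhere in the full patch):

static u64 decode_ch_addr(u64 addr, u64 offset, u32 ch_way, u32 sck_way,
			  u32 shiftup)
{
	u64 ch_addr = addr - offset;

	ch_addr >>= (6 + shiftup);	/* bits below this are not interleaved */
	ch_addr /= ch_way * sck_way;	/* remove channel x socket interleave */
	ch_addr <<= (6 + shiftup);	/* restore the bit position */
	ch_addr |= addr & ((1ULL << (6 + shiftup)) - 1);	/* splice low bits back */
	return ch_addr;
}

/* Example: addr = 0x1000c0, offset = 0, ch_way = 2, sck_way = 2,
 * shiftup = 0 gives (0x4003 / 4) << 6 = 0x40000. */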
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index cf478fe6b335..efaca3b0a8ef 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -174,6 +174,16 @@ config QCOM_SCM_64
depends on QCOM_SCM && ARM64
source "drivers/firmware/broadcom/Kconfig"
+
+config REBOOT_REASON_SRAM
+ bool "Pass reboot reason to bootloader via SRAM"
+ default n
+ help
+ On many systems there is a desire to provide a reboot reason to
+ the bootloader, so that the bootloader can boot into a desired
+ mode on the next boot. This option enables support for communicating
+ this reason to the bootloader via SRAM.

+
source "drivers/firmware/google/Kconfig"
source "drivers/firmware/efi/Kconfig"
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 48dd4175297e..c1ac84532c2d 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_QCOM_SCM) += qcom_scm.o
obj-$(CONFIG_QCOM_SCM_64) += qcom_scm-64.o
obj-$(CONFIG_QCOM_SCM_32) += qcom_scm-32.o
CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch armv7-a\n.arch_extension sec,-DREQUIRES_SEC=1) -march=armv7-a
+obj-$(CONFIG_REBOOT_REASON_SRAM)+= reboot_reason_sram.o
obj-y += broadcom/
obj-$(CONFIG_GOOGLE_FIRMWARE) += google/
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
index 756eca8c4cf8..10e6774ab2a2 100644
--- a/drivers/firmware/efi/efivars.c
+++ b/drivers/firmware/efi/efivars.c
@@ -221,7 +221,7 @@ sanity_check(struct efi_variable *var, efi_char16_t *name, efi_guid_t vendor,
}
if ((attributes & ~EFI_VARIABLE_MASK) != 0 ||
- efivar_validate(name, data, size) == false) {
+ efivar_validate(vendor, name, data, size) == false) {
printk(KERN_ERR "efivars: Malformed variable content\n");
return -EINVAL;
}
@@ -447,7 +447,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
}
if ((attributes & ~EFI_VARIABLE_MASK) != 0 ||
- efivar_validate(name, data, size) == false) {
+ efivar_validate(new_var->VendorGuid, name, data,
+ size) == false) {
printk(KERN_ERR "efivars: Malformed variable content\n");
return -EINVAL;
}
@@ -540,38 +541,30 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
static int
efivar_create_sysfs_entry(struct efivar_entry *new_var)
{
- int i, short_name_size;
+ int short_name_size;
char *short_name;
- unsigned long variable_name_size;
- efi_char16_t *variable_name;
+ unsigned long utf8_name_size;
+ efi_char16_t *variable_name = new_var->var.VariableName;
int ret;
- variable_name = new_var->var.VariableName;
- variable_name_size = ucs2_strlen(variable_name) * sizeof(efi_char16_t);
-
/*
- * Length of the variable bytes in ASCII, plus the '-' separator,
+ * Length of the variable bytes in UTF8, plus the '-' separator,
* plus the GUID, plus trailing NUL
*/
- short_name_size = variable_name_size / sizeof(efi_char16_t)
- + 1 + EFI_VARIABLE_GUID_LEN + 1;
-
- short_name = kzalloc(short_name_size, GFP_KERNEL);
+ utf8_name_size = ucs2_utf8size(variable_name);
+ short_name_size = utf8_name_size + 1 + EFI_VARIABLE_GUID_LEN + 1;
+ short_name = kmalloc(short_name_size, GFP_KERNEL);
if (!short_name)
return -ENOMEM;
- /* Convert Unicode to normal chars (assume top bits are 0),
- ala UTF-8 */
- for (i=0; i < (int)(variable_name_size / sizeof(efi_char16_t)); i++) {
- short_name[i] = variable_name[i] & 0xFF;
- }
+ ucs2_as_utf8(short_name, variable_name, short_name_size);
+
/* This is ugly, but necessary to separate one vendor's
private variables from another's. */
-
- *(short_name + strlen(short_name)) = '-';
+ short_name[utf8_name_size] = '-';
efi_guid_to_str(&new_var->var.VendorGuid,
- short_name + strlen(short_name));
+ short_name + utf8_name_size + 1);
new_var->kobj.kset = efivars_kset;
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index 70a0fb10517f..7f2ea21c730d 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -165,67 +165,133 @@ validate_ascii_string(efi_char16_t *var_name, int match, u8 *buffer,
}
struct variable_validate {
+ efi_guid_t vendor;
char *name;
bool (*validate)(efi_char16_t *var_name, int match, u8 *data,
unsigned long len);
};
+/*
+ * This is the list of variables we need to validate, as well as the
+ * whitelist for what we think is safe not to default to immutable.
+ *
+ * If it has a validate() method that's not NULL, it'll go into the
+ * validation routine. If not, it is assumed valid, but still used for
+ * whitelisting.
+ *
+ * Note that it's sorted by {vendor,name}, but globbed names must come after
+ * any other name with the same prefix.
+ */
static const struct variable_validate variable_validate[] = {
- { "BootNext", validate_uint16 },
- { "BootOrder", validate_boot_order },
- { "DriverOrder", validate_boot_order },
- { "Boot*", validate_load_option },
- { "Driver*", validate_load_option },
- { "ConIn", validate_device_path },
- { "ConInDev", validate_device_path },
- { "ConOut", validate_device_path },
- { "ConOutDev", validate_device_path },
- { "ErrOut", validate_device_path },
- { "ErrOutDev", validate_device_path },
- { "Timeout", validate_uint16 },
- { "Lang", validate_ascii_string },
- { "PlatformLang", validate_ascii_string },
- { "", NULL },
+ { EFI_GLOBAL_VARIABLE_GUID, "BootNext", validate_uint16 },
+ { EFI_GLOBAL_VARIABLE_GUID, "BootOrder", validate_boot_order },
+ { EFI_GLOBAL_VARIABLE_GUID, "Boot*", validate_load_option },
+ { EFI_GLOBAL_VARIABLE_GUID, "DriverOrder", validate_boot_order },
+ { EFI_GLOBAL_VARIABLE_GUID, "Driver*", validate_load_option },
+ { EFI_GLOBAL_VARIABLE_GUID, "ConIn", validate_device_path },
+ { EFI_GLOBAL_VARIABLE_GUID, "ConInDev", validate_device_path },
+ { EFI_GLOBAL_VARIABLE_GUID, "ConOut", validate_device_path },
+ { EFI_GLOBAL_VARIABLE_GUID, "ConOutDev", validate_device_path },
+ { EFI_GLOBAL_VARIABLE_GUID, "ErrOut", validate_device_path },
+ { EFI_GLOBAL_VARIABLE_GUID, "ErrOutDev", validate_device_path },
+ { EFI_GLOBAL_VARIABLE_GUID, "Lang", validate_ascii_string },
+ { EFI_GLOBAL_VARIABLE_GUID, "OsIndications", NULL },
+ { EFI_GLOBAL_VARIABLE_GUID, "PlatformLang", validate_ascii_string },
+ { EFI_GLOBAL_VARIABLE_GUID, "Timeout", validate_uint16 },
+ { LINUX_EFI_CRASH_GUID, "*", NULL },
+ { NULL_GUID, "", NULL },
};
+static bool
+variable_matches(const char *var_name, size_t len, const char *match_name,
+ int *match)
+{
+ for (*match = 0; ; (*match)++) {
+ char c = match_name[*match];
+ char u = var_name[*match];
+
+ /* Wildcard in the matching name means we've matched */
+ if (c == '*')
+ return true;
+
+ /* Case sensitive match */
+ if (!c && *match == len)
+ return true;
+
+ if (c != u)
+ return false;
+
+ if (!c)
+ return true;
+ }
+ return true;
+}
+
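In variable_matches() above, a trailing '*' in the match name acts as a prefix glob, and *match reports how far the comparison got when the wildcard was hit. A hedged usage sketch (illustrative, not part of the patch):

static bool is_boot_entry(const char *utf8_name, size_t len)
{
	int match = 0;

	/* "Boot*" covers Boot0000, Boot0001, ...; exact names also match */
	return variable_matches(utf8_name, len, "Boot*", &match);
}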
bool
-efivar_validate(efi_char16_t *var_name, u8 *data, unsigned long len)
+efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
+ unsigned long data_size)
{
int i;
- u16 *unicode_name = var_name;
+ unsigned long utf8_size;
+ u8 *utf8_name;
- for (i = 0; variable_validate[i].validate != NULL; i++) {
- const char *name = variable_validate[i].name;
- int match;
+ utf8_size = ucs2_utf8size(var_name);
+ utf8_name = kmalloc(utf8_size + 1, GFP_KERNEL);
+ if (!utf8_name)
+ return false;
- for (match = 0; ; match++) {
- char c = name[match];
- u16 u = unicode_name[match];
+ ucs2_as_utf8(utf8_name, var_name, utf8_size);
+ utf8_name[utf8_size] = '\0';
- /* All special variables are plain ascii */
- if (u > 127)
- return true;
+ for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
+ const char *name = variable_validate[i].name;
+ int match = 0;
- /* Wildcard in the matching name means we've matched */
- if (c == '*')
- return variable_validate[i].validate(var_name,
- match, data, len);
+ if (efi_guidcmp(vendor, variable_validate[i].vendor))
+ continue;
- /* Case sensitive match */
- if (c != u)
+ if (variable_matches(utf8_name, utf8_size+1, name, &match)) {
+ if (variable_validate[i].validate == NULL)
break;
-
- /* Reached the end of the string while matching */
- if (!c)
- return variable_validate[i].validate(var_name,
- match, data, len);
+ kfree(utf8_name);
+ return variable_validate[i].validate(var_name, match,
+ data, data_size);
}
}
-
+ kfree(utf8_name);
return true;
}
EXPORT_SYMBOL_GPL(efivar_validate);
+bool
+efivar_variable_is_removable(efi_guid_t vendor, const char *var_name,
+ size_t len)
+{
+ int i;
+ bool found = false;
+ int match = 0;
+
+ /*
+ * Check if our variable is in the validated variables list
+ */
+ for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
+ if (efi_guidcmp(variable_validate[i].vendor, vendor))
+ continue;
+
+ if (variable_matches(var_name, len,
+ variable_validate[i].name, &match)) {
+ found = true;
+ break;
+ }
+ }
+
+ /*
+ * If it's in our list, it is removable.
+ */
+ return found;
+}
+EXPORT_SYMBOL_GPL(efivar_variable_is_removable);
+
static efi_status_t
check_var_size(u32 attributes, unsigned long size)
{
@@ -852,7 +918,7 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
*set = false;
- if (efivar_validate(name, data, *size) == false)
+ if (efivar_validate(*vendor, name, data, *size) == false)
return -EINVAL;
/*
diff --git a/drivers/firmware/reboot_reason_sram.c b/drivers/firmware/reboot_reason_sram.c
new file mode 100644
index 000000000000..af87b6d34f77
--- /dev/null
+++ b/drivers/firmware/reboot_reason_sram.c
@@ -0,0 +1,107 @@
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/reboot.h>
+
+/* Types of reasons */
+enum {
+ NONE,
+ BOOTLOADER,
+ RECOVERY,
+ OEM,
+ MAX_REASONS
+};
+
+static u32 reasons[MAX_REASONS];
+static void __iomem *reboot_reason_val_addr;
+static struct notifier_block reboot_nb;
+
+static int reboot_reason(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ char *cmd = (char *)data;
+ u32 reason = reasons[NONE];
+
+ if (!reboot_reason_val_addr)
+ return NOTIFY_DONE;
+
+ if (cmd != NULL) {
+ if (!strncmp(cmd, "bootloader", 10))
+ reason = reasons[BOOTLOADER];
+ else if (!strncmp(cmd, "recovery", 8))
+ reason = reasons[RECOVERY];
+ else if (!strncmp(cmd, "oem-", 4)) {
+ unsigned long code;
+
+ if (!kstrtoul(cmd+4, 0, &code))
+ reason = reasons[OEM] | (code & 0xff);
+ }
+ }
+
+ if (reason != -1)
+ writel(reason, reboot_reason_val_addr);
+ return NOTIFY_DONE;
+}
+
+static int reboot_reason_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ u32 val;
+ int i;
+
+ /* initialize the reasons */
+ for (i = 0; i < MAX_REASONS; i++)
+ reasons[i] = -1;
+
+ /* Try to grab the reason io address */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ reboot_reason_val_addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(reboot_reason_val_addr))
+ return PTR_ERR(reboot_reason_val_addr);
+
+ /* initialize specified reasons from DT */
+ if (!of_property_read_u32(pdev->dev.of_node, "reason,none", &val))
+ reasons[NONE] = val;
+ if (!of_property_read_u32(pdev->dev.of_node, "reason,bootloader", &val))
+ reasons[BOOTLOADER] = val;
+ if (!of_property_read_u32(pdev->dev.of_node, "reason,recovery", &val))
+ reasons[RECOVERY] = val;
+ if (!of_property_read_u32(pdev->dev.of_node, "reason,oem", &val))
+ reasons[OEM] = val;
+
+ /* Install the notifier */
+ reboot_nb.notifier_call = reboot_reason;
+ reboot_nb.priority = 256;
+ if (register_reboot_notifier(&reboot_nb)) {
+ dev_err(&pdev->dev,
+ "failed to setup restart handler.\n");
+ }
+ return 0;
+}
+
+static int reboot_reason_remove(struct platform_device *pdev)
+{
+ unregister_reboot_notifier(&reboot_nb);
+ return 0;
+}
+
+static const struct of_device_id reboot_reason_of_match[] = {
+ { .compatible = "linux,reboot-reason-sram", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, reboot_reason_of_match);
+
+static struct platform_driver reboot_reason_driver = {
+ .driver = {
+ .name = "reboot-reason-sram",
+ .of_match_table = reboot_reason_of_match,
+ },
+ .probe = reboot_reason_probe,
+ .remove = reboot_reason_remove,
+};
+module_platform_driver(reboot_reason_driver);
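For reference, a device-tree node matching this driver might look as follows; this is a hypothetical sketch only — the SRAM address and the reason,* magic values are bootloader-specific and invented here, not taken from any real board:

	reboot-reason@4a326ff4 {
		compatible = "linux,reboot-reason-sram";
		reg = <0x4a326ff4 0x4>;
		reason,none = <0x0>;
		reason,bootloader = <0x77665500>;
		reason,recovery = <0x77665502>;
		reason,oem = <0x6f656d00>;
	};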
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index e9ed439a5b65..66386b42a1f4 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -2,5 +2,5 @@
# taken to initialize them in the correct order. Link order is the only way
# to ensure this currently.
obj-$(CONFIG_TEGRA_HOST1X) += host1x/
-obj-y += drm/ vga/
+obj-y += drm/ vga/ arm/
obj-$(CONFIG_IMX_IPUV3_CORE) += ipu-v3/
diff --git a/drivers/gpu/arm/Kconfig b/drivers/gpu/arm/Kconfig
new file mode 100644
index 000000000000..255cc81c7d23
--- /dev/null
+++ b/drivers/gpu/arm/Kconfig
@@ -0,0 +1 @@
+source "drivers/gpu/arm/utgard/Kconfig"
diff --git a/drivers/gpu/arm/Makefile b/drivers/gpu/arm/Makefile
new file mode 100644
index 000000000000..e4dcf28c56f4
--- /dev/null
+++ b/drivers/gpu/arm/Makefile
@@ -0,0 +1 @@
+obj-y += utgard/
diff --git a/drivers/gpu/arm/utgard/Kbuild b/drivers/gpu/arm/utgard/Kbuild
new file mode 100644
index 000000000000..bf247bcd3e79
--- /dev/null
+++ b/drivers/gpu/arm/utgard/Kbuild
@@ -0,0 +1,207 @@
+#
+# Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+
+# This file is called by the Linux build system.
+
+# set up defaults if not defined by the user
+TIMESTAMP ?= default
+OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB ?= 16
+USING_GPU_UTILIZATION ?= 0
+PROFILING_SKIP_PP_JOBS ?= 0
+PROFILING_SKIP_PP_AND_GP_JOBS ?= 0
+MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP ?= 0
+MALI_PP_SCHEDULER_KEEP_SUB_JOB_STARTS_ALIGNED ?= 0
+MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS ?= 0
+MALI_UPPER_HALF_SCHEDULING ?= 1
+MALI_ENABLE_CPU_CYCLES ?= 0
+
+# For customer releases the Linux Device Drivers will be provided as ARM proprietary and GPL releases:
+# The in-tree driver will only use the GPL releases.
+ccflags-y += -I$(src)/linux/license/gpl
+
+ifeq ($(USING_GPU_UTILIZATION), 1)
+ ifeq ($(USING_DVFS), 1)
 $(error USING_GPU_UTILIZATION conflicts with USING_DVFS; read the Integration Guide to choose which one you need)
+ endif
+endif
+
+mali-y += \
+ linux/mali_osk_atomics.o \
+ linux/mali_osk_irq.o \
+ linux/mali_osk_wq.o \
+ linux/mali_osk_locks.o \
+ linux/mali_osk_wait_queue.o \
+ linux/mali_osk_low_level_mem.o \
+ linux/mali_osk_math.o \
+ linux/mali_osk_memory.o \
+ linux/mali_osk_misc.o \
+ linux/mali_osk_mali.o \
+ linux/mali_osk_notification.o \
+ linux/mali_osk_time.o \
+ linux/mali_osk_timers.o \
+ linux/mali_osk_bitmap.o
+
+mali-y += linux/mali_memory.o linux/mali_memory_os_alloc.o
+mali-y += linux/mali_memory_external.o
+mali-y += linux/mali_memory_block_alloc.o
+mali-y += linux/mali_memory_swap_alloc.o
+
+mali-y += \
+ linux/mali_memory_manager.o \
+ linux/mali_memory_virtual.o \
+ linux/mali_memory_util.o \
+ linux/mali_memory_cow.o \
+ linux/mali_memory_defer_bind.o
+
+mali-y += \
+ linux/mali_ukk_mem.o \
+ linux/mali_ukk_gp.o \
+ linux/mali_ukk_pp.o \
+ linux/mali_ukk_core.o \
+ linux/mali_ukk_soft_job.o \
+ linux/mali_ukk_timeline.o
+
+# Source files which are always included in a build
+mali-y += \
+ common/mali_kernel_core.o \
+ linux/mali_kernel_linux.o \
+ common/mali_session.o \
+ linux/mali_device_pause_resume.o \
+ common/mali_kernel_vsync.o \
+ linux/mali_ukk_vsync.o \
+ linux/mali_kernel_sysfs.o \
+ common/mali_mmu.o \
+ common/mali_mmu_page_directory.o \
+ common/mali_mem_validation.o \
+ common/mali_hw_core.o \
+ common/mali_gp.o \
+ common/mali_pp.o \
+ common/mali_pp_job.o \
+ common/mali_gp_job.o \
+ common/mali_soft_job.o \
+ common/mali_scheduler.o \
+ common/mali_executor.o \
+ common/mali_group.o \
+ common/mali_dlbu.o \
+ common/mali_broadcast.o \
+ common/mali_pm.o \
+ common/mali_pmu.o \
+ common/mali_user_settings_db.o \
+ common/mali_kernel_utilization.o \
+ common/mali_control_timer.o \
+ common/mali_l2_cache.o \
+ common/mali_timeline.o \
+ common/mali_timeline_fence_wait.o \
+ common/mali_timeline_sync_fence.o \
+ common/mali_spinlock_reentrant.o \
+ common/mali_pm_domain.o \
+ linux/mali_osk_pm.o \
+ linux/mali_pmu_power_up_down.o \
+ __malidrv_build_info.o
+
+EXTRA_DEFINES += -DMALI_FAKE_PLATFORM_DEVICE=1
+mali-y += platform/hikey/mali_hikey.o
+
+mali-$(CONFIG_MALI400_PROFILING) += linux/mali_ukk_profiling.o
+mali-$(CONFIG_MALI400_PROFILING) += linux/mali_osk_profiling.o
+
+mali-$(CONFIG_MALI400_INTERNAL_PROFILING) += linux/mali_profiling_internal.o timestamp-$(TIMESTAMP)/mali_timestamp.o
+ccflags-$(CONFIG_MALI400_INTERNAL_PROFILING) += -I$(src)/timestamp-$(TIMESTAMP)
+
+mali-$(CONFIG_DMA_SHARED_BUFFER) += linux/mali_memory_dma_buf.o
+mali-$(CONFIG_SYNC) += linux/mali_sync.o
+ccflags-$(CONFIG_SYNC) += -Idrivers/staging/android
+
+mali-$(CONFIG_MALI400_UMP) += linux/mali_memory_ump.o
+
+mali-$(CONFIG_MALI_DVFS) += common/mali_dvfs_policy.o
+
+# Tell the Linux build system from which .o file to create the kernel module
+obj-$(CONFIG_MALI400) := mali.o
+
+ccflags-y += $(EXTRA_DEFINES)
+
+# Set up our defines, which will be passed to gcc
+ccflags-y += -DMALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP=$(MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP)
+ccflags-y += -DMALI_PP_SCHEDULER_KEEP_SUB_JOB_STARTS_ALIGNED=$(MALI_PP_SCHEDULER_KEEP_SUB_JOB_STARTS_ALIGNED)
+ccflags-y += -DMALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS=$(MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS)
+ccflags-y += -DMALI_STATE_TRACKING=1
+ccflags-y += -DMALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB=$(OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB)
+ccflags-y += -DUSING_GPU_UTILIZATION=$(USING_GPU_UTILIZATION)
+ccflags-y += -DMALI_ENABLE_CPU_CYCLES=$(MALI_ENABLE_CPU_CYCLES)
+
+ifeq ($(MALI_UPPER_HALF_SCHEDULING),1)
+ ccflags-y += -DMALI_UPPER_HALF_SCHEDULING
+endif
+
+ccflags-$(CONFIG_MALI400_UMP) += -I$(src)/../../ump/include/ump
+ccflags-$(CONFIG_MALI400_DEBUG) += -DDEBUG
+
+# Use our defines when compiling
+ccflags-y += -I$(src) -I$(src)/include -I$(src)/common -I$(src)/linux -I$(src)/platform -Wno-date-time
+
+# Get subversion revision number, fall back to only ${MALI_RELEASE_NAME} if no svn info is available
+MALI_RELEASE_NAME=$(shell cat $(src)/.version 2> /dev/null)
+
+SVN_INFO = (cd $(src); svn info 2>/dev/null)
+
+ifneq ($(shell $(SVN_INFO) 2>/dev/null),)
+# SVN detected
+SVN_REV := $(shell $(SVN_INFO) | grep '^Revision: '| sed -e 's/^Revision: //' 2>/dev/null)
+DRIVER_REV := $(MALI_RELEASE_NAME)-r$(SVN_REV)
+CHANGE_DATE := $(shell $(SVN_INFO) | grep '^Last Changed Date: ' | cut -d: -f2- | cut -b2-)
+CHANGED_REVISION := $(shell $(SVN_INFO) | grep '^Last Changed Rev: ' | cut -d: -f2- | cut -b2-)
+REPO_URL := $(shell $(SVN_INFO) | grep '^URL: ' | cut -d: -f2- | cut -b2-)
+
+else # SVN
+GIT_REV := $(shell cd $(src); git describe --always 2>/dev/null)
+ifneq ($(GIT_REV),)
+# Git detected
+DRIVER_REV := $(MALI_RELEASE_NAME)-$(GIT_REV)
+CHANGE_DATE := $(shell cd $(src); git log -1 --format="%ci")
+CHANGED_REVISION := $(GIT_REV)
+REPO_URL := $(shell cd $(src); git describe --all --always 2>/dev/null)
+
+else # Git
+# No Git or SVN detected
+DRIVER_REV := $(MALI_RELEASE_NAME)
+CHANGE_DATE := $(MALI_RELEASE_NAME)
+CHANGED_REVISION := $(MALI_RELEASE_NAME)
+endif
+endif
+
+ccflags-y += -DSVN_REV_STRING=\"$(DRIVER_REV)\"
+
+VERSION_STRINGS :=
+VERSION_STRINGS += API_VERSION=$(shell cd $(src); grep "\#define _MALI_API_VERSION" $(FILES_PREFIX)include/linux/mali/mali_utgard_uk_types.h | cut -d' ' -f 3 )
+VERSION_STRINGS += REPO_URL=$(REPO_URL)
+VERSION_STRINGS += REVISION=$(DRIVER_REV)
+VERSION_STRINGS += CHANGED_REVISION=$(CHANGED_REVISION)
+VERSION_STRINGS += CHANGE_DATE=$(CHANGE_DATE)
+VERSION_STRINGS += BUILD_DATE=$(shell date)
+ifdef CONFIG_MALI400_DEBUG
+VERSION_STRINGS += BUILD=debug
+else
+VERSION_STRINGS += BUILD=release
+endif
+VERSION_STRINGS += TARGET_PLATFORM=$(TARGET_PLATFORM)
+VERSION_STRINGS += MALI_PLATFORM=$(MALI_PLATFORM)
+VERSION_STRINGS += KDIR=$(KDIR)
+VERSION_STRINGS += OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB=$(OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB)
+VERSION_STRINGS += USING_UMP=$(CONFIG_MALI400_UMP)
+VERSION_STRINGS += USING_PROFILING=$(CONFIG_MALI400_PROFILING)
+VERSION_STRINGS += USING_INTERNAL_PROFILING=$(CONFIG_MALI400_INTERNAL_PROFILING)
+VERSION_STRINGS += USING_GPU_UTILIZATION=$(USING_GPU_UTILIZATION)
+VERSION_STRINGS += USING_DVFS=$(CONFIG_MALI_DVFS)
+VERSION_STRINGS += MALI_UPPER_HALF_SCHEDULING=$(MALI_UPPER_HALF_SCHEDULING)
+
+# Create file with Mali driver configuration
+$(src)/__malidrv_build_info.c:
+ @echo 'const char *__malidrv_build_info(void) { return "malidrv: $(VERSION_STRINGS)";}' > $(src)/__malidrv_build_info.c
diff --git a/drivers/gpu/arm/utgard/Kconfig b/drivers/gpu/arm/utgard/Kconfig
new file mode 100644
index 000000000000..e174e572d182
--- /dev/null
+++ b/drivers/gpu/arm/utgard/Kconfig
@@ -0,0 +1,118 @@
+config MALI400
+ tristate "Mali-300/400/450 support"
+ depends on ARM || ARM64
+ select DMA_SHARED_BUFFER
+ ---help---
+ This enables support for the ARM Mali-300, Mali-400, and Mali-450
+ GPUs.
+
+ To compile this driver as a module, choose M here: the module will be
+ called mali.
+
+config MALI450
+ bool "Enable Mali-450 support"
+ depends on MALI400
+ ---help---
+ This enables support for Mali-450 specific features.
+
+config MALI470
+ bool "Enable Mali-470 support"
+ depends on MALI400
+ ---help---
+ This enables support for Mali-470 specific features.
+
+config MALI400_DEBUG
+ bool "Enable debug in Mali driver"
+ depends on MALI400
+ ---help---
+ This enables extra debug checks and messages in the Mali driver.
+
+config MALI400_PROFILING
+ bool "Enable Mali profiling"
+ depends on MALI400
+ select TRACEPOINTS
+ default y
+ ---help---
+ This enables gator profiling of Mali GPU events.
+
+config MALI400_INTERNAL_PROFILING
+ bool "Enable internal Mali profiling API"
+ depends on MALI400_PROFILING
+ default n
+ ---help---
+ This enables the internal legacy Mali profiling API.
+
+config MALI400_UMP
+ bool "Enable UMP support"
+ depends on MALI400
+ ---help---
+ This enables support for the UMP memory sharing API in the Mali driver.
+
+config MALI_DVFS
+ bool "Enable Mali dynamically frequency change"
+ depends on MALI400
+ default y
+ ---help---
+ This enables support for dynamically changing the Mali frequency with the goal of lowering power consumption.
+
+config MALI_DMA_BUF_MAP_ON_ATTACH
+ bool "Map dma-buf attachments on attach"
+ depends on MALI400 && DMA_SHARED_BUFFER
+ default y
+ ---help---
+ This makes the Mali driver map dma-buf attachments when they are
+ attached. If this is not set, dma-buf attachments will be mapped
+ every time the GPU needs to access the buffer.
+
+ Mapping on each access can lower performance.
+
+config MALI_SHARED_INTERRUPTS
+ bool "Support for shared interrupts"
+ depends on MALI400
+ default n
+ ---help---
+ Adds functionality required to properly support shared interrupts. Without this support,
+ the device driver will fail during insmod if it detects shared interrupts. This also
+ works when the GPU is not using shared interrupts, but might have a slight performance
+ impact.
+
+config MALI_PMU_PARALLEL_POWER_UP
+ bool "Power up Mali PMU domains in parallel"
+ depends on MALI400
+ default n
+ ---help---
+ This makes the Mali driver power up all PMU power domains in parallel, instead of
+ powering up domains one by one, with a slight delay in between. Powering on all power
+ domains at the same time may cause peak currents higher than what some systems can handle.
+ These systems must not enable this option.
+
+config MALI_DT
+ bool "Using device tree to initialize module"
+ depends on MALI400 && OF
+ default n
+ ---help---
+ This enables the Mali driver to use the device tree to obtain
+ platform resources, and disables the old config method. The Mali
+ driver can run on platforms where the device tree is enabled in the
+ kernel and the corresponding hardware description is properly
+ implemented in the device DTS file.
+
+config MALI_PLAT_SPECIFIC_DT
+ bool "Platform specific Device Tree is being used"
+ depends on MALI_DT
+ default n
+ ---help---
+ This is a pragmatic approach for some platforms which make
+ use of a device tree entry that does not strictly comply with
+ what the standard Utgard driver expects to find, but have
+ their platform data implemented the old way. Such platforms
+ should be converted to using the Device Tree so this
+ configuration option can be removed.
+
+config MALI_QUIET
+ bool "Make Mali driver very quiet"
+ depends on MALI400 && !MALI400_DEBUG
+ default n
+ ---help---
+ This forces the Mali driver to never print any messages.
+
+ If unsure, say N.
diff --git a/drivers/gpu/arm/utgard/Makefile b/drivers/gpu/arm/utgard/Makefile
new file mode 100644
index 000000000000..44c7bb83981a
--- /dev/null
+++ b/drivers/gpu/arm/utgard/Makefile
@@ -0,0 +1,190 @@
+#
+# Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+
+USE_UMPV2=0
+USING_PROFILING ?= 1
+USING_INTERNAL_PROFILING ?= 0
+USING_DVFS ?= 1
+MALI_HEATMAPS_ENABLED ?= 0
+MALI_DMA_BUF_MAP_ON_ATTACH ?= 1
+MALI_PMU_PARALLEL_POWER_UP ?= 0
+USING_DT ?= 0
+MALI_MEM_SWAP_TRACKING ?= 0
+
+# The Makefile sets up "arch" based on the CONFIG, creates the version info
+# string and the __malidrv_build_info.c file, and then calls the Linux build
+# system to actually build the driver. After that point the Kbuild file takes
+# over.
+
+# set up defaults if not defined by the user
+ARCH ?= arm
+
+OSKOS=linux
+FILES_PREFIX=
+
+check_cc2 = \
+ $(shell if $(1) -S -o /dev/null -xc /dev/null > /dev/null 2>&1; \
+ then \
+ echo "$(2)"; \
+ else \
+ echo "$(3)"; \
+ fi ;)
+
+# This conditional makefile exports the global definition ARM_INTERNAL_BUILD. Customer releases will not include arm_internal.mak
+-include ../../../arm_internal.mak
+
+# Give a warning if old config parameters are used
+ifneq ($(CONFIG),)
+$(warning "You have specified the CONFIG variable which is no longer in used. Use TARGET_PLATFORM instead.")
+endif
+
+ifneq ($(CPU),)
+$(warning "You have specified the CPU variable which is no longer in used. Use TARGET_PLATFORM instead.")
+endif
+
+# Include the mapping between TARGET_PLATFORM and KDIR + MALI_PLATFORM
+-include MALI_CONFIGURATION
+export KDIR ?= $(KDIR-$(TARGET_PLATFORM))
+export MALI_PLATFORM ?= $(MALI_PLATFORM-$(TARGET_PLATFORM))
+
+ifneq ($(TARGET_PLATFORM),)
+ifeq ($(MALI_PLATFORM),)
+$(error "Invalid TARGET_PLATFORM: $(TARGET_PLATFORM)")
+endif
+endif
+
+# validate lookup result
+ifeq ($(KDIR),)
+$(error No KDIR found for platform $(TARGET_PLATFORM))
+endif
+
+ifeq ($(USING_GPU_UTILIZATION), 1)
+ ifeq ($(USING_DVFS), 1)
 $(error USING_GPU_UTILIZATION conflicts with USING_DVFS; read the Integration Guide to choose which one you need)
+ endif
+endif
+
+ifeq ($(USING_UMP),1)
+export CONFIG_MALI400_UMP=y
+export EXTRA_DEFINES += -DCONFIG_MALI400_UMP=1
+ifeq ($(USE_UMPV2),1)
+UMP_SYMVERS_FILE ?= ../umpv2/Module.symvers
+else
+UMP_SYMVERS_FILE ?= ../ump/Module.symvers
+endif
+KBUILD_EXTRA_SYMBOLS = $(realpath $(UMP_SYMVERS_FILE))
+$(warning $(KBUILD_EXTRA_SYMBOLS))
+endif
+
+# Define host system directory
+KDIR-$(shell uname -m):=/lib/modules/$(shell uname -r)/build
+
+include $(KDIR)/.config
+
+ifeq ($(ARCH), arm)
+# when compiling for ARM we're cross compiling
+export CROSS_COMPILE ?= $(call check_cc2, arm-linux-gnueabi-gcc, arm-linux-gnueabi-, arm-none-linux-gnueabi-)
+endif
+
+# report detected/selected settings
+ifdef ARM_INTERNAL_BUILD
+$(warning TARGET_PLATFORM $(TARGET_PLATFORM))
+$(warning KDIR $(KDIR))
+$(warning MALI_PLATFORM $(MALI_PLATFORM))
+endif
+
+# Set up build config
+export CONFIG_MALI400=m
+export CONFIG_MALI450=y
+export CONFIG_MALI470=y
+
+export EXTRA_DEFINES += -DCONFIG_MALI400=1
+export EXTRA_DEFINES += -DCONFIG_MALI450=1
+export EXTRA_DEFINES += -DCONFIG_MALI470=1
+
+ifneq ($(MALI_PLATFORM),)
+export EXTRA_DEFINES += -DMALI_FAKE_PLATFORM_DEVICE=1
+export MALI_PLATFORM_FILES = $(wildcard platform/$(MALI_PLATFORM)/*.c)
+endif
+
+ifeq ($(USING_PROFILING),1)
+ifeq ($(CONFIG_TRACEPOINTS),)
+$(warning CONFIG_TRACEPOINTS required for profiling)
+else
+export CONFIG_MALI400_PROFILING=y
+export EXTRA_DEFINES += -DCONFIG_MALI400_PROFILING=1
+ifeq ($(USING_INTERNAL_PROFILING),1)
+export CONFIG_MALI400_INTERNAL_PROFILING=y
+export EXTRA_DEFINES += -DCONFIG_MALI400_INTERNAL_PROFILING=1
+endif
+ifeq ($(MALI_HEATMAPS_ENABLED),1)
+export MALI_HEATMAPS_ENABLED=y
+export EXTRA_DEFINES += -DCONFIG_MALI400_HEATMAPS_ENABLED
+endif
+endif
+endif
+
+ifeq ($(MALI_DMA_BUF_MAP_ON_ATTACH),1)
+export CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH=y
+export EXTRA_DEFINES += -DCONFIG_MALI_DMA_BUF_MAP_ON_ATTACH
+endif
+
+ifeq ($(MALI_SHARED_INTERRUPTS),1)
+export CONFIG_MALI_SHARED_INTERRUPTS=y
+export EXTRA_DEFINES += -DCONFIG_MALI_SHARED_INTERRUPTS
+endif
+
+ifeq ($(USING_DVFS),1)
+export CONFIG_MALI_DVFS=y
+export EXTRA_DEFINES += -DCONFIG_MALI_DVFS
+endif
+
+ifeq ($(MALI_PMU_PARALLEL_POWER_UP),1)
+export CONFIG_MALI_PMU_PARALLEL_POWER_UP=y
+export EXTRA_DEFINES += -DCONFIG_MALI_PMU_PARALLEL_POWER_UP
+endif
+
+ifdef CONFIG_OF
+ifeq ($(USING_DT),1)
+export CONFIG_MALI_DT=y
+export EXTRA_DEFINES += -DCONFIG_MALI_DT
+endif
+endif
+
+ifneq ($(BUILD),release)
+# Debug
+export CONFIG_MALI400_DEBUG=y
+else
+# Release
+ifeq ($(MALI_QUIET),1)
+export CONFIG_MALI_QUIET=y
+export EXTRA_DEFINES += -DCONFIG_MALI_QUIET
+endif
+endif
+
+ifeq ($(MALI_SKIP_JOBS),1)
+EXTRA_DEFINES += -DPROFILING_SKIP_PP_JOBS=1 -DPROFILING_SKIP_GP_JOBS=1
+endif
+
+ifeq ($(MALI_MEM_SWAP_TRACKING),1)
+EXTRA_DEFINES += -DMALI_MEM_SWAP_TRACKING=1
+endif
+
+all: $(UMP_SYMVERS_FILE)
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) modules
+ @rm $(FILES_PREFIX)__malidrv_build_info.c $(FILES_PREFIX)__malidrv_build_info.o
+
+clean:
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean
+
+kernelrelease:
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) kernelrelease
+
+export CONFIG KBUILD_EXTRA_SYMBOLS
diff --git a/drivers/gpu/arm/utgard/common/mali_broadcast.c b/drivers/gpu/arm/utgard/common/mali_broadcast.c
new file mode 100644
index 000000000000..136db61ace4a
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_broadcast.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2012-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_broadcast.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+
+#define MALI_BROADCAST_REGISTER_SIZE 0x1000
+#define MALI_BROADCAST_REG_BROADCAST_MASK 0x0
+#define MALI_BROADCAST_REG_INTERRUPT_MASK 0x4
+
+struct mali_bcast_unit {
+ struct mali_hw_core hw_core;
+ u32 current_mask;
+};
+
+struct mali_bcast_unit *mali_bcast_unit_create(const _mali_osk_resource_t *resource)
+{
+ struct mali_bcast_unit *bcast_unit = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(resource);
+ MALI_DEBUG_PRINT(2, ("Broadcast: Creating Mali Broadcast unit: %s\n",
+ resource->description));
+
+ bcast_unit = _mali_osk_malloc(sizeof(struct mali_bcast_unit));
+ if (NULL == bcast_unit) {
+ MALI_PRINT_ERROR(("Broadcast: Failed to allocate memory for Broadcast unit\n"));
+ return NULL;
+ }
+
+ if (_MALI_OSK_ERR_OK == mali_hw_core_create(&bcast_unit->hw_core,
+ resource, MALI_BROADCAST_REGISTER_SIZE)) {
+ bcast_unit->current_mask = 0;
+ mali_bcast_reset(bcast_unit);
+
+ return bcast_unit;
+ } else {
+ MALI_PRINT_ERROR(("Broadcast: Failed map broadcast unit\n"));
+ }
+
+ _mali_osk_free(bcast_unit);
+
+ return NULL;
+}
+
+void mali_bcast_unit_delete(struct mali_bcast_unit *bcast_unit)
+{
+ MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+ mali_hw_core_delete(&bcast_unit->hw_core);
+ _mali_osk_free(bcast_unit);
+}
+
+/* Call this function to add the @group's id to the bcast mask.
+ * Note: calling this function repeatedly with the same @group
+ * has the same effect as calling it once.
+ */
+void mali_bcast_add_group(struct mali_bcast_unit *bcast_unit,
+ struct mali_group *group)
+{
+ u32 bcast_id;
+ u32 broadcast_mask;
+
+ MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ bcast_id = mali_pp_core_get_bcast_id(mali_group_get_pp_core(group));
+
+ broadcast_mask = bcast_unit->current_mask;
+
+ broadcast_mask |= (bcast_id); /* add PP core to broadcast */
+ broadcast_mask |= (bcast_id << 16); /* add MMU to broadcast */
+
+ /* store mask so we can restore on reset */
+ bcast_unit->current_mask = broadcast_mask;
+}
+
+/* Remove the @group's id from the broadcast mask.
+ * Note: calling this function repeatedly with the same @group
+ * has the same effect as calling it once.
+ */
+void mali_bcast_remove_group(struct mali_bcast_unit *bcast_unit,
+ struct mali_group *group)
+{
+ u32 bcast_id;
+ u32 broadcast_mask;
+
+ MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ bcast_id = mali_pp_core_get_bcast_id(mali_group_get_pp_core(group));
+
+ broadcast_mask = bcast_unit->current_mask;
+
+ broadcast_mask &= ~((bcast_id << 16) | bcast_id);
+
+ /* store mask so we can restore on reset */
+ bcast_unit->current_mask = broadcast_mask;
+}
+
+void mali_bcast_reset(struct mali_bcast_unit *bcast_unit)
+{
+ MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+
+ MALI_DEBUG_PRINT(4,
+ ("Broadcast: setting mask 0x%08X + 0x%08X (reset)\n",
+ bcast_unit->current_mask,
+ bcast_unit->current_mask & 0xFF));
+
+ /* set broadcast mask */
+ mali_hw_core_register_write(&bcast_unit->hw_core,
+ MALI_BROADCAST_REG_BROADCAST_MASK,
+ bcast_unit->current_mask);
+
+ /* set IRQ override mask */
+ mali_hw_core_register_write(&bcast_unit->hw_core,
+ MALI_BROADCAST_REG_INTERRUPT_MASK,
+ bcast_unit->current_mask & 0xFF);
+}
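+
+/*
+ * Note (sketch): only the low byte of the cached mask is written to the
+ * interrupt mask register above, since IRQ override applies per PP core
+ * while the upper half of the broadcast mask carries the MMU bits.
+ */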
+
+void mali_bcast_disable(struct mali_bcast_unit *bcast_unit)
+{
+ MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+
+ MALI_DEBUG_PRINT(4, ("Broadcast: setting mask 0x0 + 0x0 (disable)\n"));
+
+ /* set broadcast mask */
+ mali_hw_core_register_write(&bcast_unit->hw_core,
+ MALI_BROADCAST_REG_BROADCAST_MASK,
+ 0x0);
+
+ /* set IRQ override mask */
+ mali_hw_core_register_write(&bcast_unit->hw_core,
+ MALI_BROADCAST_REG_INTERRUPT_MASK,
+ 0x0);
+}
diff --git a/drivers/gpu/arm/utgard/common/mali_broadcast.h b/drivers/gpu/arm/utgard/common/mali_broadcast.h
new file mode 100644
index 000000000000..efce44142ee9
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_broadcast.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2012-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_BROADCAST_H__
+#define __MALI_BROADCAST_H__
+
+/*
+ * Interface for the broadcast unit on Mali-450.
+ *
+ * - Represents up to 8 × (MMU + PP) pairs.
+ * - Supports dynamically changing which (MMU + PP) pairs receive the broadcast by
+ * setting a mask.
+ */
+
+#include "mali_hw_core.h"
+#include "mali_group.h"
+
+struct mali_bcast_unit;
+
+struct mali_bcast_unit *mali_bcast_unit_create(const _mali_osk_resource_t *resource);
+void mali_bcast_unit_delete(struct mali_bcast_unit *bcast_unit);
+
+/* Add a group to the list of (MMU + PP) pairs broadcasts go out to. */
+void mali_bcast_add_group(struct mali_bcast_unit *bcast_unit, struct mali_group *group);
+
+/* Remove a group from the list of (MMU + PP) pairs broadcasts go out to. */
+void mali_bcast_remove_group(struct mali_bcast_unit *bcast_unit, struct mali_group *group);
+
+/* Re-apply the cached mask to hardware. This needs to be called after the unit has been suspended. */
+void mali_bcast_reset(struct mali_bcast_unit *bcast_unit);
+
+/**
+ * Disable broadcast unit
+ *
+ * mali_bcast_enable must be called to re-enable the unit. Cores may not be
+ * added or removed when the unit is disabled.
+ */
+void mali_bcast_disable(struct mali_bcast_unit *bcast_unit);
+
+/**
+ * Re-enable broadcast unit
+ *
+ * This resets the masks to include the cores present when mali_bcast_disable was called.
+ */
+MALI_STATIC_INLINE void mali_bcast_enable(struct mali_bcast_unit *bcast_unit)
+{
+ mali_bcast_reset(bcast_unit);
+}
+
+#endif /* __MALI_BROADCAST_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_control_timer.c b/drivers/gpu/arm/utgard/common/mali_control_timer.c
new file mode 100644
index 000000000000..d0dd95ac1b39
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_control_timer.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2010-2012, 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_utilization.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_dvfs_policy.h"
+#include "mali_control_timer.h"
+
+static u64 period_start_time = 0;
+
+static _mali_osk_timer_t *mali_control_timer = NULL;
+static mali_bool timer_running = MALI_FALSE;
+
+static u32 mali_control_timeout = 1000;
+
+void mali_control_timer_add(u32 timeout)
+{
+ _mali_osk_timer_add(mali_control_timer, _mali_osk_time_mstoticks(timeout));
+}
+
+static void mali_control_timer_callback(void *arg)
+{
+ if (mali_utilization_enabled()) {
+ struct mali_gpu_utilization_data *util_data = NULL;
+ u64 time_period = 0;
+ mali_bool need_add_timer = MALI_TRUE;
+
+ /* Calculate gpu utilization */
+ util_data = mali_utilization_calculate(&period_start_time, &time_period, &need_add_timer);
+
+ if (util_data) {
+#if defined(CONFIG_MALI_DVFS)
+ mali_dvfs_policy_realize(util_data, time_period);
+#else
+ mali_utilization_platform_realize(util_data);
+#endif
+
+ if (MALI_TRUE == need_add_timer) {
+ mali_control_timer_add(mali_control_timeout);
+ }
+ }
+ }
+}
+
+/* Init the control timer (currently used for GPU utilization and DVFS) */
+_mali_osk_errcode_t mali_control_timer_init(void)
+{
+ _mali_osk_device_data data;
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+ /* Use device specific settings (if defined) */
+ if (0 != data.control_interval) {
+ mali_control_timeout = data.control_interval;
+ MALI_DEBUG_PRINT(2, ("Mali GPU Timer: %u\n", mali_control_timeout));
+ }
+ }
+
+ mali_control_timer = _mali_osk_timer_init();
+ if (NULL == mali_control_timer) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ _mali_osk_timer_setcallback(mali_control_timer, mali_control_timer_callback, NULL);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_control_timer_term(void)
+{
+ if (NULL != mali_control_timer) {
+ _mali_osk_timer_del(mali_control_timer);
+ timer_running = MALI_FALSE;
+ _mali_osk_timer_term(mali_control_timer);
+ mali_control_timer = NULL;
+ }
+}
+
+mali_bool mali_control_timer_resume(u64 time_now)
+{
+ mali_utilization_data_assert_locked();
+
+ if (timer_running != MALI_TRUE) {
+ timer_running = MALI_TRUE;
+
+ period_start_time = time_now;
+
+ mali_utilization_reset();
+
+ return MALI_TRUE;
+ }
+
+ return MALI_FALSE;
+}
+
+void mali_control_timer_pause(void)
+{
+ mali_utilization_data_assert_locked();
+ if (timer_running == MALI_TRUE) {
+ timer_running = MALI_FALSE;
+ }
+}
+
+void mali_control_timer_suspend(mali_bool suspend)
+{
+ mali_utilization_data_lock();
+
+ if (timer_running == MALI_TRUE) {
+ timer_running = MALI_FALSE;
+
+ mali_utilization_data_unlock();
+
+ if (suspend == MALI_TRUE) {
+ _mali_osk_timer_del(mali_control_timer);
+ mali_utilization_reset();
+ }
+ } else {
+ mali_utilization_data_unlock();
+ }
+}
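+
+/*
+ * Note (sketch): the utilization data lock is dropped before the timer is
+ * deleted above, presumably because the timer callback can take the same
+ * lock; deleting the timer synchronously while holding it could deadlock.
+ */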
diff --git a/drivers/gpu/arm/utgard/common/mali_control_timer.h b/drivers/gpu/arm/utgard/common/mali_control_timer.h
new file mode 100644
index 000000000000..4f919ecfd70a
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_control_timer.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2010-2012, 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_CONTROL_TIMER_H__
+#define __MALI_CONTROL_TIMER_H__
+
+#include "mali_osk.h"
+
+_mali_osk_errcode_t mali_control_timer_init(void);
+
+void mali_control_timer_term(void);
+
+mali_bool mali_control_timer_resume(u64 time_now);
+
+void mali_control_timer_suspend(mali_bool suspend);
+void mali_control_timer_pause(void);
+
+void mali_control_timer_add(u32 timeout);
+
+#endif /* __MALI_CONTROL_TIMER_H__ */
+
diff --git a/drivers/gpu/arm/utgard/common/mali_dlbu.c b/drivers/gpu/arm/utgard/common/mali_dlbu.c
new file mode 100644
index 000000000000..efe1ab3ab5cc
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_dlbu.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2012-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_dlbu.h"
+#include "mali_memory.h"
+#include "mali_pp.h"
+#include "mali_group.h"
+#include "mali_osk.h"
+#include "mali_hw_core.h"
+
+/**
+ * Size of DLBU registers in bytes
+ */
+#define MALI_DLBU_SIZE 0x400
+
+mali_dma_addr mali_dlbu_phys_addr = 0;
+static mali_io_address mali_dlbu_cpu_addr = NULL;
+
+/**
+ * DLBU register numbers
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register
+ */
+typedef enum mali_dlbu_register {
+ MALI_DLBU_REGISTER_MASTER_TLLIST_PHYS_ADDR = 0x0000, /**< Master tile list physical base address;
+ 31:12 Physical address to the page used for the DLBU
+							  0 DLBU enable - setting this bit to 1 enables the AXI bus
+							      between PPs and L2s; setting it to 0 disables the router and
+							      no further transactions are sent to the DLBU */
+ MALI_DLBU_REGISTER_MASTER_TLLIST_VADDR = 0x0004, /**< Master tile list virtual base address;
+ 31:12 Virtual address to the page used for the DLBU */
+ MALI_DLBU_REGISTER_TLLIST_VBASEADDR = 0x0008, /**< Tile list virtual base address;
+ 31:12 Virtual address to the tile list. This address is used when
+ calculating the call address sent to PP.*/
+ MALI_DLBU_REGISTER_FB_DIM = 0x000C, /**< Framebuffer dimension;
+ 23:16 Number of tiles in Y direction-1
+ 7:0 Number of tiles in X direction-1 */
+ MALI_DLBU_REGISTER_TLLIST_CONF = 0x0010, /**< Tile list configuration;
+ 29:28 select the size of each allocated block: 0=128 bytes, 1=256, 2=512, 3=1024
+ 21:16 2^n number of tiles to be binned to one tile list in Y direction
+ 5:0 2^n number of tiles to be binned to one tile list in X direction */
+ MALI_DLBU_REGISTER_START_TILE_POS = 0x0014, /**< Start tile positions;
+ 31:24 start position in Y direction for group 1
+ 23:16 start position in X direction for group 1
+ 15:8 start position in Y direction for group 0
+ 7:0 start position in X direction for group 0 */
+ MALI_DLBU_REGISTER_PP_ENABLE_MASK = 0x0018, /**< PP enable mask;
+ 7 enable PP7 for load balancing
+ 6 enable PP6 for load balancing
+ 5 enable PP5 for load balancing
+ 4 enable PP4 for load balancing
+ 3 enable PP3 for load balancing
+ 2 enable PP2 for load balancing
+ 1 enable PP1 for load balancing
+ 0 enable PP0 for load balancing */
+} mali_dlbu_register;
+
+typedef enum {
+ PP0ENABLE = 0,
+ PP1ENABLE,
+ PP2ENABLE,
+ PP3ENABLE,
+ PP4ENABLE,
+ PP5ENABLE,
+ PP6ENABLE,
+ PP7ENABLE
+} mali_dlbu_pp_enable;
+
+struct mali_dlbu_core {
+ struct mali_hw_core hw_core; /**< Common for all HW cores */
+	u32 pp_cores_mask; /**< Mask of the PP cores whose operation is controlled by the DLBU;
+			      see the MALI_DLBU_REGISTER_PP_ENABLE_MASK register */
+};
+
+_mali_osk_errcode_t mali_dlbu_initialize(void)
+{
+ MALI_DEBUG_PRINT(2, ("Mali DLBU: Initializing\n"));
+
+ if (_MALI_OSK_ERR_OK ==
+ mali_mmu_get_table_page(&mali_dlbu_phys_addr,
+ &mali_dlbu_cpu_addr)) {
+ return _MALI_OSK_ERR_OK;
+ }
+
+ return _MALI_OSK_ERR_FAULT;
+}
+
+void mali_dlbu_terminate(void)
+{
+ MALI_DEBUG_PRINT(3, ("Mali DLBU: terminating\n"));
+
+ if (0 != mali_dlbu_phys_addr && 0 != mali_dlbu_cpu_addr) {
+ mali_mmu_release_table_page(mali_dlbu_phys_addr,
+ mali_dlbu_cpu_addr);
+ mali_dlbu_phys_addr = 0;
+ mali_dlbu_cpu_addr = 0;
+ }
+}
+
+struct mali_dlbu_core *mali_dlbu_create(const _mali_osk_resource_t *resource)
+{
+ struct mali_dlbu_core *core = NULL;
+
+ MALI_DEBUG_PRINT(2, ("Mali DLBU: Creating Mali dynamic load balancing unit: %s\n", resource->description));
+
+ core = _mali_osk_malloc(sizeof(struct mali_dlbu_core));
+ if (NULL != core) {
+ if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALI_DLBU_SIZE)) {
+ core->pp_cores_mask = 0;
+ if (_MALI_OSK_ERR_OK == mali_dlbu_reset(core)) {
+ return core;
+ }
+ MALI_PRINT_ERROR(("Failed to reset DLBU %s\n", core->hw_core.description));
+ mali_hw_core_delete(&core->hw_core);
+ }
+
+ _mali_osk_free(core);
+ } else {
+ MALI_PRINT_ERROR(("Mali DLBU: Failed to allocate memory for DLBU core\n"));
+ }
+
+ return NULL;
+}
+
+void mali_dlbu_delete(struct mali_dlbu_core *dlbu)
+{
+ MALI_DEBUG_ASSERT_POINTER(dlbu);
+ mali_hw_core_delete(&dlbu->hw_core);
+ _mali_osk_free(dlbu);
+}
+
+_mali_osk_errcode_t mali_dlbu_reset(struct mali_dlbu_core *dlbu)
+{
+ u32 dlbu_registers[7];
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+ MALI_DEBUG_ASSERT_POINTER(dlbu);
+
+ MALI_DEBUG_PRINT(4, ("Mali DLBU: mali_dlbu_reset: %s\n", dlbu->hw_core.description));
+
+ dlbu_registers[0] = mali_dlbu_phys_addr | 1; /* bit 0 enables the whole core */
+ dlbu_registers[1] = MALI_DLBU_VIRT_ADDR;
+ dlbu_registers[2] = 0;
+ dlbu_registers[3] = 0;
+ dlbu_registers[4] = 0;
+ dlbu_registers[5] = 0;
+ dlbu_registers[6] = dlbu->pp_cores_mask;
+
+ /* write reset values to core registers */
+ mali_hw_core_register_write_array_relaxed(&dlbu->hw_core, MALI_DLBU_REGISTER_MASTER_TLLIST_PHYS_ADDR, dlbu_registers, 7);
+
+ err = _MALI_OSK_ERR_OK;
+
+ return err;
+}
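+
+/*
+ * The seven values above land in the consecutive registers 0x00..0x18
+ * (see mali_dlbu_register): the master tile list physical address with
+ * the enable bit, its virtual address, then the tile list base,
+ * framebuffer dimensions, tile list configuration and start tile
+ * positions (all cleared), and finally the PP enable mask.
+ */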
+
+void mali_dlbu_update_mask(struct mali_dlbu_core *dlbu)
+{
+ MALI_DEBUG_ASSERT_POINTER(dlbu);
+
+ mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_PP_ENABLE_MASK, dlbu->pp_cores_mask);
+}
+
+void mali_dlbu_add_group(struct mali_dlbu_core *dlbu, struct mali_group *group)
+{
+ struct mali_pp_core *pp_core;
+ u32 bcast_id;
+
+ MALI_DEBUG_ASSERT_POINTER(dlbu);
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ pp_core = mali_group_get_pp_core(group);
+ bcast_id = mali_pp_core_get_bcast_id(pp_core);
+
+ dlbu->pp_cores_mask |= bcast_id;
+	MALI_DEBUG_PRINT(3, ("Mali DLBU: Adding core[%d] New mask= 0x%02x\n", bcast_id, dlbu->pp_cores_mask));
+}
+
+/* Remove a group from the DLBU */
+void mali_dlbu_remove_group(struct mali_dlbu_core *dlbu, struct mali_group *group)
+{
+ struct mali_pp_core *pp_core;
+ u32 bcast_id;
+
+ MALI_DEBUG_ASSERT_POINTER(dlbu);
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ pp_core = mali_group_get_pp_core(group);
+ bcast_id = mali_pp_core_get_bcast_id(pp_core);
+
+ dlbu->pp_cores_mask &= ~bcast_id;
+ MALI_DEBUG_PRINT(3, ("Mali DLBU: Removing core[%d] New mask= 0x%02x\n", bcast_id, dlbu->pp_cores_mask));
+}
+
+/* Configure the DLBU for \a job. This needs to be done before the job is started on the groups in the DLBU. */
+void mali_dlbu_config_job(struct mali_dlbu_core *dlbu, struct mali_pp_job *job)
+{
+ u32 *registers;
+ MALI_DEBUG_ASSERT(job);
+ registers = mali_pp_job_get_dlbu_registers(job);
+ MALI_DEBUG_PRINT(4, ("Mali DLBU: Starting job\n"));
+
+	/* Write 4 registers:
+	 * all DLBU registers except the first two (written once at DLBU initialisation / reset) and the PP_ENABLE_MASK register */
+ mali_hw_core_register_write_array_relaxed(&dlbu->hw_core, MALI_DLBU_REGISTER_TLLIST_VBASEADDR, registers, 4);
+
+}
diff --git a/drivers/gpu/arm/utgard/common/mali_dlbu.h b/drivers/gpu/arm/utgard/common/mali_dlbu.h
new file mode 100644
index 000000000000..6b068884bd49
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_dlbu.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2012-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_DLBU_H__
+#define __MALI_DLBU_H__
+
+#define MALI_DLBU_VIRT_ADDR 0xFFF00000 /* master tile virtual address fixed at this value and mapped into every session */
+
+#include "mali_osk.h"
+
+struct mali_pp_job;
+struct mali_group;
+struct mali_dlbu_core;
+
+extern mali_dma_addr mali_dlbu_phys_addr;
+
+_mali_osk_errcode_t mali_dlbu_initialize(void);
+void mali_dlbu_terminate(void);
+
+struct mali_dlbu_core *mali_dlbu_create(const _mali_osk_resource_t *resource);
+void mali_dlbu_delete(struct mali_dlbu_core *dlbu);
+
+_mali_osk_errcode_t mali_dlbu_reset(struct mali_dlbu_core *dlbu);
+
+void mali_dlbu_add_group(struct mali_dlbu_core *dlbu, struct mali_group *group);
+void mali_dlbu_remove_group(struct mali_dlbu_core *dlbu, struct mali_group *group);
+
+/** @brief Called to update HW after DLBU state changed
+ *
+ * This function must be called after \a mali_dlbu_add_group or \a
+ * mali_dlbu_remove_group to write the updated mask to hardware, unless the
+ * same is accomplished by calling \a mali_dlbu_reset.
+ */
+void mali_dlbu_update_mask(struct mali_dlbu_core *dlbu);
+
+void mali_dlbu_config_job(struct mali_dlbu_core *dlbu, struct mali_pp_job *job);
+
+#endif /* __MALI_DLBU_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_dvfs_policy.c b/drivers/gpu/arm/utgard/common/mali_dvfs_policy.c
new file mode 100644
index 000000000000..12ba069ec13b
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_dvfs_policy.c
@@ -0,0 +1,308 @@
+/*
+ * Copyright (C) 2010-2012, 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/mali/mali_utgard.h>
+#include "mali_kernel_common.h"
+#include "mali_scheduler.h"
+#include "mali_dvfs_policy.h"
+#include "mali_osk_mali.h"
+#include "mali_osk_profiling.h"
+
+#define CLOCK_TUNING_TIME_DEBUG 0
+
+#define MAX_PERFORMANCE_VALUE 256
+#define MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(percent) ((int) ((percent)*(MAX_PERFORMANCE_VALUE)/100.0 + 0.5))
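+/* Example (illustrative): MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(90)
+ * is (int)(90 * 256 / 100.0 + 0.5) = 230 on the 0..256 utilization
+ * scale; the floating point is folded at compile time since the macro
+ * is only used with constant arguments here. */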
+
+/** Max fps, matching the display vsync; default 60, settable as a module parameter */
+int mali_max_system_fps = 60;
+/** Lower limit on the desired fps; default 58, settable as a module parameter */
+int mali_desired_fps = 58;
+
+static int mali_fps_step1 = 0;
+static int mali_fps_step2 = 0;
+
+static int clock_step = -1;
+static int cur_clk_step = -1;
+static struct mali_gpu_clock *gpu_clk = NULL;
+
+/* Platform frequency-control callbacks */
+static int (*mali_gpu_set_freq)(int) = NULL;
+static int (*mali_gpu_get_freq)(void) = NULL;
+
+static mali_bool mali_dvfs_enabled = MALI_FALSE;
+
+#define NUMBER_OF_NANOSECONDS_PER_SECOND 1000000000ULL
+static u32 calculate_window_render_fps(u64 time_period)
+{
+ u32 max_window_number;
+ u64 tmp;
+ u64 max = time_period;
+ u32 leading_zeroes;
+ u32 shift_val;
+ u32 time_period_shift;
+ u32 max_window_number_shift;
+ u32 ret_val;
+
+ max_window_number = mali_session_max_window_num();
+
+ /* To avoid float division, extend the dividend to ns unit */
+ tmp = (u64)max_window_number * NUMBER_OF_NANOSECONDS_PER_SECOND;
+ if (tmp > time_period) {
+ max = tmp;
+ }
+
+	/*
+	 * The dividend, the divisor or both may be 64-bit values.
+	 * To avoid depending on a 64-bit divide, we first shift both
+	 * values down equally.
+	 */
+ leading_zeroes = _mali_osk_clz((u32)(max >> 32));
+ shift_val = 32 - leading_zeroes;
+
+ time_period_shift = (u32)(time_period >> shift_val);
+ max_window_number_shift = (u32)(tmp >> shift_val);
+
+ ret_val = max_window_number_shift / time_period_shift;
+
+ return ret_val;
+}
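+
+/*
+ * Worked example (illustrative): with one window and a 16.7 ms period,
+ * tmp = 1 * 1e9 ns and both values fit in 32 bits, so shift_val is 0 and
+ * the result is 1000000000 / 16700000 ~= 59 fps.
+ */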
+
+static bool mali_pickup_closest_avail_clock(int target_clock_mhz, mali_bool pick_clock_up)
+{
+ int i = 0;
+ bool clock_changed = false;
+
+	/* Find the closest available frequency step for target_clock_mhz */
+ for (i = 0; i < gpu_clk->num_of_steps; i++) {
+		/* Find the first step with clock > target_clock_mhz */
+ if (((int)(gpu_clk->item[i].clock) - target_clock_mhz) > 0) {
+ break;
+ }
+ }
+
+	/* If the target clock is greater than the maximum clock, just pick the maximum one */
+ if (i == gpu_clk->num_of_steps) {
+ i = gpu_clk->num_of_steps - 1;
+ } else {
+ if ((!pick_clock_up) && (i > 0)) {
+ i = i - 1;
+ }
+ }
+
+ clock_step = i;
+ if (cur_clk_step != clock_step) {
+ clock_changed = true;
+ }
+
+ return clock_changed;
+}
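+
+/*
+ * Example (illustrative): with steps {100, 200, 300} MHz and a target of
+ * 250 MHz, index 2 (300 MHz) is chosen when pick_clock_up is true and
+ * index 1 (200 MHz) when it is false; the return value only reports
+ * whether the chosen step differs from cur_clk_step.
+ */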
+
+void mali_dvfs_policy_realize(struct mali_gpu_utilization_data *data, u64 time_period)
+{
+ int under_perform_boundary_value = 0;
+ int over_perform_boundary_value = 0;
+ int current_fps = 0;
+ int current_gpu_util = 0;
+ bool clock_changed = false;
+	u32 window_render_fps;
+#if CLOCK_TUNING_TIME_DEBUG
+	struct timeval start;
+	struct timeval stop;
+	unsigned int elapse_time;
+	do_gettimeofday(&start);
+#endif
+
+ if (NULL == gpu_clk) {
+		MALI_DEBUG_PRINT(2, ("DVFS is enabled but the platform doesn't support frequency changes.\n"));
+ return;
+ }
+
+ window_render_fps = calculate_window_render_fps(time_period);
+
+ current_fps = window_render_fps;
+ current_gpu_util = data->utilization_gpu;
+
+ /* Get the specific under_perform_boundary_value and over_perform_boundary_value */
+ if ((mali_desired_fps <= current_fps) && (current_fps < mali_max_system_fps)) {
+ under_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(90);
+ over_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(70);
+ } else if ((mali_fps_step1 <= current_fps) && (current_fps < mali_desired_fps)) {
+ under_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(55);
+ over_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(35);
+ } else if ((mali_fps_step2 <= current_fps) && (current_fps < mali_fps_step1)) {
+ under_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(70);
+ over_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(50);
+ } else {
+ under_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(55);
+ over_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(35);
+ }
+
+ MALI_DEBUG_PRINT(5, ("Using ARM power policy: gpu util = %d \n", current_gpu_util));
+ MALI_DEBUG_PRINT(5, ("Using ARM power policy: under_perform = %d, over_perform = %d \n", under_perform_boundary_value, over_perform_boundary_value));
+ MALI_DEBUG_PRINT(5, ("Using ARM power policy: render fps = %d, pressure render fps = %d \n", current_fps, window_render_fps));
+
+ /* Get current clock value */
+ cur_clk_step = mali_gpu_get_freq();
+
+ /* Consider offscreen */
+ if (0 == current_fps) {
+ /* GP or PP under perform, need to give full power */
+ if (current_gpu_util > over_perform_boundary_value) {
+ if (cur_clk_step != gpu_clk->num_of_steps - 1) {
+ clock_changed = true;
+ clock_step = gpu_clk->num_of_steps - 1;
+ }
+ }
+
+ /* If GPU is idle, use lowest power */
+ if (0 == current_gpu_util) {
+ if (cur_clk_step != 0) {
+ clock_changed = true;
+ clock_step = 0;
+ }
+ }
+
+ goto real_setting;
+ }
+
+	/* Calculate the target clock if the GPU clock can be tuned */
+ if (-1 != cur_clk_step) {
+ int target_clk_mhz = -1;
+ mali_bool pick_clock_up = MALI_TRUE;
+
+ if (current_gpu_util > under_perform_boundary_value) {
+ /* when under perform, need to consider the fps part */
+ target_clk_mhz = gpu_clk->item[cur_clk_step].clock * current_gpu_util * mali_desired_fps / under_perform_boundary_value / current_fps;
+ pick_clock_up = MALI_TRUE;
+ } else if (current_gpu_util < over_perform_boundary_value) {
+			/* when over-performing there is no need to consider fps; the system doesn't need to reach the desired fps */
+ target_clk_mhz = gpu_clk->item[cur_clk_step].clock * current_gpu_util / under_perform_boundary_value;
+ pick_clock_up = MALI_FALSE;
+ }
+
+ if (-1 != target_clk_mhz) {
+ clock_changed = mali_pickup_closest_avail_clock(target_clk_mhz, pick_clock_up);
+ }
+ }
+
+real_setting:
+ if (clock_changed) {
+ mali_gpu_set_freq(clock_step);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+ gpu_clk->item[clock_step].clock,
+ gpu_clk->item[clock_step].vol / 1000,
+ 0, 0, 0);
+ }
+
+#if CLOCK_TUNING_TIME_DEBUG
+ do_gettimeofday(&stop);
+
+ elapse_time = timeval_to_ns(&stop) - timeval_to_ns(&start);
+	MALI_DEBUG_PRINT(2, ("Using ARM power policy: elapsed time = %d\n", elapse_time));
+#endif
+}
+
+_mali_osk_errcode_t mali_dvfs_policy_init(void)
+{
+ _mali_osk_device_data data;
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+ if ((NULL != data.get_clock_info) && (NULL != data.set_freq) && (NULL != data.get_freq)) {
+ MALI_DEBUG_PRINT(2, ("Mali DVFS init: using arm dvfs policy \n"));
+
+
+ mali_fps_step1 = mali_max_system_fps / 3;
+ mali_fps_step2 = mali_max_system_fps / 5;
+
+ data.get_clock_info(&gpu_clk);
+
+ if (gpu_clk != NULL) {
+#ifdef DEBUG
+ int i;
+ for (i = 0; i < gpu_clk->num_of_steps; i++) {
+ MALI_DEBUG_PRINT(5, ("mali gpu clock info: step%d clock(%d)Hz,vol(%d) \n",
+ i, gpu_clk->item[i].clock, gpu_clk->item[i].vol));
+ }
+#endif
+ } else {
+				MALI_DEBUG_PRINT(2, ("Mali DVFS init: platform didn't provide enough info for the DDK to do DVFS\n"));
+ }
+
+ mali_gpu_get_freq = data.get_freq;
+ mali_gpu_set_freq = data.set_freq;
+
+ if ((NULL != gpu_clk) && (gpu_clk->num_of_steps > 0)
+ && (NULL != mali_gpu_get_freq) && (NULL != mali_gpu_set_freq)) {
+ mali_dvfs_enabled = MALI_TRUE;
+ }
+ } else {
+			MALI_DEBUG_PRINT(2, ("Mali DVFS init: platform function callbacks incomplete; check mali_gpu_device_data in the platform code.\n"));
+ }
+ } else {
+ err = _MALI_OSK_ERR_FAULT;
+		MALI_DEBUG_PRINT(2, ("Mali DVFS init: failed to get platform data.\n"));
+ }
+
+ return err;
+}
+
+/*
+ * Always give full power when start a new period,
+ * if mali dvfs enabled, for performance consideration
+ */
+void mali_dvfs_policy_new_period(void)
+{
+ /* Always give full power when start a new period */
+ unsigned int cur_clk_step = 0;
+
+ cur_clk_step = mali_gpu_get_freq();
+
+ if (cur_clk_step != (gpu_clk->num_of_steps - 1)) {
+ mali_gpu_set_freq(gpu_clk->num_of_steps - 1);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE, gpu_clk->item[gpu_clk->num_of_steps - 1].clock,
+ gpu_clk->item[gpu_clk->num_of_steps - 1].vol / 1000, 0, 0, 0);
+ }
+}
+
+mali_bool mali_dvfs_policy_enabled(void)
+{
+ return mali_dvfs_enabled;
+}
+
+#if defined(CONFIG_MALI400_PROFILING)
+void mali_get_current_gpu_clk_item(struct mali_gpu_clk_item *clk_item)
+{
+ if (mali_platform_device != NULL) {
+
+ struct mali_gpu_device_data *device_data = NULL;
+ device_data = (struct mali_gpu_device_data *)mali_platform_device->dev.platform_data;
+
+ if ((NULL != device_data->get_clock_info) && (NULL != device_data->get_freq)) {
+
+ int cur_clk_step = device_data->get_freq();
+ struct mali_gpu_clock *mali_gpu_clk = NULL;
+
+ device_data->get_clock_info(&mali_gpu_clk);
+ clk_item->clock = mali_gpu_clk->item[cur_clk_step].clock;
+ clk_item->vol = mali_gpu_clk->item[cur_clk_step].vol;
+ } else {
+			MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: platform function callbacks incomplete; check mali_gpu_device_data in the platform code.\n"));
+ }
+ }
+}
+#endif
+
diff --git a/drivers/gpu/arm/utgard/common/mali_dvfs_policy.h b/drivers/gpu/arm/utgard/common/mali_dvfs_policy.h
new file mode 100644
index 000000000000..55e4b354c1a9
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_dvfs_policy.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2010-2012, 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_DVFS_POLICY_H__
+#define __MALI_DVFS_POLICY_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void mali_dvfs_policy_realize(struct mali_gpu_utilization_data *data, u64 time_period);
+
+_mali_osk_errcode_t mali_dvfs_policy_init(void);
+
+void mali_dvfs_policy_new_period(void);
+
+mali_bool mali_dvfs_policy_enabled(void);
+
+#if defined(CONFIG_MALI400_PROFILING)
+void mali_get_current_gpu_clk_item(struct mali_gpu_clk_item *clk_item);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif/* __MALI_DVFS_POLICY_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_executor.c b/drivers/gpu/arm/utgard/common/mali_executor.c
new file mode 100644
index 000000000000..3c2864386798
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_executor.c
@@ -0,0 +1,2642 @@
+/*
+ * Copyright (C) 2012-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_executor.h"
+#include "mali_scheduler.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_pp.h"
+#include "mali_pp_job.h"
+#include "mali_group.h"
+#include "mali_pm.h"
+#include "mali_timeline.h"
+#include "mali_osk_profiling.h"
+#include "mali_session.h"
+
+/*
+ * If dma_buf with map on demand is used, we defer job deletion and job queue
+ * if in atomic context, since both might sleep.
+ */
+#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+#define MALI_EXECUTOR_USE_DEFERRED_PP_JOB_DELETE 1
+#define MALI_EXECUTOR_USE_DEFERRED_PP_JOB_QUEUE 1
+#endif /* !defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH) */
+
+/*
+ * ---------- static type definitions (structs, enums, etc) ----------
+ */
+
+enum mali_executor_state_t {
+ EXEC_STATE_NOT_PRESENT, /* Virtual group on Mali-300/400 (do not use) */
+ EXEC_STATE_DISABLED, /* Disabled by core scaling (do not use) */
+ EXEC_STATE_EMPTY, /* No child groups for virtual group (do not use) */
+ EXEC_STATE_INACTIVE, /* Can be used, but must be activate first */
+ EXEC_STATE_IDLE, /* Active and ready to be used */
+ EXEC_STATE_WORKING, /* Executing a job */
+};
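+
+/*
+ * Typical lifecycle (sketch, inferred from the code below): a group moves
+ * INACTIVE -> IDLE when it is powered up and activated, IDLE -> WORKING
+ * when a job is scheduled on it and back again on completion, while
+ * DISABLED is entered and left only through core scaling.
+ */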
+
+/*
+ * ---------- global variables (exported due to inline functions) ----------
+ */
+
+/* Lock for this module (protecting all HW access except L2 caches) */
+_mali_osk_spinlock_irq_t *mali_executor_lock_obj = NULL;
+
+mali_bool mali_executor_hints[MALI_EXECUTOR_HINT_MAX];
+
+/*
+ * ---------- static variables ----------
+ */
+
+/* Used to defer job scheduling */
+static _mali_osk_wq_work_t *executor_wq_high_pri = NULL;
+
+/* Store version from GP and PP (user space wants to know this) */
+static u32 pp_version = 0;
+static u32 gp_version = 0;
+
+/* List of physical PP groups which are disabled by some external source */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_disabled);
+static u32 group_list_disabled_count = 0;
+
+/* List of groups which can be used, but must be activated first */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_inactive);
+static u32 group_list_inactive_count = 0;
+
+/* List of groups which are active and ready to be used */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_idle);
+static u32 group_list_idle_count = 0;
+
+/* List of groups which are executing a job */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_working);
+static u32 group_list_working_count = 0;
+
+/* Virtual group (if any) */
+static struct mali_group *virtual_group = NULL;
+
+/* Virtual group state is tracked with a state variable instead of 4 lists */
+static enum mali_executor_state_t virtual_group_state = EXEC_STATE_NOT_PRESENT;
+
+/* GP group */
+static struct mali_group *gp_group = NULL;
+
+/* GP group state is tracked with a state variable instead of 4 lists */
+static enum mali_executor_state_t gp_group_state = EXEC_STATE_NOT_PRESENT;
+
+static u32 gp_returned_cookie = 0;
+
+/* Total number of physical PP cores present */
+static u32 num_physical_pp_cores_total = 0;
+
+/* Number of physical cores which are enabled */
+static u32 num_physical_pp_cores_enabled = 0;
+
+/* Enable or disable core scaling */
+static mali_bool core_scaling_enabled = MALI_TRUE;
+
+/* Variables to allow safe pausing of the scheduler */
+static _mali_osk_wait_queue_t *executor_working_wait_queue = NULL;
+static u32 pause_count = 0;
+
+/* PP cores whose enabling is delayed because some PP cores haven't finished being disabled. */
+static int core_scaling_delay_up_mask[MALI_MAX_NUMBER_OF_DOMAINS] = { 0 };
+
+/* Variables used to notify userspace of PP core changes when core scaling
+ * finishes in mali_executor_complete_group(). */
+static _mali_osk_wq_work_t *executor_wq_notify_core_change = NULL;
+static _mali_osk_wait_queue_t *executor_notify_core_change_wait_queue = NULL;
+
+/*
+ * ---------- Forward declaration of static functions ----------
+ */
+static mali_bool mali_executor_is_suspended(void *data);
+static mali_bool mali_executor_is_working(void);
+static void mali_executor_disable_empty_virtual(void);
+static mali_bool mali_executor_physical_rejoin_virtual(struct mali_group *group);
+static mali_bool mali_executor_has_virtual_group(void);
+static mali_bool mali_executor_virtual_group_is_usable(void);
+static void mali_executor_schedule(void);
+static void mali_executor_wq_schedule(void *arg);
+static void mali_executor_send_gp_oom_to_user(struct mali_gp_job *job, u32 added_size);
+static void mali_executor_complete_group(struct mali_group *group,
+ mali_bool success,
+ struct mali_gp_job **gp_job_done,
+ struct mali_pp_job **pp_job_done);
+static void mali_executor_change_state_pp_physical(struct mali_group *group,
+ _mali_osk_list_t *old_list,
+ u32 *old_count,
+ _mali_osk_list_t *new_list,
+ u32 *new_count);
+static mali_bool mali_executor_group_is_in_state(struct mali_group *group,
+ enum mali_executor_state_t state);
+
+static void mali_executor_group_enable_internal(struct mali_group *group);
+static void mali_executor_group_disable_internal(struct mali_group *group);
+static void mali_executor_core_scale(unsigned int target_core_nr);
+static void mali_executor_core_scale_in_group_complete(struct mali_group *group);
+static void mali_executor_notify_core_change(u32 num_cores);
+static void mali_executor_wq_notify_core_change(void *arg);
+static void mali_executor_change_group_status_disabled(struct mali_group *group);
+static mali_bool mali_executor_deactivate_list_idle(mali_bool deactivate_idle_group);
+static void mali_executor_set_state_pp_physical(struct mali_group *group,
+ _mali_osk_list_t *new_list,
+ u32 *new_count);
+
+/*
+ * ---------- Actual implementation ----------
+ */
+
+_mali_osk_errcode_t mali_executor_initialize(void)
+{
+ mali_executor_lock_obj = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_EXECUTOR);
+ if (NULL == mali_executor_lock_obj) {
+ mali_executor_terminate();
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ executor_wq_high_pri = _mali_osk_wq_create_work_high_pri(mali_executor_wq_schedule, NULL);
+ if (NULL == executor_wq_high_pri) {
+ mali_executor_terminate();
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ executor_working_wait_queue = _mali_osk_wait_queue_init();
+ if (NULL == executor_working_wait_queue) {
+ mali_executor_terminate();
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ executor_wq_notify_core_change = _mali_osk_wq_create_work(mali_executor_wq_notify_core_change, NULL);
+ if (NULL == executor_wq_notify_core_change) {
+ mali_executor_terminate();
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ executor_notify_core_change_wait_queue = _mali_osk_wait_queue_init();
+ if (NULL == executor_notify_core_change_wait_queue) {
+ mali_executor_terminate();
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_executor_terminate(void)
+{
+ if (NULL != executor_notify_core_change_wait_queue) {
+ _mali_osk_wait_queue_term(executor_notify_core_change_wait_queue);
+ executor_notify_core_change_wait_queue = NULL;
+ }
+
+ if (NULL != executor_wq_notify_core_change) {
+ _mali_osk_wq_delete_work(executor_wq_notify_core_change);
+ executor_wq_notify_core_change = NULL;
+ }
+
+ if (NULL != executor_working_wait_queue) {
+ _mali_osk_wait_queue_term(executor_working_wait_queue);
+ executor_working_wait_queue = NULL;
+ }
+
+ if (NULL != executor_wq_high_pri) {
+ _mali_osk_wq_delete_work(executor_wq_high_pri);
+ executor_wq_high_pri = NULL;
+ }
+
+ if (NULL != mali_executor_lock_obj) {
+ _mali_osk_spinlock_irq_term(mali_executor_lock_obj);
+ mali_executor_lock_obj = NULL;
+ }
+}
+
+void mali_executor_populate(void)
+{
+ u32 num_groups;
+ u32 i;
+
+ num_groups = mali_group_get_glob_num_groups();
+
+ /* Do we have a virtual group? */
+ for (i = 0; i < num_groups; i++) {
+ struct mali_group *group = mali_group_get_glob_group(i);
+
+ if (mali_group_is_virtual(group)) {
+ virtual_group = group;
+ virtual_group_state = EXEC_STATE_INACTIVE;
+ break;
+ }
+ }
+
+ /* Find all the available physical GP and PP cores */
+ for (i = 0; i < num_groups; i++) {
+ struct mali_group *group = mali_group_get_glob_group(i);
+
+ if (NULL != group) {
+ struct mali_pp_core *pp_core = mali_group_get_pp_core(group);
+ struct mali_gp_core *gp_core = mali_group_get_gp_core(group);
+
+ if (!mali_group_is_virtual(group)) {
+ if (NULL != pp_core) {
+ if (0 == pp_version) {
+ /* Retrieve PP version from the first available PP core */
+ pp_version = mali_pp_core_get_version(pp_core);
+ }
+
+ if (NULL != virtual_group) {
+ mali_executor_lock();
+ mali_group_add_group(virtual_group, group);
+ mali_executor_unlock();
+ } else {
+ _mali_osk_list_add(&group->executor_list, &group_list_inactive);
+ group_list_inactive_count++;
+ }
+
+ num_physical_pp_cores_total++;
+ } else {
+ MALI_DEBUG_ASSERT_POINTER(gp_core);
+
+ if (0 == gp_version) {
+ /* Retrieve GP version */
+ gp_version = mali_gp_core_get_version(gp_core);
+ }
+
+ gp_group = group;
+ gp_group_state = EXEC_STATE_INACTIVE;
+ }
+
+ }
+ }
+ }
+
+ num_physical_pp_cores_enabled = num_physical_pp_cores_total;
+}
+
+void mali_executor_depopulate(void)
+{
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ MALI_DEBUG_ASSERT(EXEC_STATE_WORKING != gp_group_state);
+
+ if (NULL != gp_group) {
+ mali_group_delete(gp_group);
+ gp_group = NULL;
+ }
+
+ MALI_DEBUG_ASSERT(EXEC_STATE_WORKING != virtual_group_state);
+
+ if (NULL != virtual_group) {
+ mali_group_delete(virtual_group);
+ virtual_group = NULL;
+ }
+
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&group_list_working));
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, executor_list) {
+ mali_group_delete(group);
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_inactive, struct mali_group, executor_list) {
+ mali_group_delete(group);
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_disabled, struct mali_group, executor_list) {
+ mali_group_delete(group);
+ }
+}
+
+void mali_executor_suspend(void)
+{
+ mali_executor_lock();
+
+ /* Increment the pause_count so that no more jobs will be scheduled */
+ pause_count++;
+
+ mali_executor_unlock();
+
+ _mali_osk_wait_queue_wait_event(executor_working_wait_queue,
+ mali_executor_is_suspended, NULL);
+
+ /*
+ * mali_executor_complete_XX() leaves jobs in idle state.
+	 * The deactivate option is used when we are going to power down
+ * the entire GPU (OS suspend) and want a consistent SW vs HW
+ * state.
+ */
+ mali_executor_lock();
+
+ mali_executor_deactivate_list_idle(MALI_TRUE);
+
+ /*
+	 * The following steps deactivate all active
+	 * (MALI_GROUP_STATE_ACTIVE) and activating
+	 * (MALI_GROUP_STATE_ACTIVATION_PENDING) groups, to make sure
+	 * the variable pd_mask_wanted ends up as 0.
+	 */
+ if (MALI_GROUP_STATE_INACTIVE != mali_group_get_state(gp_group)) {
+ gp_group_state = EXEC_STATE_INACTIVE;
+ mali_group_deactivate(gp_group);
+ }
+
+ if (mali_executor_has_virtual_group()) {
+ if (MALI_GROUP_STATE_INACTIVE
+ != mali_group_get_state(virtual_group)) {
+ virtual_group_state = EXEC_STATE_INACTIVE;
+ mali_group_deactivate(virtual_group);
+ }
+ }
+
+ if (0 < group_list_inactive_count) {
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp,
+ &group_list_inactive,
+ struct mali_group, executor_list) {
+ if (MALI_GROUP_STATE_ACTIVATION_PENDING
+ == mali_group_get_state(group)) {
+ mali_group_deactivate(group);
+ }
+
+			/*
+			 * On the Mali-450 platform a physical group may sit on the
+			 * inactive list in MALI_GROUP_STATE_ACTIVATION_PENDING state.
+			 * Deactivating it alone is not enough; it must also be added
+			 * back to the virtual group. The virtual group is guaranteed
+			 * to be in INACTIVE state here, so it is safe to add the
+			 * physical group to it at this point.
+			 */
+ if (NULL != virtual_group) {
+ _mali_osk_list_delinit(&group->executor_list);
+ group_list_inactive_count--;
+
+ mali_group_add_group(virtual_group, group);
+ }
+ }
+ }
+
+ mali_executor_unlock();
+}
+
+void mali_executor_resume(void)
+{
+ mali_executor_lock();
+
+ /* Decrement pause_count to allow scheduling again (if it reaches 0) */
+ pause_count--;
+ if (0 == pause_count) {
+ mali_executor_schedule();
+ }
+
+ mali_executor_unlock();
+}
+
+u32 mali_executor_get_num_cores_total(void)
+{
+ return num_physical_pp_cores_total;
+}
+
+u32 mali_executor_get_num_cores_enabled(void)
+{
+ return num_physical_pp_cores_enabled;
+}
+
+struct mali_pp_core *mali_executor_get_virtual_pp(void)
+{
+ MALI_DEBUG_ASSERT_POINTER(virtual_group);
+ MALI_DEBUG_ASSERT_POINTER(virtual_group->pp_core);
+ return virtual_group->pp_core;
+}
+
+struct mali_group *mali_executor_get_virtual_group(void)
+{
+ return virtual_group;
+}
+
+void mali_executor_zap_all_active(struct mali_session_data *session)
+{
+ struct mali_group *group;
+ struct mali_group *temp;
+ mali_bool ret;
+
+ mali_executor_lock();
+
+ /*
+ * This function is a bit complicated because
+	 * mali_group_zap_session() can fail, which only happens when the
+	 * group is in an unhandled page-fault state.
+	 * We need to make sure any such page fault is handled before we
+	 * return, so that every outstanding MMU transaction has completed.
+	 * This allows the caller to safely remove physical pages once we
+	 * have returned.
+ */
+
+ MALI_DEBUG_ASSERT(NULL != gp_group);
+ ret = mali_group_zap_session(gp_group, session);
+ if (MALI_FALSE == ret) {
+ struct mali_gp_job *gp_job = NULL;
+
+ mali_executor_complete_group(gp_group, MALI_FALSE, &gp_job, NULL);
+
+ MALI_DEBUG_ASSERT_POINTER(gp_job);
+
+ /* GP job completed, make sure it is freed */
+ mali_scheduler_complete_gp_job(gp_job, MALI_FALSE,
+ MALI_TRUE, MALI_TRUE);
+ }
+
+ if (mali_executor_has_virtual_group()) {
+ ret = mali_group_zap_session(virtual_group, session);
+ if (MALI_FALSE == ret) {
+ struct mali_pp_job *pp_job = NULL;
+
+ mali_executor_complete_group(virtual_group, MALI_FALSE, NULL, &pp_job);
+
+ if (NULL != pp_job) {
+ /* PP job completed, make sure it is freed */
+ mali_scheduler_complete_pp_job(pp_job, 0,
+ MALI_FALSE, MALI_TRUE);
+ }
+ }
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_working,
+ struct mali_group, executor_list) {
+ ret = mali_group_zap_session(group, session);
+ if (MALI_FALSE == ret) {
+ ret = mali_group_zap_session(group, session);
+ if (MALI_FALSE == ret) {
+ struct mali_pp_job *pp_job = NULL;
+
+ mali_executor_complete_group(group, MALI_FALSE, NULL, &pp_job);
+
+ if (NULL != pp_job) {
+ /* PP job completed, free it */
+ mali_scheduler_complete_pp_job(pp_job,
+ 0, MALI_FALSE,
+ MALI_TRUE);
+ }
+ }
+ }
+ }
+
+ mali_executor_unlock();
+}
+
+void mali_executor_schedule_from_mask(mali_scheduler_mask mask, mali_bool deferred_schedule)
+{
+ if (MALI_SCHEDULER_MASK_EMPTY != mask) {
+ if (MALI_TRUE == deferred_schedule) {
+ _mali_osk_wq_schedule_work_high_pri(executor_wq_high_pri);
+ } else {
+			/* Schedule from this thread */
+ mali_executor_lock();
+ mali_executor_schedule();
+ mali_executor_unlock();
+ }
+ }
+}
+
+_mali_osk_errcode_t mali_executor_interrupt_gp(struct mali_group *group,
+ mali_bool in_upper_half)
+{
+ enum mali_interrupt_result int_result;
+ mali_bool time_out = MALI_FALSE;
+
+ MALI_DEBUG_PRINT(4, ("Executor: GP interrupt from %s in %s half\n",
+ mali_group_core_description(group),
+ in_upper_half ? "upper" : "bottom"));
+
+ mali_executor_lock();
+ if (!mali_group_is_working(group)) {
+ /* Not working, so nothing to do */
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(mali_group_is_working(group));
+
+ if (mali_group_has_timed_out(group)) {
+ int_result = MALI_INTERRUPT_RESULT_ERROR;
+ time_out = MALI_TRUE;
+ MALI_PRINT(("Executor GP: Job %d Timeout on %s\n",
+ mali_gp_job_get_id(group->gp_running_job),
+ mali_group_core_description(group)));
+ } else {
+ int_result = mali_group_get_interrupt_result_gp(group);
+ if (MALI_INTERRUPT_RESULT_NONE == int_result) {
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ if (MALI_INTERRUPT_RESULT_NONE == int_result) {
+ /* No interrupts signalled, so nothing to do */
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+#else
+ MALI_DEBUG_ASSERT(MALI_INTERRUPT_RESULT_NONE != int_result);
+#endif
+
+ mali_group_mask_all_interrupts_gp(group);
+
+ if (MALI_INTERRUPT_RESULT_SUCCESS_VS == int_result) {
+ if (mali_group_gp_is_active(group)) {
+ /* Only VS completed so far, while PLBU is still active */
+
+ /* Enable all but the current interrupt */
+ mali_group_enable_interrupts_gp(group, int_result);
+
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_OK;
+ }
+ } else if (MALI_INTERRUPT_RESULT_SUCCESS_PLBU == int_result) {
+ if (mali_group_gp_is_active(group)) {
+ /* Only PLBU completed so far, while VS is still active */
+
+ /* Enable all but the current interrupt */
+ mali_group_enable_interrupts_gp(group, int_result);
+
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_OK;
+ }
+ } else if (MALI_INTERRUPT_RESULT_OOM == int_result) {
+
+ mali_executor_unlock();
+
+ mali_group_schedule_oom_work_handler(group);
+
+ return _MALI_OSK_ERR_OK;
+ }
+
+ /* We should now have a real interrupt to handle */
+
+ MALI_DEBUG_PRINT(4, ("Executor: Group %s completed with %s\n",
+ mali_group_core_description(group),
+ (MALI_INTERRUPT_RESULT_ERROR == int_result) ?
+ "ERROR" : "success"));
+
+ if (in_upper_half && MALI_INTERRUPT_RESULT_ERROR == int_result) {
+ /* Don't bother to do processing of errors in upper half */
+ mali_executor_unlock();
+
+ if (MALI_FALSE == time_out) {
+ mali_group_schedule_bottom_half_gp(group);
+ }
+ } else {
+ struct mali_gp_job *job;
+ mali_bool success;
+
+ if (MALI_TRUE == time_out) {
+ mali_group_dump_status(group);
+ }
+
+ success = (int_result != MALI_INTERRUPT_RESULT_ERROR) ?
+ MALI_TRUE : MALI_FALSE;
+
+ mali_executor_complete_group(group, success, &job, NULL);
+
+ mali_executor_unlock();
+
+ /* GP jobs always fully complete */
+ MALI_DEBUG_ASSERT(NULL != job);
+
+ /* This will notify user space and close the job object */
+ mali_scheduler_complete_gp_job(job, success,
+ MALI_TRUE, MALI_TRUE);
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_executor_interrupt_pp(struct mali_group *group,
+ mali_bool in_upper_half)
+{
+ enum mali_interrupt_result int_result;
+ mali_bool time_out = MALI_FALSE;
+
+ MALI_DEBUG_PRINT(4, ("Executor: PP interrupt from %s in %s half\n",
+ mali_group_core_description(group),
+ in_upper_half ? "upper" : "bottom"));
+
+ mali_executor_lock();
+
+ if (!mali_group_is_working(group)) {
+ /* Not working, so nothing to do */
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ if (in_upper_half) {
+ if (mali_group_is_in_virtual(group)) {
+ /* Child groups should never handle PP interrupts */
+ MALI_DEBUG_ASSERT(!mali_group_has_timed_out(group));
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(mali_group_is_working(group));
+ MALI_DEBUG_ASSERT(!mali_group_is_in_virtual(group));
+
+ if (mali_group_has_timed_out(group)) {
+ int_result = MALI_INTERRUPT_RESULT_ERROR;
+ time_out = MALI_TRUE;
+ MALI_PRINT(("Executor PP: Job %d Timeout on %s\n",
+ mali_pp_job_get_id(group->pp_running_job),
+ mali_group_core_description(group)));
+ } else {
+ int_result = mali_group_get_interrupt_result_pp(group);
+ if (MALI_INTERRUPT_RESULT_NONE == int_result) {
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ if (MALI_INTERRUPT_RESULT_NONE == int_result) {
+ /* No interrupts signalled, so nothing to do */
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ } else if (MALI_INTERRUPT_RESULT_SUCCESS == int_result) {
+ if (mali_group_is_virtual(group) && mali_group_pp_is_active(group)) {
+ /* Some child groups are still working, so nothing to do right now */
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+#else
+ MALI_DEBUG_ASSERT(MALI_INTERRUPT_RESULT_NONE != int_result);
+#endif
+
+ /* We should now have a real interrupt to handle */
+
+ MALI_DEBUG_PRINT(4, ("Executor: Group %s completed with %s\n",
+ mali_group_core_description(group),
+ (MALI_INTERRUPT_RESULT_ERROR == int_result) ?
+ "ERROR" : "success"));
+
+ if (in_upper_half && MALI_INTERRUPT_RESULT_ERROR == int_result) {
+ /* Don't bother to do processing of errors in upper half */
+ mali_group_mask_all_interrupts_pp(group);
+ mali_executor_unlock();
+
+ if (MALI_FALSE == time_out) {
+ mali_group_schedule_bottom_half_pp(group);
+ }
+ } else {
+ struct mali_pp_job *job = NULL;
+ mali_bool success;
+
+ if (MALI_TRUE == time_out) {
+ mali_group_dump_status(group);
+ }
+
+ success = (int_result == MALI_INTERRUPT_RESULT_SUCCESS) ?
+ MALI_TRUE : MALI_FALSE;
+
+ mali_executor_complete_group(group, success, NULL, &job);
+
+ mali_executor_unlock();
+
+ if (NULL != job) {
+ /* Notify user space and close the job object */
+ mali_scheduler_complete_pp_job(job,
+ num_physical_pp_cores_total,
+ MALI_TRUE, MALI_TRUE);
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_executor_interrupt_mmu(struct mali_group *group,
+ mali_bool in_upper_half)
+{
+ enum mali_interrupt_result int_result;
+
+ MALI_DEBUG_PRINT(4, ("Executor: MMU interrupt from %s in %s half\n",
+ mali_group_core_description(group),
+ in_upper_half ? "upper" : "bottom"));
+
+ mali_executor_lock();
+ if (!mali_group_is_working(group)) {
+ /* Not working, so nothing to do */
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(mali_group_is_working(group));
+
+ int_result = mali_group_get_interrupt_result_mmu(group);
+ if (MALI_INTERRUPT_RESULT_NONE == int_result) {
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ if (MALI_INTERRUPT_RESULT_NONE == int_result) {
+ /* No interrupts signalled, so nothing to do */
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+#else
+ MALI_DEBUG_ASSERT(MALI_INTERRUPT_RESULT_ERROR == int_result);
+#endif
+
+ /* We should now have a real interrupt to handle */
+
+ if (in_upper_half) {
+ /* Don't bother to do processing of errors in upper half */
+
+ struct mali_group *parent = group->parent_group;
+
+ mali_mmu_mask_all_interrupts(group->mmu);
+
+ mali_executor_unlock();
+
+ if (NULL == parent) {
+ mali_group_schedule_bottom_half_mmu(group);
+ } else {
+ mali_group_schedule_bottom_half_mmu(parent);
+ }
+
+ } else {
+ struct mali_gp_job *gp_job = NULL;
+ struct mali_pp_job *pp_job = NULL;
+
+#ifdef DEBUG
+
+ u32 fault_address = mali_mmu_get_page_fault_addr(group->mmu);
+ u32 status = mali_mmu_get_status(group->mmu);
+ MALI_DEBUG_PRINT(2, ("Executor: Mali page fault detected at 0x%x from bus id %d of type %s on %s\n",
+ (void *)(uintptr_t)fault_address,
+ (status >> 6) & 0x1F,
+ (status & 32) ? "write" : "read",
+ group->mmu->hw_core.description));
+ MALI_DEBUG_PRINT(3, ("Executor: MMU rawstat = 0x%08X, MMU status = 0x%08X\n",
+ mali_mmu_get_rawstat(group->mmu), status));
+ mali_mmu_pagedir_diag(mali_session_get_page_directory(group->session), fault_address);
+#endif
+
+ mali_executor_complete_group(group, MALI_FALSE, &gp_job, &pp_job);
+
+ mali_executor_unlock();
+
+ if (NULL != gp_job) {
+ MALI_DEBUG_ASSERT(NULL == pp_job);
+
+ /* Notify user space and close the job object */
+ mali_scheduler_complete_gp_job(gp_job, MALI_FALSE,
+ MALI_TRUE, MALI_TRUE);
+ } else if (NULL != pp_job) {
+ MALI_DEBUG_ASSERT(NULL == gp_job);
+
+ /* Notify user space and close the job object */
+ mali_scheduler_complete_pp_job(pp_job,
+ num_physical_pp_cores_total,
+ MALI_TRUE, MALI_TRUE);
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_executor_group_oom(struct mali_group *group)
+{
+ struct mali_gp_job *job = NULL;
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ MALI_DEBUG_ASSERT_POINTER(group->mmu);
+
+ mali_executor_lock();
+
+ job = mali_group_get_running_gp_job(group);
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ /* Give group a chance to generate a SUSPEND event */
+ mali_group_oom(group);
+#endif
+
+ mali_gp_job_set_current_heap_addr(job, mali_gp_read_plbu_alloc_start_addr(group->gp_core));
+
+ mali_executor_unlock();
+
+ if (_MALI_OSK_ERR_OK == mali_mem_add_mem_size(job->session, job->heap_base_addr, job->heap_grow_size)) {
+ _mali_osk_notification_t *new_notification = NULL;
+
+ new_notification = _mali_osk_notification_create(
+ _MALI_NOTIFICATION_GP_STALLED,
+ sizeof(_mali_uk_gp_job_suspended_s));
+
+ /* resume job with new heap,
+ * This will also re-enable interrupts
+ */
+ mali_executor_lock();
+
+ mali_executor_send_gp_oom_to_user(job, job->heap_grow_size);
+
+ if (NULL != new_notification) {
+
+ mali_gp_job_set_oom_notification(job, new_notification);
+
+ mali_group_resume_gp_with_new_heap(group, mali_gp_job_get_id(job),
+ job->heap_current_addr,
+ job->heap_current_addr + job->heap_grow_size);
+ }
+ mali_executor_unlock();
+ } else {
+ mali_executor_lock();
+ mali_executor_send_gp_oom_to_user(job, 0);
+ mali_executor_unlock();
+ }
+
+}
+
+void mali_executor_group_power_up(struct mali_group *groups[], u32 num_groups)
+{
+ u32 i;
+ mali_bool child_groups_activated = MALI_FALSE;
+ mali_bool do_schedule = MALI_FALSE;
+#if defined(DEBUG)
+ u32 num_activated = 0;
+#endif
+
+ MALI_DEBUG_ASSERT_POINTER(groups);
+ MALI_DEBUG_ASSERT(0 < num_groups);
+
+ mali_executor_lock();
+
+ MALI_DEBUG_PRINT(3, ("Executor: powering up %u groups\n", num_groups));
+
+ for (i = 0; i < num_groups; i++) {
+ MALI_DEBUG_PRINT(3, ("Executor: powering up group %s\n",
+ mali_group_core_description(groups[i])));
+
+ mali_group_power_up(groups[i]);
+
+ if ((MALI_GROUP_STATE_ACTIVATION_PENDING != mali_group_get_state(groups[i]) ||
+ (MALI_TRUE != mali_executor_group_is_in_state(groups[i], EXEC_STATE_INACTIVE)))) {
+ /* nothing more to do for this group */
+ continue;
+ }
+
+ MALI_DEBUG_PRINT(3, ("Executor: activating group %s\n",
+ mali_group_core_description(groups[i])));
+
+#if defined(DEBUG)
+ num_activated++;
+#endif
+
+ if (mali_group_is_in_virtual(groups[i])) {
+ /*
+ * At least one child group of the virtual group is powered on.
+ */
+ child_groups_activated = MALI_TRUE;
+ } else if (MALI_FALSE == mali_group_is_virtual(groups[i])) {
+ /* Set gp and pp not in virtual to active. */
+ mali_group_set_active(groups[i]);
+ }
+
+ /* Move group from inactive to idle list */
+ if (groups[i] == gp_group) {
+ MALI_DEBUG_ASSERT(EXEC_STATE_INACTIVE ==
+ gp_group_state);
+ gp_group_state = EXEC_STATE_IDLE;
+ } else if (MALI_FALSE == mali_group_is_in_virtual(groups[i])
+ && MALI_FALSE == mali_group_is_virtual(groups[i])) {
+ MALI_DEBUG_ASSERT(MALI_TRUE == mali_executor_group_is_in_state(groups[i],
+ EXEC_STATE_INACTIVE));
+
+ mali_executor_change_state_pp_physical(groups[i],
+ &group_list_inactive,
+ &group_list_inactive_count,
+ &group_list_idle,
+ &group_list_idle_count);
+ }
+
+ do_schedule = MALI_TRUE;
+ }
+
+ if (mali_executor_has_virtual_group() &&
+ MALI_TRUE == child_groups_activated &&
+ MALI_GROUP_STATE_ACTIVATION_PENDING ==
+ mali_group_get_state(virtual_group)) {
+ /*
+ * Try to activate the virtual group. This may not succeed every
+ * time, since the child groups may not all be powered on at once
+ * while the virtual group is in the activation pending state.
+ */
+ if (mali_group_set_active(virtual_group)) {
+ /* Move group from inactive to idle */
+ MALI_DEBUG_ASSERT(EXEC_STATE_INACTIVE ==
+ virtual_group_state);
+ virtual_group_state = EXEC_STATE_IDLE;
+
+ MALI_DEBUG_PRINT(3, ("Executor: powering up %u groups completed, %u physical activated, 1 virtual activated.\n", num_groups, num_activated));
+ } else {
+ MALI_DEBUG_PRINT(3, ("Executor: powering up %u groups completed, %u physical activated\n", num_groups, num_activated));
+ }
+ } else {
+ MALI_DEBUG_PRINT(3, ("Executor: powering up %u groups completed, %u physical activated\n", num_groups, num_activated));
+ }
+
+ if (MALI_TRUE == do_schedule) {
+ /* Trigger a schedule */
+ mali_executor_schedule();
+ }
+
+ mali_executor_unlock();
+}
+
+void mali_executor_group_power_down(struct mali_group *groups[],
+ u32 num_groups)
+{
+ u32 i;
+
+ MALI_DEBUG_ASSERT_POINTER(groups);
+ MALI_DEBUG_ASSERT(0 < num_groups);
+
+ mali_executor_lock();
+
+ MALI_DEBUG_PRINT(3, ("Executor: powering down %u groups\n", num_groups));
+
+ for (i = 0; i < num_groups; i++) {
+ /* Groups must be either disabled or inactive, while the virtual
+ * group may also be empty: when pm_runtime_suspend is handled, the
+ * virtual group can be powered off, and mali_pm_state_lock must be
+ * released before mali_executor_lock is taken. If a new physical
+ * job is queued in that window, all physical groups may be pulled
+ * out of the virtual group, so only an empty virtual group can be
+ * powered down here. Those physical groups will be powered up again
+ * in the following pm_runtime_resume callback.
+ */
+ MALI_DEBUG_ASSERT(mali_executor_group_is_in_state(groups[i],
+ EXEC_STATE_DISABLED) ||
+ mali_executor_group_is_in_state(groups[i],
+ EXEC_STATE_INACTIVE) ||
+ mali_executor_group_is_in_state(groups[i],
+ EXEC_STATE_EMPTY));
+
+ MALI_DEBUG_PRINT(3, ("Executor: powering down group %s\n",
+ mali_group_core_description(groups[i])));
+
+ mali_group_power_down(groups[i]);
+ }
+
+ MALI_DEBUG_PRINT(3, ("Executor: powering down %u groups completed\n", num_groups));
+
+ mali_executor_unlock();
+}
+
+void mali_executor_abort_session(struct mali_session_data *session)
+{
+ struct mali_group *group;
+ struct mali_group *tmp_group;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_ASSERT(session->is_aborting);
+
+ MALI_DEBUG_PRINT(3,
+ ("Executor: Aborting all jobs from session 0x%08X.\n",
+ session));
+
+ mali_executor_lock();
+
+ if (mali_group_get_session(gp_group) == session) {
+ if (EXEC_STATE_WORKING == gp_group_state) {
+ struct mali_gp_job *gp_job = NULL;
+
+ mali_executor_complete_group(gp_group, MALI_FALSE, &gp_job, NULL);
+
+ MALI_DEBUG_ASSERT_POINTER(gp_job);
+
+ /* GP job completed, make sure it is freed */
+ mali_scheduler_complete_gp_job(gp_job, MALI_FALSE,
+ MALI_FALSE, MALI_TRUE);
+ } else {
+ /* Same session, but not working, so just clear it */
+ mali_group_clear_session(gp_group);
+ }
+ }
+
+ if (mali_executor_has_virtual_group()) {
+ if (EXEC_STATE_WORKING == virtual_group_state
+ && mali_group_get_session(virtual_group) == session) {
+ struct mali_pp_job *pp_job = NULL;
+
+ mali_executor_complete_group(virtual_group, MALI_FALSE, NULL, &pp_job);
+
+ if (NULL != pp_job) {
+ /* PP job completed, make sure it is freed */
+ mali_scheduler_complete_pp_job(pp_job, 0,
+ MALI_FALSE, MALI_TRUE);
+ }
+ }
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_working,
+ struct mali_group, executor_list) {
+ if (mali_group_get_session(group) == session) {
+ struct mali_pp_job *pp_job = NULL;
+
+ mali_executor_complete_group(group, MALI_FALSE, NULL, &pp_job);
+
+ if (NULL != pp_job) {
+ /* PP job completed, make sure it is freed */
+ mali_scheduler_complete_pp_job(pp_job, 0,
+ MALI_FALSE, MALI_TRUE);
+ }
+ }
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_idle, struct mali_group, executor_list) {
+ mali_group_clear_session(group);
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_inactive, struct mali_group, executor_list) {
+ mali_group_clear_session(group);
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_disabled, struct mali_group, executor_list) {
+ mali_group_clear_session(group);
+ }
+
+ mali_executor_unlock();
+}
+
+
+void mali_executor_core_scaling_enable(void)
+{
+ /* Note: core scaling is enabled by default */
+ core_scaling_enabled = MALI_TRUE;
+}
+
+void mali_executor_core_scaling_disable(void)
+{
+ core_scaling_enabled = MALI_FALSE;
+}
+
+mali_bool mali_executor_core_scaling_is_enabled(void)
+{
+ return core_scaling_enabled;
+}
+
+void mali_executor_group_enable(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ mali_executor_lock();
+
+ if ((NULL != mali_group_get_gp_core(group) || NULL != mali_group_get_pp_core(group))
+ && (mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED))) {
+ mali_executor_group_enable_internal(group);
+ }
+
+ mali_executor_schedule();
+ mali_executor_unlock();
+
+ _mali_osk_wq_schedule_work(executor_wq_notify_core_change);
+}
+
+/*
+ * If a physical group is inactive or idle, disable it immediately.
+ * If the group is in the virtual group and the virtual group is idle,
+ * disable the given physical group within it.
+ */
+void mali_executor_group_disable(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ mali_executor_lock();
+
+ if ((NULL != mali_group_get_gp_core(group) || NULL != mali_group_get_pp_core(group))
+ && (!mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED))) {
+ mali_executor_group_disable_internal(group);
+ }
+
+ mali_executor_schedule();
+ mali_executor_unlock();
+
+ _mali_osk_wq_schedule_work(executor_wq_notify_core_change);
+}
+
+mali_bool mali_executor_group_is_disabled(struct mali_group *group)
+{
+ /* NB: This function is not optimized for time critical usage */
+
+ mali_bool ret;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ mali_executor_lock();
+ ret = mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED);
+ mali_executor_unlock();
+
+ return ret;
+}
+
+int mali_executor_set_perf_level(unsigned int target_core_nr, mali_bool override)
+{
+ if (target_core_nr == num_physical_pp_cores_enabled)
+ return 0;
+ if (MALI_FALSE == core_scaling_enabled && MALI_FALSE == override)
+ return -EPERM;
+ if (target_core_nr > num_physical_pp_cores_total)
+ return -EINVAL;
+ if (0 == target_core_nr)
+ return -EINVAL;
+
+ mali_executor_core_scale(target_core_nr);
+
+ _mali_osk_wq_schedule_work(executor_wq_notify_core_change);
+
+ return 0;
+}
+
+#if MALI_STATE_TRACKING
+u32 mali_executor_dump_state(char *buf, u32 size)
+{
+ int n = 0;
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ mali_executor_lock();
+
+ switch (gp_group_state) {
+ case EXEC_STATE_INACTIVE:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "GP group is in state INACTIVE\n");
+ break;
+ case EXEC_STATE_IDLE:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "GP group is in state IDLE\n");
+ break;
+ case EXEC_STATE_WORKING:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "GP group is in state WORKING\n");
+ break;
+ default:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "GP group is in unknown/illegal state %u\n",
+ gp_group_state);
+ break;
+ }
+
+ n += mali_group_dump_state(gp_group, buf + n, size - n);
+
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Physical PP groups in WORKING state (count = %u):\n",
+ group_list_working_count);
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_working, struct mali_group, executor_list) {
+ n += mali_group_dump_state(group, buf + n, size - n);
+ }
+
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Physical PP groups in IDLE state (count = %u):\n",
+ group_list_idle_count);
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, executor_list) {
+ n += mali_group_dump_state(group, buf + n, size - n);
+ }
+
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Physical PP groups in INACTIVE state (count = %u):\n",
+ group_list_inactive_count);
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_inactive, struct mali_group, executor_list) {
+ n += mali_group_dump_state(group, buf + n, size - n);
+ }
+
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Physical PP groups in DISABLED state (count = %u):\n",
+ group_list_disabled_count);
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_disabled, struct mali_group, executor_list) {
+ n += mali_group_dump_state(group, buf + n, size - n);
+ }
+
+ if (mali_executor_has_virtual_group()) {
+ switch (virtual_group_state) {
+ case EXEC_STATE_EMPTY:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Virtual PP group is in state EMPTY\n");
+ break;
+ case EXEC_STATE_INACTIVE:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Virtual PP group is in state INACTIVE\n");
+ break;
+ case EXEC_STATE_IDLE:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Virtual PP group is in state IDLE\n");
+ break;
+ case EXEC_STATE_WORKING:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Virtual PP group is in state WORKING\n");
+ break;
+ default:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Virtual PP group is in unknown/illegal state %u\n",
+ virtual_group_state);
+ break;
+ }
+
+ n += mali_group_dump_state(virtual_group, buf + n, size - n);
+ }
+
+ mali_executor_unlock();
+
+ n += _mali_osk_snprintf(buf + n, size - n, "\n");
+
+ return n;
+}
+#endif
+
+_mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores(_mali_uk_get_pp_number_of_cores_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+ args->number_of_total_cores = num_physical_pp_cores_total;
+ args->number_of_enabled_cores = num_physical_pp_cores_enabled;
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_pp_core_version(_mali_uk_get_pp_core_version_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+ args->version = pp_version;
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores(_mali_uk_get_gp_number_of_cores_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+ args->number_of_cores = 1;
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_gp_core_version(_mali_uk_get_gp_core_version_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+ args->version = gp_version;
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_gp_suspend_response(_mali_uk_gp_suspend_response_s *args)
+{
+ struct mali_session_data *session;
+ struct mali_gp_job *job;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+
+ session = (struct mali_session_data *)(uintptr_t)args->ctx;
+
+ if (_MALIGP_JOB_RESUME_WITH_NEW_HEAP == args->code) {
+ _mali_osk_notification_t *new_notification = NULL;
+
+ new_notification = _mali_osk_notification_create(
+ _MALI_NOTIFICATION_GP_STALLED,
+ sizeof(_mali_uk_gp_job_suspended_s));
+
+ if (NULL != new_notification) {
+ MALI_DEBUG_PRINT(3, ("Executor: Resuming job %u with new heap; 0x%08X - 0x%08X\n",
+ args->cookie, args->arguments[0], args->arguments[1]));
+
+ mali_executor_lock();
+
+ /* Resume the job in question if it is still running */
+ job = mali_group_get_running_gp_job(gp_group);
+ if (NULL != job &&
+ args->cookie == mali_gp_job_get_id(job) &&
+ session == mali_gp_job_get_session(job)) {
+ /*
+ * Correct job is running, resume with new heap
+ */
+
+ mali_gp_job_set_oom_notification(job,
+ new_notification);
+
+ /* This will also re-enable interrupts */
+ mali_group_resume_gp_with_new_heap(gp_group,
+ args->cookie,
+ args->arguments[0],
+ args->arguments[1]);
+
+ job->heap_base_addr = args->arguments[0];
+ job->heap_current_addr = args->arguments[0];
+
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_OK;
+ } else {
+ MALI_PRINT_ERROR(("Executor: Unable to resume, GP job no longer running.\n"));
+
+ _mali_osk_notification_delete(new_notification);
+
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+ } else {
+ MALI_PRINT_ERROR(("Executor: Failed to allocate notification object. Will abort GP job.\n"));
+ }
+ } else {
+ MALI_DEBUG_PRINT(2, ("Executor: Aborting job %u, no new heap provided\n", args->cookie));
+ }
+
+ mali_executor_lock();
+
+ /* Abort the job in question if it is still running */
+ job = mali_group_get_running_gp_job(gp_group);
+ if (NULL != job &&
+ args->cookie == mali_gp_job_get_id(job) &&
+ session == mali_gp_job_get_session(job)) {
+ /* Correct job is still running */
+ struct mali_gp_job *job_done = NULL;
+
+ mali_executor_complete_group(gp_group, MALI_FALSE, &job_done, NULL);
+
+ /* The same job should have completed */
+ MALI_DEBUG_ASSERT(job_done == job);
+
+ /* GP job completed, make sure it is freed */
+ mali_scheduler_complete_gp_job(job_done, MALI_FALSE,
+ MALI_TRUE, MALI_TRUE);
+ }
+
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+}
+
+
+/*
+ * ---------- Implementation of static functions ----------
+ */
+
+void mali_executor_lock(void)
+{
+ _mali_osk_spinlock_irq_lock(mali_executor_lock_obj);
+ MALI_DEBUG_PRINT(5, ("Executor: lock taken\n"));
+}
+
+void mali_executor_unlock(void)
+{
+ MALI_DEBUG_PRINT(5, ("Executor: Releasing lock\n"));
+ _mali_osk_spinlock_irq_unlock(mali_executor_lock_obj);
+}
+
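+/* Wait-queue predicate: MALI_TRUE when the executor is paused and no group is working */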
+static mali_bool mali_executor_is_suspended(void *data)
+{
+ mali_bool ret;
+
+ /* This callback does not use the data pointer. */
+ MALI_IGNORE(data);
+
+ mali_executor_lock();
+
+ ret = pause_count > 0 && !mali_executor_is_working();
+
+ mali_executor_unlock();
+
+ return ret;
+}
+
+static mali_bool mali_executor_is_working(void)
+{
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ return (0 != group_list_working_count ||
+ EXEC_STATE_WORKING == gp_group_state ||
+ EXEC_STATE_WORKING == virtual_group_state);
+}
+
+static void mali_executor_disable_empty_virtual(void)
+{
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(virtual_group_state != EXEC_STATE_EMPTY);
+ MALI_DEBUG_ASSERT(virtual_group_state != EXEC_STATE_WORKING);
+
+ if (mali_group_is_empty(virtual_group)) {
+ virtual_group_state = EXEC_STATE_EMPTY;
+ }
+}
+
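+/*
+ * Move an idle physical group back into the virtual group, first matching
+ * the group's power state to that of the virtual group. Returns MALI_TRUE
+ * if the caller must trigger a PM update.
+ */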
+static mali_bool mali_executor_physical_rejoin_virtual(struct mali_group *group)
+{
+ mali_bool trigger_pm_update = MALI_FALSE;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ /* Only rejoining after job has completed (still active) */
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE ==
+ mali_group_get_state(group));
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(MALI_TRUE == mali_executor_has_virtual_group());
+ MALI_DEBUG_ASSERT(MALI_FALSE == mali_group_is_virtual(group));
+
+ /* Make sure group and virtual group have same status */
+
+ if (MALI_GROUP_STATE_INACTIVE == mali_group_get_state(virtual_group)) {
+ if (mali_group_deactivate(group)) {
+ trigger_pm_update = MALI_TRUE;
+ }
+
+ if (virtual_group_state == EXEC_STATE_EMPTY) {
+ virtual_group_state = EXEC_STATE_INACTIVE;
+ }
+ } else if (MALI_GROUP_STATE_ACTIVATION_PENDING ==
+ mali_group_get_state(virtual_group)) {
+ /*
+ * Activation is pending for virtual group, leave
+ * this child group as active.
+ */
+ if (virtual_group_state == EXEC_STATE_EMPTY) {
+ virtual_group_state = EXEC_STATE_INACTIVE;
+ }
+ } else {
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE ==
+ mali_group_get_state(virtual_group));
+
+ if (virtual_group_state == EXEC_STATE_EMPTY) {
+ virtual_group_state = EXEC_STATE_IDLE;
+ }
+ }
+
+ /* Remove group from idle list */
+ MALI_DEBUG_ASSERT(mali_executor_group_is_in_state(group,
+ EXEC_STATE_IDLE));
+ _mali_osk_list_delinit(&group->executor_list);
+ group_list_idle_count--;
+
+ /*
+ * And finally rejoin the virtual group
+ * group will start working on same job as virtual_group,
+ * if virtual_group is working on a job
+ */
+ mali_group_add_group(virtual_group, group);
+
+ return trigger_pm_update;
+}
+
+static mali_bool mali_executor_has_virtual_group(void)
+{
+#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470))
+ return (NULL != virtual_group) ? MALI_TRUE : MALI_FALSE;
+#else
+ return MALI_FALSE;
+#endif /* (defined(CONFIG_MALI450) || defined(CONFIG_MALI470)) */
+}
+
+static mali_bool mali_executor_virtual_group_is_usable(void)
+{
+#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470))
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return ((EXEC_STATE_INACTIVE == virtual_group_state ||
+ EXEC_STATE_IDLE == virtual_group_state) && (virtual_group->state != MALI_GROUP_STATE_ACTIVATION_PENDING)) ?
+ MALI_TRUE : MALI_FALSE;
+#else
+ return MALI_FALSE;
+#endif /* (defined(CONFIG_MALI450) || defined(CONFIG_MALI470)) */
+}
+
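+/*
+ * On Mali-400, returns MALI_TRUE when the next physical PP job is large and
+ * unstarted while other groups are still working, so the caller can hold it
+ * back to avoid starving the GP.
+ */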
+static mali_bool mali_executor_tackle_gp_bound(void)
+{
+ struct mali_pp_job *job;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ job = mali_scheduler_job_pp_physical_peek();
+
+ if (NULL != job && MALI_TRUE == mali_is_mali400()) {
+ if (0 < group_list_working_count &&
+ mali_pp_job_is_large_and_unstarted(job)) {
+ return MALI_TRUE;
+ }
+ }
+
+ return MALI_FALSE;
+}
+
+/*
+ * This is where jobs are actually started.
+ */
+static void mali_executor_schedule(void)
+{
+ u32 i;
+ u32 num_physical_needed = 0;
+ u32 num_physical_to_process = 0;
+ mali_bool trigger_pm_update = MALI_FALSE;
+ mali_bool deactivate_idle_group = MALI_TRUE;
+
+ /* Physical groups + jobs to start in this function */
+ struct mali_group *groups_to_start[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS];
+ struct mali_pp_job *jobs_to_start[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS];
+ u32 sub_jobs_to_start[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS];
+ u32 num_jobs_to_start = 0;
+
+ /* Virtual job to start in this function */
+ struct mali_pp_job *virtual_job_to_start = NULL;
+
+ /* GP job to start in this function */
+ struct mali_gp_job *gp_job_to_start = NULL;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ if (pause_count > 0) {
+ /* Execution is suspended, don't schedule any jobs. */
+ return;
+ }
+
+ /* Lock needed in order to safely handle the job queues */
+ mali_scheduler_lock();
+
+ /* 1. Activate the GP group first if a GP job is queued. */
+ if (EXEC_STATE_INACTIVE == gp_group_state &&
+ 0 < mali_scheduler_job_gp_count()) {
+
+ enum mali_group_state state =
+ mali_group_activate(gp_group);
+ if (MALI_GROUP_STATE_ACTIVE == state) {
+ /* Set GP group state to idle */
+ gp_group_state = EXEC_STATE_IDLE;
+ } else {
+ trigger_pm_update = MALI_TRUE;
+ }
+ }
+
+ /* 2. Prepare as many physical groups as needed/possible */
+
+ num_physical_needed = mali_scheduler_job_physical_head_count();
+
+ /* On the Mali-450 platform, this block is rarely entered. */
+ if (0 < num_physical_needed) {
+
+ if (num_physical_needed <= group_list_idle_count) {
+ /* We have enough groups on idle list already */
+ num_physical_to_process = num_physical_needed;
+ num_physical_needed = 0;
+ } else {
+ /* We need to get a hold of some more groups */
+ num_physical_to_process = group_list_idle_count;
+ num_physical_needed -= group_list_idle_count;
+ }
+
+ if (0 < num_physical_needed) {
+
+ /* 2.1. Activate groups which are inactive */
+
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_inactive,
+ struct mali_group, executor_list) {
+ enum mali_group_state state =
+ mali_group_activate(group);
+ if (MALI_GROUP_STATE_ACTIVE == state) {
+ /* Move from inactive to idle */
+ mali_executor_change_state_pp_physical(group,
+ &group_list_inactive,
+ &group_list_inactive_count,
+ &group_list_idle,
+ &group_list_idle_count);
+ num_physical_to_process++;
+ } else {
+ trigger_pm_update = MALI_TRUE;
+ }
+
+ num_physical_needed--;
+ if (0 == num_physical_needed) {
+ /* We have activated all the groups we need */
+ break;
+ }
+ }
+ }
+
+ if (mali_executor_virtual_group_is_usable()) {
+
+ /*
+ * 2.2. And finally, steal and activate groups
+ * from the virtual group if we need even more
+ */
+ while (0 < num_physical_needed) {
+ struct mali_group *group;
+
+ group = mali_group_acquire_group(virtual_group);
+ if (NULL != group) {
+ enum mali_group_state state;
+
+ mali_executor_disable_empty_virtual();
+
+ state = mali_group_activate(group);
+ if (MALI_GROUP_STATE_ACTIVE == state) {
+ /* Group is ready, add to idle list */
+ _mali_osk_list_add(
+ &group->executor_list,
+ &group_list_idle);
+ group_list_idle_count++;
+ num_physical_to_process++;
+ } else {
+ /*
+ * Group is not ready yet,
+ * add to inactive list
+ */
+ _mali_osk_list_add(
+ &group->executor_list,
+ &group_list_inactive);
+ group_list_inactive_count++;
+
+ trigger_pm_update = MALI_TRUE;
+ }
+ num_physical_needed--;
+ } else {
+ /*
+ * We could not get enough groups
+ * from the virtual group.
+ */
+ break;
+ }
+ }
+ }
+
+ /* 2.3. Assign physical jobs to groups */
+
+ if (0 < num_physical_to_process) {
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle,
+ struct mali_group, executor_list) {
+ struct mali_pp_job *job = NULL;
+ u32 sub_job = MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS;
+
+ MALI_DEBUG_ASSERT(num_jobs_to_start <
+ MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS);
+
+ MALI_DEBUG_ASSERT(0 <
+ mali_scheduler_job_physical_head_count());
+
+ if (mali_executor_hint_is_enabled(
+ MALI_EXECUTOR_HINT_GP_BOUND)) {
+ if (MALI_TRUE == mali_executor_tackle_gp_bound()) {
+ /*
+ * We're gp bound,
+ * don't start this right now.
+ */
+ deactivate_idle_group = MALI_FALSE;
+ num_physical_to_process = 0;
+ break;
+ }
+ }
+
+ job = mali_scheduler_job_pp_physical_get(
+ &sub_job);
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT(sub_job <= MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS);
+
+ /* Put job + group on list of jobs to start later on */
+
+ groups_to_start[num_jobs_to_start] = group;
+ jobs_to_start[num_jobs_to_start] = job;
+ sub_jobs_to_start[num_jobs_to_start] = sub_job;
+ num_jobs_to_start++;
+
+ /* Move group from idle to working */
+ mali_executor_change_state_pp_physical(group,
+ &group_list_idle,
+ &group_list_idle_count,
+ &group_list_working,
+ &group_list_working_count);
+
+ num_physical_to_process--;
+ if (0 == num_physical_to_process) {
+ /* Got all we needed */
+ break;
+ }
+ }
+ }
+ }
+
+
+ /* 3. Deactivate idle PP groups. This must happen before the virtual
+ * group is activated, to cover the case where the normal queue only
+ * holds a physical job while the group is inactive: the job start is
+ * delayed until the group is activated, which calls the scheduler
+ * again. If a virtual job has arrived in the high-priority queue by
+ * then, the scheduler would otherwise do nothing, since executor
+ * scheduling would have stopped.
+ */
+
+ if (MALI_TRUE == mali_executor_deactivate_list_idle(deactivate_idle_group
+ && (!mali_timeline_has_physical_pp_job()))) {
+ trigger_pm_update = MALI_TRUE;
+ }
+
+ /* 4. Activate virtual group, if needed */
+
+ if (EXEC_STATE_INACTIVE == virtual_group_state &&
+ 0 < mali_scheduler_job_next_is_virtual()) {
+ enum mali_group_state state =
+ mali_group_activate(virtual_group);
+ if (MALI_GROUP_STATE_ACTIVE == state) {
+ /* Set virtual group state to idle */
+ virtual_group_state = EXEC_STATE_IDLE;
+ } else {
+ trigger_pm_update = MALI_TRUE;
+ }
+ }
+
+ /* 5. Trigger a PM update here so that groups are powered up as soon as possible. */
+
+ if (MALI_TRUE == trigger_pm_update) {
+ trigger_pm_update = MALI_FALSE;
+ mali_pm_update_async();
+ }
+
+ /* 6. Assign jobs to idle virtual group (or deactivate if no job) */
+
+ if (EXEC_STATE_IDLE == virtual_group_state) {
+ if (0 < mali_scheduler_job_next_is_virtual()) {
+ virtual_job_to_start =
+ mali_scheduler_job_pp_virtual_get();
+ virtual_group_state = EXEC_STATE_WORKING;
+ } else if (!mali_timeline_has_virtual_pp_job()) {
+ virtual_group_state = EXEC_STATE_INACTIVE;
+
+ if (mali_group_deactivate(virtual_group)) {
+ trigger_pm_update = MALI_TRUE;
+ }
+ }
+ }
+
+ /* 7. Assign job to idle GP group (or deactivate if no job) */
+
+ if (EXEC_STATE_IDLE == gp_group_state) {
+ if (0 < mali_scheduler_job_gp_count()) {
+ gp_job_to_start = mali_scheduler_job_gp_get();
+ gp_group_state = EXEC_STATE_WORKING;
+ } else if (!mali_timeline_has_gp_job()) {
+ gp_group_state = EXEC_STATE_INACTIVE;
+ if (mali_group_deactivate(gp_group)) {
+ trigger_pm_update = MALI_TRUE;
+ }
+ }
+ }
+
+ /* 8. We no longer need the schedule/queue lock */
+
+ mali_scheduler_unlock();
+
+ /* 9. Start the jobs selected above */
+
+ if (NULL != virtual_job_to_start) {
+ MALI_DEBUG_ASSERT(!mali_group_pp_is_active(virtual_group));
+ mali_group_start_pp_job(virtual_group,
+ virtual_job_to_start, 0);
+ }
+
+ for (i = 0; i < num_jobs_to_start; i++) {
+ MALI_DEBUG_ASSERT(!mali_group_pp_is_active(
+ groups_to_start[i]));
+ mali_group_start_pp_job(groups_to_start[i],
+ jobs_to_start[i],
+ sub_jobs_to_start[i]);
+ }
+
+ MALI_DEBUG_ASSERT_POINTER(gp_group);
+
+ if (NULL != gp_job_to_start) {
+ MALI_DEBUG_ASSERT(!mali_group_gp_is_active(gp_group));
+ mali_group_start_gp_job(gp_group, gp_job_to_start);
+ }
+
+ /* 10. Trigger any pending PM updates */
+ if (MALI_TRUE == trigger_pm_update) {
+ mali_pm_update_async();
+ }
+}
+
+/* Handler for deferred schedule requests */
+static void mali_executor_wq_schedule(void *arg)
+{
+ MALI_IGNORE(arg);
+ mali_executor_lock();
+ mali_executor_schedule();
+ mali_executor_unlock();
+}
+
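+/*
+ * Fill in the GP_STALLED notification for a suspended GP job and send it to
+ * the owning session, recording the cookie used to validate the response.
+ */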
+static void mali_executor_send_gp_oom_to_user(struct mali_gp_job *job, u32 added_size)
+{
+ _mali_uk_gp_job_suspended_s *jobres;
+ _mali_osk_notification_t *notification;
+
+ notification = mali_gp_job_get_oom_notification(job);
+
+ /*
+ * Remember the id we send to user space, so we have something to
+ * verify when we get a response
+ */
+ gp_returned_cookie = mali_gp_job_get_id(job);
+
+ jobres = (_mali_uk_gp_job_suspended_s *)notification->result_buffer;
+ jobres->user_job_ptr = mali_gp_job_get_user_id(job);
+ jobres->cookie = gp_returned_cookie;
+ jobres->heap_added_size = added_size;
+ mali_session_send_notification(mali_gp_job_get_session(job),
+ notification);
+}
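+
+/*
+ * Complete the GP job running on the group: collect the HW status, reset the
+ * core, release the timeline tracker and signal any linked PP tracker.
+ */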
+static struct mali_gp_job *mali_executor_complete_gp(struct mali_group *group,
+ mali_bool success)
+{
+ struct mali_gp_job *job;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ /* Extract the needed HW status from the core, then reset it */
+ job = mali_group_complete_gp(group, success);
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ /* Core is now ready to go into idle list */
+ gp_group_state = EXEC_STATE_IDLE;
+
+ /* This will potentially queue more GP and PP jobs */
+ mali_timeline_tracker_release(&job->tracker);
+
+ /* Signal PP job */
+ mali_gp_job_signal_pp_tracker(job, success);
+
+ return job;
+}
+
+static struct mali_pp_job *mali_executor_complete_pp(struct mali_group *group,
+ mali_bool success)
+{
+ struct mali_pp_job *job;
+ u32 sub_job;
+ mali_bool job_is_done;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ /* Extract the needed HW status from the core, then reset it */
+ job = mali_group_complete_pp(group, success, &sub_job);
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ /* Core is now ready to go into idle list */
+ if (mali_group_is_virtual(group)) {
+ virtual_group_state = EXEC_STATE_IDLE;
+ } else {
+ /* Move from working to idle state */
+ mali_executor_change_state_pp_physical(group,
+ &group_list_working,
+ &group_list_working_count,
+ &group_list_idle,
+ &group_list_idle_count);
+ }
+
+ /* The executor module owns the job itself from this point on */
+ mali_pp_job_mark_sub_job_completed(job, success);
+ job_is_done = mali_pp_job_is_complete(job);
+
+ if (job_is_done) {
+ /* This will potentially queue more GP and PP jobs */
+ mali_timeline_tracker_release(&job->tracker);
+ }
+
+ return job;
+}
+
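+/*
+ * Complete the running job on a group (GP or PP), move the group back to the
+ * idle state and reschedule. The finished job, if fully complete, is returned
+ * through gp_job_done or pp_job_done so the caller can release it outside the
+ * executor lock.
+ */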
+static void mali_executor_complete_group(struct mali_group *group,
+ mali_bool success,
+ struct mali_gp_job **gp_job_done,
+ struct mali_pp_job **pp_job_done)
+{
+ struct mali_gp_core *gp_core = mali_group_get_gp_core(group);
+ struct mali_pp_core *pp_core = mali_group_get_pp_core(group);
+ struct mali_gp_job *gp_job = NULL;
+ struct mali_pp_job *pp_job = NULL;
+ mali_bool pp_job_is_done = MALI_TRUE;
+
+ if (NULL != gp_core) {
+ gp_job = mali_executor_complete_gp(group, success);
+ } else {
+ MALI_DEBUG_ASSERT_POINTER(pp_core);
+ MALI_IGNORE(pp_core);
+ pp_job = mali_executor_complete_pp(group, success);
+
+ pp_job_is_done = mali_pp_job_is_complete(pp_job);
+ }
+
+ if (pause_count > 0) {
+ /* Execution has been suspended */
+
+ if (!mali_executor_is_working()) {
+ /* Last job completed, wake up sleepers */
+ _mali_osk_wait_queue_wake_up(
+ executor_working_wait_queue);
+ }
+ } else if (MALI_TRUE == mali_group_disable_requested(group)) {
+ mali_executor_core_scale_in_group_complete(group);
+
+ mali_executor_schedule();
+ } else {
+ /* try to schedule new jobs */
+ mali_executor_schedule();
+ }
+
+ if (NULL != gp_job) {
+ MALI_DEBUG_ASSERT_POINTER(gp_job_done);
+ *gp_job_done = gp_job;
+ } else if (pp_job_is_done) {
+ MALI_DEBUG_ASSERT_POINTER(pp_job);
+ MALI_DEBUG_ASSERT_POINTER(pp_job_done);
+ *pp_job_done = pp_job;
+ }
+}
+
+static void mali_executor_change_state_pp_physical(struct mali_group *group,
+ _mali_osk_list_t *old_list,
+ u32 *old_count,
+ _mali_osk_list_t *new_list,
+ u32 *new_count)
+{
+ /*
+ * It's a bit more complicated to change the state for the physical PP
+ * groups since their state is determined by the list they are on.
+ */
+#if defined(DEBUG)
+ mali_bool found = MALI_FALSE;
+ struct mali_group *group_iter;
+ struct mali_group *temp;
+ u32 old_counted = 0;
+ u32 new_counted = 0;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(old_list);
+ MALI_DEBUG_ASSERT_POINTER(old_count);
+ MALI_DEBUG_ASSERT_POINTER(new_list);
+ MALI_DEBUG_ASSERT_POINTER(new_count);
+
+ /*
+ * Verify that group is present on old list,
+ * and that the count is correct
+ */
+
+ _MALI_OSK_LIST_FOREACHENTRY(group_iter, temp, old_list,
+ struct mali_group, executor_list) {
+ old_counted++;
+ if (group == group_iter) {
+ found = MALI_TRUE;
+ }
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group_iter, temp, new_list,
+ struct mali_group, executor_list) {
+ new_counted++;
+ }
+
+ if (MALI_FALSE == found) {
+ if (old_list == &group_list_idle) {
+ MALI_DEBUG_PRINT(1, (" old Group list is idle,"));
+ } else if (old_list == &group_list_inactive) {
+ MALI_DEBUG_PRINT(1, (" old Group list is inactive,"));
+ } else if (old_list == &group_list_working) {
+ MALI_DEBUG_PRINT(1, (" old Group list is working,"));
+ } else if (old_list == &group_list_disabled) {
+ MALI_DEBUG_PRINT(1, (" old Group list is disable,"));
+ }
+
+ if (MALI_TRUE == mali_executor_group_is_in_state(group, EXEC_STATE_WORKING)) {
+ MALI_DEBUG_PRINT(1, (" group in working \n"));
+ } else if (MALI_TRUE == mali_executor_group_is_in_state(group, EXEC_STATE_INACTIVE)) {
+ MALI_DEBUG_PRINT(1, (" group in inactive \n"));
+ } else if (MALI_TRUE == mali_executor_group_is_in_state(group, EXEC_STATE_IDLE)) {
+ MALI_DEBUG_PRINT(1, (" group in idle \n"));
+ } else if (MALI_TRUE == mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED)) {
+ MALI_DEBUG_PRINT(1, (" but group in disabled \n"));
+ }
+ }
+
+ MALI_DEBUG_ASSERT(MALI_TRUE == found);
+ MALI_DEBUG_ASSERT(0 < (*old_count));
+ MALI_DEBUG_ASSERT((*old_count) == old_counted);
+ MALI_DEBUG_ASSERT((*new_count) == new_counted);
+#endif
+
+ _mali_osk_list_move(&group->executor_list, new_list);
+ (*old_count)--;
+ (*new_count)++;
+}
+
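+/* Add a physical PP group to new_list and bump the list counter */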
+static void mali_executor_set_state_pp_physical(struct mali_group *group,
+ _mali_osk_list_t *new_list,
+ u32 *new_count)
+{
+ _mali_osk_list_add(&group->executor_list, new_list);
+ (*new_count)++;
+}
+
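+/*
+ * Check whether a group is in the given executor state; for physical PP
+ * groups the state is determined by which list the group is on.
+ */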
+static mali_bool mali_executor_group_is_in_state(struct mali_group *group,
+ enum mali_executor_state_t state)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ if (gp_group == group) {
+ if (gp_group_state == state) {
+ return MALI_TRUE;
+ }
+ } else if (virtual_group == group || mali_group_is_in_virtual(group)) {
+ if (virtual_group_state == state) {
+ return MALI_TRUE;
+ }
+ } else {
+ /* Physical PP group */
+ struct mali_group *group_iter;
+ struct mali_group *temp;
+ _mali_osk_list_t *list;
+
+ if (EXEC_STATE_DISABLED == state) {
+ list = &group_list_disabled;
+ } else if (EXEC_STATE_INACTIVE == state) {
+ list = &group_list_inactive;
+ } else if (EXEC_STATE_IDLE == state) {
+ list = &group_list_idle;
+ } else {
+ MALI_DEBUG_ASSERT(EXEC_STATE_WORKING == state);
+ list = &group_list_working;
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group_iter, temp, list,
+ struct mali_group, executor_list) {
+ if (group_iter == group) {
+ return MALI_TRUE;
+ }
+ }
+ }
+
+ /* group not in correct state */
+ return MALI_FALSE;
+}
+
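+/*
+ * Move a disabled group to the inactive state, then try to activate it;
+ * on success move it on to the idle state (possibly rejoining the virtual
+ * group).
+ */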
+static void mali_executor_group_enable_internal(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED));
+
+ /* Put into inactive state (== "lowest" enabled state) */
+ if (group == gp_group) {
+ MALI_DEBUG_ASSERT(EXEC_STATE_DISABLED == gp_group_state);
+ gp_group_state = EXEC_STATE_INACTIVE;
+ } else {
+ mali_executor_change_state_pp_physical(group,
+ &group_list_disabled,
+ &group_list_disabled_count,
+ &group_list_inactive,
+ &group_list_inactive_count);
+
+ ++num_physical_pp_cores_enabled;
+ MALI_DEBUG_PRINT(4, ("Enabling group id %d \n", group->pp_core->core_id));
+ }
+
+ if (MALI_GROUP_STATE_ACTIVE == mali_group_activate(group)) {
+ MALI_DEBUG_ASSERT(MALI_TRUE == mali_group_power_is_on(group));
+
+ /* Move from inactive to idle */
+ if (group == gp_group) {
+ gp_group_state = EXEC_STATE_IDLE;
+ } else {
+ mali_executor_change_state_pp_physical(group,
+ &group_list_inactive,
+ &group_list_inactive_count,
+ &group_list_idle,
+ &group_list_idle_count);
+
+ if (mali_executor_has_virtual_group()) {
+ if (mali_executor_physical_rejoin_virtual(group)) {
+ mali_pm_update_async();
+ }
+ }
+ }
+ } else {
+ mali_pm_update_async();
+ }
+}
+
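+/*
+ * Disable a group. If it is currently working, only flag the request; the
+ * disable is completed when the running job finishes (see
+ * mali_executor_core_scale_in_group_complete()).
+ */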
+static void mali_executor_group_disable_internal(struct mali_group *group)
+{
+ mali_bool working;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(!mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED));
+
+ working = mali_executor_group_is_in_state(group, EXEC_STATE_WORKING);
+ if (MALI_TRUE == working) {
+ /* Group is to be disabled once it completes its current work;
+ * when the virtual group completes, its child groups are also
+ * checked for this flag */
+ mali_group_set_disable_request(group, MALI_TRUE);
+ return;
+ }
+
+ /* Put into disabled state */
+ if (group == gp_group) {
+ /* GP group */
+ MALI_DEBUG_ASSERT(EXEC_STATE_WORKING != gp_group_state);
+ gp_group_state = EXEC_STATE_DISABLED;
+ } else {
+ if (mali_group_is_in_virtual(group)) {
+ /* A child group of the virtual group; remove it from the virtual group */
+ MALI_DEBUG_ASSERT(EXEC_STATE_WORKING != virtual_group_state);
+
+ mali_executor_set_state_pp_physical(group,
+ &group_list_disabled,
+ &group_list_disabled_count);
+
+ mali_group_remove_group(virtual_group, group);
+ mali_executor_disable_empty_virtual();
+ } else {
+ mali_executor_change_group_status_disabled(group);
+ }
+
+ --num_physical_pp_cores_enabled;
+ MALI_DEBUG_PRINT(4, ("Disabling group id %d \n", group->pp_core->core_id));
+ }
+
+ if (MALI_GROUP_STATE_INACTIVE != group->state) {
+ if (MALI_TRUE == mali_group_deactivate(group)) {
+ mali_pm_update_async();
+ }
+ }
+}
+
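+/*
+ * Notify every user space session that the number of enabled PP cores has
+ * changed (no-op on Mali-450/470).
+ */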
+static void mali_executor_notify_core_change(u32 num_cores)
+{
+ mali_bool done = MALI_FALSE;
+
+ if (mali_is_mali450() || mali_is_mali470()) {
+ return;
+ }
+
+ /*
+ * This function gets a bit complicated because we can't hold the session lock while
+ * allocating notification objects.
+ */
+ while (!done) {
+ u32 i;
+ u32 num_sessions_alloc;
+ u32 num_sessions_with_lock;
+ u32 used_notification_objects = 0;
+ _mali_osk_notification_t **notobjs;
+
+ /* Pre-allocate the number of notification objects we need right now (might change after the lock has been taken) */
+ num_sessions_alloc = mali_session_get_count();
+ if (0 == num_sessions_alloc) {
+ /* No sessions to report to */
+ return;
+ }
+
+ notobjs = (_mali_osk_notification_t **)_mali_osk_malloc(sizeof(_mali_osk_notification_t *) * num_sessions_alloc);
+ if (NULL == notobjs) {
+ MALI_PRINT_ERROR(("Failed to notify user space session about num PP core change (alloc failure)\n"));
+ /* there is probably no point in trying again, system must be really low on memory and probably unusable now anyway */
+ return;
+ }
+
+ for (i = 0; i < num_sessions_alloc; i++) {
+ notobjs[i] = _mali_osk_notification_create(_MALI_NOTIFICATION_PP_NUM_CORE_CHANGE, sizeof(_mali_uk_pp_num_cores_changed_s));
+ if (NULL != notobjs[i]) {
+ _mali_uk_pp_num_cores_changed_s *data = notobjs[i]->result_buffer;
+ data->number_of_enabled_cores = num_cores;
+ } else {
+ MALI_PRINT_ERROR(("Failed to notify user space session about num PP core change (alloc failure %u)\n", i));
+ }
+ }
+
+ mali_session_lock();
+
+ /* number of sessions will not change while we hold the lock */
+ num_sessions_with_lock = mali_session_get_count();
+
+ if (num_sessions_alloc >= num_sessions_with_lock) {
+ /* We have allocated enough notification objects for all the sessions atm */
+ struct mali_session_data *session, *tmp;
+ MALI_SESSION_FOREACH(session, tmp, link) {
+ MALI_DEBUG_ASSERT(used_notification_objects < num_sessions_alloc);
+ if (NULL != notobjs[used_notification_objects]) {
+ mali_session_send_notification(session, notobjs[used_notification_objects]);
+ notobjs[used_notification_objects] = NULL; /* Don't track this notification object any more */
+ }
+ used_notification_objects++;
+ }
+ done = MALI_TRUE;
+ }
+
+ mali_session_unlock();
+
+ /* Delete any remaining/unused notification objects */
+ for (; used_notification_objects < num_sessions_alloc; used_notification_objects++) {
+ if (NULL != notobjs[used_notification_objects]) {
+ _mali_osk_notification_delete(notobjs[used_notification_objects]);
+ }
+ }
+
+ _mali_osk_free(notobjs);
+ }
+}
+
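+/* Wait-queue predicate: MALI_TRUE once no PP group still has a pending disable request */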
+static mali_bool mali_executor_core_scaling_is_done(void *data)
+{
+ u32 i;
+ u32 num_groups;
+ mali_bool ret = MALI_TRUE;
+
+ MALI_IGNORE(data);
+
+ mali_executor_lock();
+
+ num_groups = mali_group_get_glob_num_groups();
+
+ for (i = 0; i < num_groups; i++) {
+ struct mali_group *group = mali_group_get_glob_group(i);
+
+ if (NULL != group) {
+ if (MALI_TRUE == group->disable_requested && NULL != mali_group_get_pp_core(group)) {
+ ret = MALI_FALSE;
+ break;
+ }
+ }
+ }
+ mali_executor_unlock();
+
+ return ret;
+}
+
+static void mali_executor_wq_notify_core_change(void *arg)
+{
+ MALI_IGNORE(arg);
+
+ if (mali_is_mali450() || mali_is_mali470()) {
+ return;
+ }
+
+ _mali_osk_wait_queue_wait_event(executor_notify_core_change_wait_queue,
+ mali_executor_core_scaling_is_done, NULL);
+
+ mali_executor_notify_core_change(num_physical_pp_cores_enabled);
+}
+
+/**
+ * Clear all disable requests left over from the _last_ core scaling operation.
+ */
+static void mali_executor_core_scaling_reset(void)
+{
+ u32 i;
+ u32 num_groups;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ num_groups = mali_group_get_glob_num_groups();
+
+ for (i = 0; i < num_groups; i++) {
+ struct mali_group *group = mali_group_get_glob_group(i);
+
+ if (NULL != group) {
+ group->disable_requested = MALI_FALSE;
+ }
+ }
+
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ core_scaling_delay_up_mask[i] = 0;
+ }
+}
+
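+/*
+ * Scale the number of enabled physical PP cores to target_core_nr, using the
+ * PM power-cost masks to pick which domains to disable and enable. Cores
+ * that cannot be enabled yet (because cores to be disabled are still
+ * working) are recorded in core_scaling_delay_up_mask for later.
+ */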
+static void mali_executor_core_scale(unsigned int target_core_nr)
+{
+ int current_core_scaling_mask[MALI_MAX_NUMBER_OF_DOMAINS] = { 0 };
+ int target_core_scaling_mask[MALI_MAX_NUMBER_OF_DOMAINS] = { 0 };
+ int i;
+
+ MALI_DEBUG_ASSERT(0 < target_core_nr);
+ MALI_DEBUG_ASSERT(num_physical_pp_cores_total >= target_core_nr);
+
+ mali_executor_lock();
+
+ if (target_core_nr < num_physical_pp_cores_enabled) {
+ MALI_DEBUG_PRINT(2, ("Requesting %d cores: disabling %d cores\n", target_core_nr, num_physical_pp_cores_enabled - target_core_nr));
+ } else {
+ MALI_DEBUG_PRINT(2, ("Requesting %d cores: enabling %d cores\n", target_core_nr, target_core_nr - num_physical_pp_cores_enabled));
+ }
+
+ /* When a new core scaling request comes in, discard the unfinished
+ * part of the previous request. This is safe because everything is
+ * protected by a single lock (the executor lock). */
+ mali_executor_core_scaling_reset();
+
+ mali_pm_get_best_power_cost_mask(num_physical_pp_cores_enabled, current_core_scaling_mask);
+ mali_pm_get_best_power_cost_mask(target_core_nr, target_core_scaling_mask);
+
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ target_core_scaling_mask[i] = target_core_scaling_mask[i] - current_core_scaling_mask[i];
+ MALI_DEBUG_PRINT(5, ("target_core_scaling_mask[%d] = %d\n", i, target_core_scaling_mask[i]));
+ }
+
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ if (0 > target_core_scaling_mask[i]) {
+ struct mali_pm_domain *domain;
+
+ domain = mali_pm_domain_get_from_index(i);
+
+ /* Domain is valid and has pp cores */
+ if ((NULL != domain) && !(_mali_osk_list_empty(&domain->group_list))) {
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &domain->group_list, struct mali_group, pm_domain_list) {
+ if (NULL != mali_group_get_pp_core(group) && (!mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED))
+ && (!mali_group_is_virtual(group))) {
+ mali_executor_group_disable_internal(group);
+ target_core_scaling_mask[i]++;
+ if ((0 == target_core_scaling_mask[i])) {
+ break;
+ }
+
+ }
+ }
+ }
+ }
+ }
+
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ /*
+ * target_core_scaling_mask[i] greater than 0 means we need
+ * to enable some PP cores in the domain whose index is i.
+ */
+ if (0 < target_core_scaling_mask[i]) {
+ struct mali_pm_domain *domain;
+
+ if (num_physical_pp_cores_enabled >= target_core_nr) {
+ break;
+ }
+
+ domain = mali_pm_domain_get_from_index(i);
+
+ /* Domain is valid and has pp cores */
+ if ((NULL != domain) && !(_mali_osk_list_empty(&domain->group_list))) {
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &domain->group_list, struct mali_group, pm_domain_list) {
+ if (NULL != mali_group_get_pp_core(group) && mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED)
+ && (!mali_group_is_virtual(group))) {
+ mali_executor_group_enable_internal(group);
+ target_core_scaling_mask[i]--;
+
+ if ((0 == target_core_scaling_mask[i]) || num_physical_pp_cores_enabled == target_core_nr) {
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ /*
+ * Some PP cores may still not have been enabled here, because the
+ * cores that first need to be disabled are still in the working state.
+ */
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ if (0 < target_core_scaling_mask[i]) {
+ core_scaling_delay_up_mask[i] = target_core_scaling_mask[i];
+ }
+ }
+
+ mali_executor_schedule();
+ mali_executor_unlock();
+}
+
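+/*
+ * Called when a group with a pending disable request completes its job:
+ * perform the deferred disable and enable any cores that were delayed by
+ * the last core scaling request.
+ */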
+static void mali_executor_core_scale_in_group_complete(struct mali_group *group)
+{
+ int num_pp_cores_disabled = 0;
+ int num_pp_cores_to_enable = 0;
+ int i;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(MALI_TRUE == mali_group_disable_requested(group));
+
+ /* Disable the flagged child groups of a virtual group, or the group itself */
+ if (mali_group_is_virtual(group)) {
+ struct mali_group *child;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+ if (MALI_TRUE == mali_group_disable_requested(child)) {
+ mali_group_set_disable_request(child, MALI_FALSE);
+ mali_executor_group_disable_internal(child);
+ num_pp_cores_disabled++;
+ }
+ }
+ mali_group_set_disable_request(group, MALI_FALSE);
+ } else {
+ mali_executor_group_disable_internal(group);
+ mali_group_set_disable_request(group, MALI_FALSE);
+ if (NULL != mali_group_get_pp_core(group)) {
+ num_pp_cores_disabled++;
+ }
+ }
+
+ num_pp_cores_to_enable = num_pp_cores_disabled;
+
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ if (0 < core_scaling_delay_up_mask[i]) {
+ struct mali_pm_domain *domain;
+
+ if (0 == num_pp_cores_to_enable) {
+ break;
+ }
+
+ domain = mali_pm_domain_get_from_index(i);
+
+ /* Domain is valid and has pp cores */
+ if ((NULL != domain) && !(_mali_osk_list_empty(&domain->group_list))) {
+ struct mali_group *disabled_group;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(disabled_group, temp, &domain->group_list, struct mali_group, pm_domain_list) {
+ if (NULL != mali_group_get_pp_core(disabled_group) && mali_executor_group_is_in_state(disabled_group, EXEC_STATE_DISABLED)) {
+ mali_executor_group_enable_internal(disabled_group);
+ core_scaling_delay_up_mask[i]--;
+ num_pp_cores_to_enable--;
+
+ if ((0 == core_scaling_delay_up_mask[i]) || 0 == num_pp_cores_to_enable) {
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ _mali_osk_wait_queue_wake_up(executor_notify_core_change_wait_queue);
+}
+
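+/* Move a physical PP group from the idle or inactive list to the disabled list */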
+static void mali_executor_change_group_status_disabled(struct mali_group *group)
+{
+ /* Physical PP group */
+ mali_bool idle;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ idle = mali_executor_group_is_in_state(group, EXEC_STATE_IDLE);
+ if (MALI_TRUE == idle) {
+ mali_executor_change_state_pp_physical(group,
+ &group_list_idle,
+ &group_list_idle_count,
+ &group_list_disabled,
+ &group_list_disabled_count);
+ } else {
+ mali_executor_change_state_pp_physical(group,
+ &group_list_inactive,
+ &group_list_inactive_count,
+ &group_list_disabled,
+ &group_list_disabled_count);
+ }
+}
+
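+/*
+ * For each idle physical group: rejoin the virtual group (Mali-450/470) or,
+ * if deactivate_idle_group is set, deactivate it (Mali-300/400). Returns
+ * MALI_TRUE if the caller must trigger a PM update.
+ */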
+static mali_bool mali_executor_deactivate_list_idle(mali_bool deactivate_idle_group)
+{
+ mali_bool trigger_pm_update = MALI_FALSE;
+
+ if (group_list_idle_count > 0) {
+ if (mali_executor_has_virtual_group()) {
+
+ /* Rejoin the virtual group on Mali-450/470 */
+
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp,
+ &group_list_idle,
+ struct mali_group, executor_list) {
+ if (mali_executor_physical_rejoin_virtual(
+ group)) {
+ trigger_pm_update = MALI_TRUE;
+ }
+ }
+ } else if (deactivate_idle_group) {
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ /* Deactivate group on Mali-300/400 */
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp,
+ &group_list_idle,
+ struct mali_group, executor_list) {
+ if (mali_group_deactivate(group)) {
+ trigger_pm_update = MALI_TRUE;
+ }
+
+ /* Move from idle to inactive */
+ mali_executor_change_state_pp_physical(group,
+ &group_list_idle,
+ &group_list_idle_count,
+ &group_list_inactive,
+ &group_list_inactive_count);
+ }
+ }
+ }
+
+ return trigger_pm_update;
+}
+
+void mali_executor_running_status_print(void)
+{
+ struct mali_group *group = NULL;
+ struct mali_group *temp = NULL;
+
+ MALI_PRINT(("GP running job: %p\n", gp_group->gp_running_job));
+ if ((gp_group->gp_core) && (gp_group->is_working)) {
+ mali_group_dump_status(gp_group);
+ }
+ MALI_PRINT(("Physical PP groups in WORKING state (count = %u):\n", group_list_working_count));
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_working, struct mali_group, executor_list) {
+ MALI_PRINT(("PP running job: %p, subjob %d \n", group->pp_running_job, group->pp_running_sub_job));
+ mali_group_dump_status(group);
+ }
+ MALI_PRINT(("Physical PP groups in INACTIVE state (count = %u):\n", group_list_inactive_count));
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_inactive, struct mali_group, executor_list) {
+ MALI_PRINT(("\tPP status %d, SW power: %s\n", group->state, group->power_is_on ? "On" : "Off"));
+ MALI_PRINT(("\tPP #%d: %s\n", group->pp_core->core_id, group->pp_core->hw_core.description));
+ }
+ MALI_PRINT(("Physical PP groups in IDLE state (count = %u):\n", group_list_idle_count));
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, executor_list) {
+ MALI_PRINT(("\tPP status %d, SW power: %s\n", group->state, group->power_is_on ? "On" : "Off"));
+ MALI_PRINT(("\tPP #%d: %s\n", group->pp_core->core_id, group->pp_core->hw_core.description));
+ }
+ MALI_PRINT(("Physical PP groups in DISABLED state (count = %u):\n", group_list_disabled_count));
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_disabled, struct mali_group, executor_list) {
+ MALI_PRINT(("\tPP status %d, SW power: %s\n", group->state, group->power_is_on ? "On" : "Off"));
+ MALI_PRINT(("\tPP #%d: %s\n", group->pp_core->core_id, group->pp_core->hw_core.description));
+ }
+
+ if (mali_executor_has_virtual_group()) {
+ MALI_PRINT(("Virtual group running job: %p\n", virtual_group->pp_running_job));
+ MALI_PRINT(("Virtual group status: %d\n", virtual_group_state));
+ MALI_PRINT(("Virtual group->status: %d\n", virtual_group->state));
+ MALI_PRINT(("\tSW power: %s\n", virtual_group->power_is_on ? "On" : "Off"));
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &virtual_group->group_list,
+ struct mali_group, group_list) {
+ int i = 0;
+ MALI_PRINT(("\tchild group(%s) running job: %p\n", group->pp_core->hw_core.description, group->pp_running_job));
+ MALI_PRINT(("\tchild group(%s)->status: %d\n", group->pp_core->hw_core.description, group->state));
+ MALI_PRINT(("\tchild group(%s) SW power: %s\n", group->pp_core->hw_core.description, group->power_is_on ? "On" : "Off"));
+ if (group->pm_domain) {
+ MALI_PRINT(("\tPower domain: id %u\n", mali_pm_domain_get_id(group->pm_domain)));
+ MALI_PRINT(("\tMask:0x%04x \n", mali_pm_domain_get_mask(group->pm_domain)));
+ MALI_PRINT(("\tUse-count:%u \n", mali_pm_domain_get_use_count(group->pm_domain)));
+ MALI_PRINT(("\tCurrent power status:%s \n", (mali_pm_domain_get_mask(group->pm_domain)& mali_pm_get_current_mask()) ? "On" : "Off"));
+ MALI_PRINT(("\tWanted power status:%s \n", (mali_pm_domain_get_mask(group->pm_domain)& mali_pm_get_wanted_mask()) ? "On" : "Off"));
+ }
+
+ for (i = 0; i < 2; i++) {
+ if (NULL != group->l2_cache_core[i]) {
+ struct mali_pm_domain *domain;
+ domain = mali_l2_cache_get_pm_domain(group->l2_cache_core[i]);
+ MALI_PRINT(("\t L2(index %d) group SW power: %s\n", i, group->l2_cache_core[i]->power_is_on ? "On" : "Off"));
+ if (domain) {
+ MALI_PRINT(("\tL2 Power domain: id %u\n", mali_pm_domain_get_id(domain)));
+ MALI_PRINT(("\tL2 Mask:0x%04x \n", mali_pm_domain_get_mask(domain)));
+ MALI_PRINT(("\tL2 Use-count:%u \n", mali_pm_domain_get_use_count(domain)));
+ MALI_PRINT(("\tL2 Current power status:%s \n", (mali_pm_domain_get_mask(domain) & mali_pm_get_current_mask()) ? "On" : "Off"));
+ MALI_PRINT(("\tL2 Wanted power status:%s \n", (mali_pm_domain_get_mask(domain) & mali_pm_get_wanted_mask()) ? "On" : "Off"));
+ }
+ }
+ }
+ }
+ if (EXEC_STATE_WORKING == virtual_group_state) {
+ mali_group_dump_status(virtual_group);
+ }
+ }
+}
+
+void mali_executor_status_dump(void)
+{
+ mali_executor_lock();
+ mali_scheduler_lock();
+
+ /* print schedule queue status */
+ mali_scheduler_gp_pp_job_queue_print();
+
+ mali_scheduler_unlock();
+ mali_executor_unlock();
+}
diff --git a/drivers/gpu/arm/utgard/common/mali_executor.h b/drivers/gpu/arm/utgard/common/mali_executor.h
new file mode 100644
index 000000000000..a756d3f40c2a
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_executor.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2012, 2014-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_EXECUTOR_H__
+#define __MALI_EXECUTOR_H__
+
+#include "mali_osk.h"
+#include "mali_scheduler_types.h"
+#include "mali_kernel_common.h"
+
+typedef enum {
+ MALI_EXECUTOR_HINT_GP_BOUND = 0
+#define MALI_EXECUTOR_HINT_MAX 1
+} mali_executor_hint;
+
+extern mali_bool mali_executor_hints[MALI_EXECUTOR_HINT_MAX];
+
+/* forward declare struct instead of using include */
+struct mali_session_data;
+struct mali_group;
+struct mali_pp_core;
+
+extern _mali_osk_spinlock_irq_t *mali_executor_lock_obj;
+
+#define MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD() MALI_DEBUG_ASSERT_LOCK_HELD(mali_executor_lock_obj);
+
+_mali_osk_errcode_t mali_executor_initialize(void);
+void mali_executor_terminate(void);
+
+void mali_executor_populate(void);
+void mali_executor_depopulate(void);
+
+void mali_executor_suspend(void);
+void mali_executor_resume(void);
+
+u32 mali_executor_get_num_cores_total(void);
+u32 mali_executor_get_num_cores_enabled(void);
+struct mali_pp_core *mali_executor_get_virtual_pp(void);
+struct mali_group *mali_executor_get_virtual_group(void);
+
+void mali_executor_zap_all_active(struct mali_session_data *session);
+
+/**
+ * Schedule GP and PP according to bitmask.
+ *
+ * @param mask A scheduling bitmask.
+ * @param deferred_schedule MALI_TRUE if schedule should be deferred, MALI_FALSE if not.
+ */
+void mali_executor_schedule_from_mask(mali_scheduler_mask mask, mali_bool deferred_schedule);
+
+_mali_osk_errcode_t mali_executor_interrupt_gp(struct mali_group *group, mali_bool in_upper_half);
+_mali_osk_errcode_t mali_executor_interrupt_pp(struct mali_group *group, mali_bool in_upper_half);
+_mali_osk_errcode_t mali_executor_interrupt_mmu(struct mali_group *group, mali_bool in_upper_half);
+
+void mali_executor_group_oom(struct mali_group *group);
+void mali_executor_group_power_up(struct mali_group *groups[], u32 num_groups);
+void mali_executor_group_power_down(struct mali_group *groups[], u32 num_groups);
+
+void mali_executor_abort_session(struct mali_session_data *session);
+
+void mali_executor_core_scaling_enable(void);
+void mali_executor_core_scaling_disable(void);
+mali_bool mali_executor_core_scaling_is_enabled(void);
+
+void mali_executor_group_enable(struct mali_group *group);
+void mali_executor_group_disable(struct mali_group *group);
+mali_bool mali_executor_group_is_disabled(struct mali_group *group);
+
+int mali_executor_set_perf_level(unsigned int target_core_nr, mali_bool override);
+
+#if MALI_STATE_TRACKING
+u32 mali_executor_dump_state(char *buf, u32 size);
+#endif
+
+MALI_STATIC_INLINE void mali_executor_hint_enable(mali_executor_hint hint)
+{
+ MALI_DEBUG_ASSERT(hint < MALI_EXECUTOR_HINT_MAX);
+ mali_executor_hints[hint] = MALI_TRUE;
+}
+
+MALI_STATIC_INLINE void mali_executor_hint_disable(mali_executor_hint hint)
+{
+ MALI_DEBUG_ASSERT(hint < MALI_EXECUTOR_HINT_MAX);
+ mali_executor_hints[hint] = MALI_FALSE;
+}
+
+MALI_STATIC_INLINE mali_bool mali_executor_hint_is_enabled(mali_executor_hint hint)
+{
+ MALI_DEBUG_ASSERT(hint < MALI_EXECUTOR_HINT_MAX);
+ return mali_executor_hints[hint];
+}
+
+void mali_executor_running_status_print(void);
+void mali_executor_status_dump(void);
+void mali_executor_lock(void);
+void mali_executor_unlock(void);
+#endif /* __MALI_EXECUTOR_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_gp.c b/drivers/gpu/arm/utgard/common/mali_gp.c
new file mode 100644
index 000000000000..a690781837e9
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_gp.c
@@ -0,0 +1,357 @@
+/*
+ * Copyright (C) 2011-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_gp.h"
+#include "mali_hw_core.h"
+#include "mali_group.h"
+#include "mali_osk.h"
+#include "regs/mali_gp_regs.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#if defined(CONFIG_MALI400_PROFILING)
+#include "mali_osk_profiling.h"
+#endif
+
+static struct mali_gp_core *mali_global_gp_core = NULL;
+
+/* Interrupt handlers */
+static void mali_gp_irq_probe_trigger(void *data);
+static _mali_osk_errcode_t mali_gp_irq_probe_ack(void *data);
+
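+/*
+ * Create the (single) global GP core: map its register space, reset it,
+ * attach it to the given group and install the IRQ handlers. On failure
+ * the steps completed so far are unwound in reverse order.
+ */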
+struct mali_gp_core *mali_gp_create(const _mali_osk_resource_t *resource, struct mali_group *group)
+{
+ struct mali_gp_core *core = NULL;
+
+ MALI_DEBUG_ASSERT(NULL == mali_global_gp_core);
+ MALI_DEBUG_PRINT(2, ("Mali GP: Creating Mali GP core: %s\n", resource->description));
+
+ core = _mali_osk_malloc(sizeof(struct mali_gp_core));
+ if (NULL != core) {
+ if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALIGP2_REGISTER_ADDRESS_SPACE_SIZE)) {
+ _mali_osk_errcode_t ret;
+
+ ret = mali_gp_reset(core);
+
+ if (_MALI_OSK_ERR_OK == ret) {
+ ret = mali_group_add_gp_core(group, core);
+ if (_MALI_OSK_ERR_OK == ret) {
+ /* Setup IRQ handlers (which will do IRQ probing if needed) */
+ core->irq = _mali_osk_irq_init(resource->irq,
+ mali_group_upper_half_gp,
+ group,
+ mali_gp_irq_probe_trigger,
+ mali_gp_irq_probe_ack,
+ core,
+ resource->description);
+ if (NULL != core->irq) {
+ MALI_DEBUG_PRINT(4, ("Mali GP: set global gp core from 0x%08X to 0x%08X\n", mali_global_gp_core, core));
+ mali_global_gp_core = core;
+
+ return core;
+ } else {
+ MALI_PRINT_ERROR(("Mali GP: Failed to setup interrupt handlers for GP core %s\n", core->hw_core.description));
+ }
+ mali_group_remove_gp_core(group);
+ } else {
+ MALI_PRINT_ERROR(("Mali GP: Failed to add core %s to group\n", core->hw_core.description));
+ }
+ }
+ mali_hw_core_delete(&core->hw_core);
+ }
+
+ _mali_osk_free(core);
+ } else {
+ MALI_PRINT_ERROR(("Failed to allocate memory for GP core\n"));
+ }
+
+ return NULL;
+}
+
+void mali_gp_delete(struct mali_gp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ _mali_osk_irq_term(core->irq);
+ mali_hw_core_delete(&core->hw_core);
+ mali_global_gp_core = NULL;
+ _mali_osk_free(core);
+}
+
+void mali_gp_stop_bus(struct mali_gp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_STOP_BUS);
+}
+
+_mali_osk_errcode_t mali_gp_stop_bus_wait(struct mali_gp_core *core)
+{
+ int i;
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ /* Send the stop bus command. */
+ mali_gp_stop_bus(core);
+
+ /* Wait for bus to be stopped */
+ for (i = 0; i < MALI_REG_POLL_COUNT_SLOW; i++) {
+ if (mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS) & MALIGP2_REG_VAL_STATUS_BUS_STOPPED) {
+ break;
+ }
+ }
+
+ if (MALI_REG_POLL_COUNT_SLOW == i) {
+ MALI_PRINT_ERROR(("Mali GP: Failed to stop bus on %s\n", core->hw_core.description));
+ return _MALI_OSK_ERR_FAULT;
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_gp_hard_reset(struct mali_gp_core *core)
+{
+ const u32 reset_wait_target_register = MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_LIMIT;
+ const u32 reset_invalid_value = 0xC0FFE000;
+ const u32 reset_check_value = 0xC01A0000;
+ const u32 reset_default_value = 0;
+ int i;
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+ MALI_DEBUG_PRINT(4, ("Mali GP: Hard reset of core %s\n", core->hw_core.description));
+
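+	/*
+	 * Reset trick: write a dummy value to a scratch register (the
+	 * PERF_CNT_0_LIMIT register has no side effects here), issue the
+	 * reset command, then repeatedly write a check value to the same
+	 * register. Once the check value reads back, the register file has
+	 * come out of reset and the core is ready again.
+	 */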
+ mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_invalid_value);
+
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_RESET);
+
+ for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) {
+ mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_check_value);
+ if (reset_check_value == mali_hw_core_register_read(&core->hw_core, reset_wait_target_register)) {
+ break;
+ }
+ }
+
+ if (MALI_REG_POLL_COUNT_FAST == i) {
+ MALI_PRINT_ERROR(("Mali GP: The hard reset loop didn't work, unable to recover\n"));
+ }
+
+ mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_default_value); /* set it back to the default */
+ /* Re-enable interrupts */
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_MASK_ALL);
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
+
+}
+
+void mali_gp_reset_async(struct mali_gp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ MALI_DEBUG_PRINT(4, ("Mali GP: Reset of core %s\n", core->hw_core.description));
+
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, 0); /* disable the IRQs */
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALI400GP_REG_VAL_IRQ_RESET_COMPLETED);
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALI400GP_REG_VAL_CMD_SOFT_RESET);
+
+}
+
+_mali_osk_errcode_t mali_gp_reset_wait(struct mali_gp_core *core)
+{
+ int i;
+ u32 rawstat = 0;
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) {
+ rawstat = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT);
+ if (rawstat & MALI400GP_REG_VAL_IRQ_RESET_COMPLETED) {
+ break;
+ }
+ }
+
+ if (i == MALI_REG_POLL_COUNT_FAST) {
+ MALI_PRINT_ERROR(("Mali GP: Failed to reset core %s, rawstat: 0x%08x\n",
+ core->hw_core.description, rawstat));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Re-enable interrupts */
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_MASK_ALL);
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_gp_reset(struct mali_gp_core *core)
+{
+ mali_gp_reset_async(core);
+ return mali_gp_reset_wait(core);
+}
+
+void mali_gp_job_start(struct mali_gp_core *core, struct mali_gp_job *job)
+{
+ u32 startcmd = 0;
+ u32 *frame_registers = mali_gp_job_get_frame_registers(job);
+ u32 counter_src0 = mali_gp_job_get_perf_counter_src0(job);
+ u32 counter_src1 = mali_gp_job_get_perf_counter_src1(job);
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ if (mali_gp_job_has_vs_job(job)) {
+ startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_VS;
+ }
+
+ if (mali_gp_job_has_plbu_job(job)) {
+ startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_PLBU;
+ }
+
+ MALI_DEBUG_ASSERT(0 != startcmd);
+
+ mali_hw_core_register_write_array_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR, frame_registers, MALIGP2_NUM_REGS_FRAME);
+
+ if (MALI_HW_CORE_NO_COUNTER != counter_src0) {
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC, counter_src0);
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE);
+ }
+ if (MALI_HW_CORE_NO_COUNTER != counter_src1) {
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC, counter_src1);
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE);
+ }
+
+ MALI_DEBUG_PRINT(3, ("Mali GP: Starting job (0x%08x) on core %s with command 0x%08X\n", job, core->hw_core.description, startcmd));
+
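+	/* Tell the GP core to (re)load the PLBU heap allocation registers written above */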
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_UPDATE_PLBU_ALLOC);
+
+ /* Barrier to make sure the previous register write is finished */
+ _mali_osk_write_mem_barrier();
+
+ /* This is the command that starts the core.
+ *
+	 * Don't actually run the job if PROFILING_SKIP_GP_JOBS is defined;
+	 * just force the core to assert the completion interrupt.
+ */
+#if !defined(PROFILING_SKIP_GP_JOBS)
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, startcmd);
+#else
+ {
+ u32 bits = 0;
+
+ if (mali_gp_job_has_vs_job(job))
+ bits = MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST;
+ if (mali_gp_job_has_plbu_job(job))
+ bits |= MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST;
+
+ mali_hw_core_register_write_relaxed(&core->hw_core,
+ MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT, bits);
+ }
+#endif
+
+ /* Barrier to make sure the previous register write is finished */
+ _mali_osk_write_mem_barrier();
+}
+
+void mali_gp_resume_with_new_heap(struct mali_gp_core *core, u32 start_addr, u32 end_addr)
+{
+ u32 irq_readout;
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ irq_readout = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT);
+
+ if (irq_readout & MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM) {
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, (MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | MALIGP2_REG_VAL_IRQ_HANG));
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED); /* re-enable interrupts */
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR, start_addr);
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR, end_addr);
+
+ MALI_DEBUG_PRINT(3, ("Mali GP: Resuming job\n"));
+
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_UPDATE_PLBU_ALLOC);
+ _mali_osk_write_mem_barrier();
+ }
+ /*
+ * else: core has been reset between PLBU_OUT_OF_MEM interrupt and this new heap response.
+ * A timeout or a page fault on Mali-200 PP core can cause this behaviour.
+ */
+}
+
+u32 mali_gp_core_get_version(struct mali_gp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+ return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_VERSION);
+}
+
+struct mali_gp_core *mali_gp_get_global_gp_core(void)
+{
+ return mali_global_gp_core;
+}
+
+/* ------------- interrupt handling below ------------------ */
+static void mali_gp_irq_probe_trigger(void *data)
+{
+ struct mali_gp_core *core = (struct mali_gp_core *)data;
+
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT, MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR);
+ _mali_osk_mem_barrier();
+}
+
+static _mali_osk_errcode_t mali_gp_irq_probe_ack(void *data)
+{
+ struct mali_gp_core *core = (struct mali_gp_core *)data;
+ u32 irq_readout;
+
+ irq_readout = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_STAT);
+ if (MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR & irq_readout) {
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR);
+ _mali_osk_mem_barrier();
+ return _MALI_OSK_ERR_OK;
+ }
+
+ return _MALI_OSK_ERR_FAULT;
+}
+
+/* ------ local helper functions below --------- */
+#if MALI_STATE_TRACKING
+u32 mali_gp_dump_state(struct mali_gp_core *core, char *buf, u32 size)
+{
+ int n = 0;
+
+ n += _mali_osk_snprintf(buf + n, size - n, "\tGP: %s\n", core->hw_core.description);
+
+ return n;
+}
+#endif
+
+void mali_gp_update_performance_counters(struct mali_gp_core *core, struct mali_gp_job *job)
+{
+ u32 val0 = 0;
+ u32 val1 = 0;
+ u32 counter_src0 = mali_gp_job_get_perf_counter_src0(job);
+ u32 counter_src1 = mali_gp_job_get_perf_counter_src1(job);
+
+ if (MALI_HW_CORE_NO_COUNTER != counter_src0) {
+ val0 = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_VALUE);
+ mali_gp_job_set_perf_counter_value0(job, val0);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ _mali_osk_profiling_report_hw_counter(COUNTER_VP_0_C0, val0);
+ _mali_osk_profiling_record_global_counters(COUNTER_VP_0_C0, val0);
+#endif
+
+ }
+
+ if (MALI_HW_CORE_NO_COUNTER != counter_src1) {
+ val1 = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE);
+ mali_gp_job_set_perf_counter_value1(job, val1);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ _mali_osk_profiling_report_hw_counter(COUNTER_VP_0_C1, val1);
+ _mali_osk_profiling_record_global_counters(COUNTER_VP_0_C1, val1);
+#endif
+ }
+}
diff --git a/drivers/gpu/arm/utgard/common/mali_gp.h b/drivers/gpu/arm/utgard/common/mali_gp.h
new file mode 100644
index 000000000000..8d5f69c23229
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_gp.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2011-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_GP_H__
+#define __MALI_GP_H__
+
+#include "mali_osk.h"
+#include "mali_gp_job.h"
+#include "mali_hw_core.h"
+#include "regs/mali_gp_regs.h"
+
+struct mali_group;
+
+/**
+ * Definition of the GP core struct
+ * Used to track a GP core in the system.
+ */
+struct mali_gp_core {
+ struct mali_hw_core hw_core; /**< Common for all HW cores */
+ _mali_osk_irq_t *irq; /**< IRQ handler */
+};
+
+_mali_osk_errcode_t mali_gp_initialize(void);
+void mali_gp_terminate(void);
+
+struct mali_gp_core *mali_gp_create(const _mali_osk_resource_t *resource, struct mali_group *group);
+void mali_gp_delete(struct mali_gp_core *core);
+
+void mali_gp_stop_bus(struct mali_gp_core *core);
+_mali_osk_errcode_t mali_gp_stop_bus_wait(struct mali_gp_core *core);
+void mali_gp_reset_async(struct mali_gp_core *core);
+_mali_osk_errcode_t mali_gp_reset_wait(struct mali_gp_core *core);
+void mali_gp_hard_reset(struct mali_gp_core *core);
+_mali_osk_errcode_t mali_gp_reset(struct mali_gp_core *core);
+
+void mali_gp_job_start(struct mali_gp_core *core, struct mali_gp_job *job);
+void mali_gp_resume_with_new_heap(struct mali_gp_core *core, u32 start_addr, u32 end_addr);
+
+u32 mali_gp_core_get_version(struct mali_gp_core *core);
+
+struct mali_gp_core *mali_gp_get_global_gp_core(void);
+
+#if MALI_STATE_TRACKING
+u32 mali_gp_dump_state(struct mali_gp_core *core, char *buf, u32 size);
+#endif
+
+void mali_gp_update_performance_counters(struct mali_gp_core *core, struct mali_gp_job *job);
+
+MALI_STATIC_INLINE const char *mali_gp_core_description(struct mali_gp_core *core)
+{
+ return core->hw_core.description;
+}
+
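+/*
+ * Decode the masked interrupt status into a result code: no bits set
+ * means the interrupt was not for us, the VS/PLBU end-of-command-list
+ * bits map to (partial) success, the PLBU out-of-memory bit maps to
+ * OOM, and any other bit is treated as an error.
+ */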
+MALI_STATIC_INLINE enum mali_interrupt_result mali_gp_get_interrupt_result(struct mali_gp_core *core)
+{
+ u32 stat_used = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_STAT) &
+ MALIGP2_REG_VAL_IRQ_MASK_USED;
+
+ if (0 == stat_used) {
+ return MALI_INTERRUPT_RESULT_NONE;
+ } else if ((MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST |
+ MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST) == stat_used) {
+ return MALI_INTERRUPT_RESULT_SUCCESS;
+ } else if (MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST == stat_used) {
+ return MALI_INTERRUPT_RESULT_SUCCESS_VS;
+ } else if (MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST == stat_used) {
+ return MALI_INTERRUPT_RESULT_SUCCESS_PLBU;
+ } else if (MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM & stat_used) {
+ return MALI_INTERRUPT_RESULT_OOM;
+ }
+
+ return MALI_INTERRUPT_RESULT_ERROR;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_get_rawstat(struct mali_gp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+ return mali_hw_core_register_read(&core->hw_core,
+ MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT);
+}
+
+MALI_STATIC_INLINE u32 mali_gp_is_active(struct mali_gp_core *core)
+{
+ u32 status = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS);
+ return (status & MALIGP2_REG_VAL_STATUS_MASK_ACTIVE) ? MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE void mali_gp_mask_all_interrupts(struct mali_gp_core *core)
+{
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_NONE);
+}
+
+MALI_STATIC_INLINE void mali_gp_enable_interrupts(struct mali_gp_core *core, enum mali_interrupt_result exceptions)
+{
+ /* Enable all interrupts, except those specified in exceptions */
+ u32 value;
+
+ if (MALI_INTERRUPT_RESULT_SUCCESS_VS == exceptions) {
+ /* Enable all used except VS complete */
+ value = MALIGP2_REG_VAL_IRQ_MASK_USED &
+ ~MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST;
+ } else {
+ MALI_DEBUG_ASSERT(MALI_INTERRUPT_RESULT_SUCCESS_PLBU ==
+ exceptions);
+ /* Enable all used except PLBU complete */
+ value = MALIGP2_REG_VAL_IRQ_MASK_USED &
+ ~MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST;
+ }
+
+ mali_hw_core_register_write(&core->hw_core,
+ MALIGP2_REG_ADDR_MGMT_INT_MASK,
+ value);
+}
+
+MALI_STATIC_INLINE u32 mali_gp_read_plbu_alloc_start_addr(struct mali_gp_core *core)
+{
+ return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR);
+}
+
+#endif /* __MALI_GP_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_gp_job.c b/drivers/gpu/arm/utgard/common/mali_gp_job.c
new file mode 100644
index 000000000000..adc30a3408a8
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_gp_job.c
@@ -0,0 +1,301 @@
+/*
+ * Copyright (C) 2011-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_gp_job.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_uk_types.h"
+#include "mali_memory_virtual.h"
+#include "mali_memory_defer_bind.h"
+
+static u32 gp_counter_src0 = MALI_HW_CORE_NO_COUNTER; /**< Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */
+static u32 gp_counter_src1 = MALI_HW_CORE_NO_COUNTER; /**< Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
+static void _mali_gp_del_varying_allocations(struct mali_gp_job *job);
+
+
+static int _mali_gp_add_varying_allocations(struct mali_session_data *session,
+ struct mali_gp_job *job,
+ u32 *alloc,
+ u32 num)
+{
+ int i = 0;
+ struct mali_gp_allocation_node *alloc_node;
+ mali_mem_allocation *mali_alloc = NULL;
+ struct mali_vma_node *mali_vma_node = NULL;
+
+ for (i = 0 ; i < num ; i++) {
+ MALI_DEBUG_ASSERT(alloc[i]);
+ alloc_node = _mali_osk_calloc(1, sizeof(struct mali_gp_allocation_node));
+ if (alloc_node) {
+ INIT_LIST_HEAD(&alloc_node->node);
+			/* Find the Mali allocation structure by virtual address */
+ mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, alloc[i], 0);
+
+ if (likely(mali_vma_node)) {
+ mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
+ MALI_DEBUG_ASSERT(alloc[i] == mali_vma_node->vm_node.start);
+ } else {
+				MALI_DEBUG_PRINT(1, ("ERROR! _mali_gp_add_varying_allocations: can't find allocation %d by address 0x%x, num=%d\n", i, alloc[i], num));
+ MALI_DEBUG_ASSERT(0);
+ }
+ alloc_node->alloc = mali_alloc;
+			/* Add to the GP job's varying allocation list */
+ list_move(&alloc_node->node, &job->varying_alloc);
+ } else
+ goto fail;
+ }
+
+ return 0;
+fail:
+	MALI_DEBUG_PRINT(1, ("ERROR! _mali_gp_add_varying_allocations: failed to allocate memory!\n"));
+ _mali_gp_del_varying_allocations(job);
+ return -1;
+}
+
+
+static void _mali_gp_del_varying_allocations(struct mali_gp_job *job)
+{
+ struct mali_gp_allocation_node *alloc_node, *tmp_node;
+
+ list_for_each_entry_safe(alloc_node, tmp_node, &job->varying_alloc, node) {
+ list_del(&alloc_node->node);
+		_mali_osk_free(alloc_node);
+ }
+ INIT_LIST_HEAD(&job->varying_alloc);
+}
+
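+/*
+ * Create a GP job from the user space arguments: allocate the job and
+ * its notifications, copy in the argument block, resolve and defer-bind
+ * the varying allocations, and finally hook the job into the timeline.
+ */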
+struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_uk_gp_start_job_s *uargs, u32 id, struct mali_timeline_tracker *pp_tracker)
+{
+ struct mali_gp_job *job;
+ u32 perf_counter_flag;
+ u32 __user *memory_list = NULL;
+ struct mali_gp_allocation_node *alloc_node, *tmp_node;
+
+ job = _mali_osk_calloc(1, sizeof(struct mali_gp_job));
+ if (NULL != job) {
+ job->finished_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_FINISHED, sizeof(_mali_uk_gp_job_finished_s));
+ if (NULL == job->finished_notification) {
+ goto fail3;
+ }
+
+ job->oom_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_STALLED, sizeof(_mali_uk_gp_job_suspended_s));
+ if (NULL == job->oom_notification) {
+ goto fail2;
+ }
+
+ if (0 != _mali_osk_copy_from_user(&job->uargs, uargs, sizeof(_mali_uk_gp_start_job_s))) {
+ goto fail1;
+ }
+
+ perf_counter_flag = mali_gp_job_get_perf_counter_flag(job);
+
+		/* No performance counters came from user space, so pass the
+		 * debugfs / DS-5 provided global ones to the job object */
+ if (!((perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) ||
+ (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE))) {
+ mali_gp_job_set_perf_counter_src0(job, mali_gp_job_get_gp_counter_src0());
+ mali_gp_job_set_perf_counter_src1(job, mali_gp_job_get_gp_counter_src1());
+ }
+
+ _mali_osk_list_init(&job->list);
+ job->session = session;
+ job->id = id;
+ job->heap_base_addr = job->uargs.frame_registers[4];
+ job->heap_current_addr = job->uargs.frame_registers[4];
+ job->heap_grow_size = job->uargs.heap_grow_size;
+ job->perf_counter_value0 = 0;
+ job->perf_counter_value1 = 0;
+ job->pid = _mali_osk_get_pid();
+ job->tid = _mali_osk_get_tid();
+
+
+ INIT_LIST_HEAD(&job->varying_alloc);
+ INIT_LIST_HEAD(&job->vary_todo);
+ job->dmem = NULL;
+		/* Add the varying allocation list */
+ if (uargs->varying_alloc_num) {
+			/* Copy the varying list from user space */
+ job->varying_list = _mali_osk_calloc(1, sizeof(u32) * uargs->varying_alloc_num);
+ if (!job->varying_list) {
+				MALI_PRINT_ERROR(("Mali GP job: failed to allocate varying_list, varying_alloc_num = %d!\n", uargs->varying_alloc_num));
+ goto fail1;
+ }
+
+ memory_list = (u32 __user *)(uintptr_t)uargs->varying_alloc_list;
+
+ if (0 != _mali_osk_copy_from_user(job->varying_list, memory_list, sizeof(u32)*uargs->varying_alloc_num)) {
+ MALI_PRINT_ERROR(("Mali GP job: Failed to copy varying list from user space!\n"));
+ goto fail;
+ }
+
+ if (unlikely(_mali_gp_add_varying_allocations(session, job, job->varying_list,
+ uargs->varying_alloc_num))) {
+ MALI_PRINT_ERROR(("Mali GP job: _mali_gp_add_varying_allocations failed!\n"));
+ goto fail;
+ }
+
+			/* Do preparation for each allocation */
+ list_for_each_entry_safe(alloc_node, tmp_node, &job->varying_alloc, node) {
+ if (unlikely(_MALI_OSK_ERR_OK != mali_mem_defer_bind_allocation_prepare(alloc_node->alloc, &job->vary_todo))) {
+ MALI_PRINT_ERROR(("Mali GP job: mali_mem_defer_bind_allocation_prepare failed!\n"));
+ goto fail;
+ }
+ }
+
+ _mali_gp_del_varying_allocations(job);
+
+ /* bind varying here, to avoid memory latency issue. */
+ {
+ struct mali_defer_mem_block dmem_block;
+
+ INIT_LIST_HEAD(&dmem_block.free_pages);
+ atomic_set(&dmem_block.num_free_pages, 0);
+
+ if (mali_mem_prepare_mem_for_job(job, &dmem_block)) {
+ MALI_PRINT_ERROR(("Mali GP job: mali_mem_prepare_mem_for_job failed!\n"));
+ goto fail;
+ }
+ if (_MALI_OSK_ERR_OK != mali_mem_defer_bind(job->uargs.varying_memsize / _MALI_OSK_MALI_PAGE_SIZE, job, &dmem_block)) {
+					MALI_PRINT_ERROR(("Mali GP job: mali_mem_defer_bind failed! GP job %x\n", job));
+ goto fail;
+ }
+ }
+
+ if (uargs->varying_memsize > MALI_UK_BIG_VARYING_SIZE) {
+ job->big_job = 1;
+ }
+ }
+ job->pp_tracker = pp_tracker;
+ if (NULL != job->pp_tracker) {
+ /* Take a reference on PP job's tracker that will be released when the GP
+ job is done. */
+ mali_timeline_system_tracker_get(session->timeline_system, pp_tracker);
+ }
+
+ mali_timeline_tracker_init(&job->tracker, MALI_TIMELINE_TRACKER_GP, NULL, job);
+ mali_timeline_fence_copy_uk_fence(&(job->tracker.fence), &(job->uargs.fence));
+
+ return job;
+ } else {
+ MALI_PRINT_ERROR(("Mali GP job: _mali_osk_calloc failed!\n"));
+ return NULL;
+ }
+
+
+fail:
+ _mali_osk_free(job->varying_list);
+	/* Handle allocation failure: free all pending varying bind nodes */
+ {
+ struct mali_backend_bind_list *bkn, *bkn_tmp;
+ list_for_each_entry_safe(bkn, bkn_tmp , &job->vary_todo, node) {
+ list_del(&bkn->node);
+ _mali_osk_free(bkn);
+ }
+ }
+fail1:
+ _mali_osk_notification_delete(job->oom_notification);
+fail2:
+ _mali_osk_notification_delete(job->finished_notification);
+fail3:
+ _mali_osk_free(job);
+ return NULL;
+}
+
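+/*
+ * Delete a GP job: free the varying list and any pending defer-bind
+ * nodes, release the deferred memory and the pre-allocated
+ * notifications, then free the job itself. The job must already be
+ * unlinked from all queues and must have released its PP tracker.
+ */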
+void mali_gp_job_delete(struct mali_gp_job *job)
+{
+ struct mali_backend_bind_list *bkn, *bkn_tmp;
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT(NULL == job->pp_tracker);
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
+ _mali_osk_free(job->varying_list);
+
+	/* Free any varying bind nodes that are still pending */
+ list_for_each_entry_safe(bkn, bkn_tmp , &job->vary_todo, node) {
+ list_del(&bkn->node);
+ _mali_osk_free(bkn);
+ }
+
+ if (!list_empty(&job->vary_todo)) {
+ MALI_DEBUG_ASSERT(0);
+ }
+
+ mali_mem_defer_dmem_free(job);
+
+ /* de-allocate the pre-allocated oom notifications */
+ if (NULL != job->oom_notification) {
+ _mali_osk_notification_delete(job->oom_notification);
+ job->oom_notification = NULL;
+ }
+ if (NULL != job->finished_notification) {
+ _mali_osk_notification_delete(job->finished_notification);
+ job->finished_notification = NULL;
+ }
+
+ _mali_osk_free(job);
+}
+
+void mali_gp_job_list_add(struct mali_gp_job *job, _mali_osk_list_t *list)
+{
+ struct mali_gp_job *iter;
+ struct mali_gp_job *tmp;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+
+ /* Find position in list/queue where job should be added. */
+ _MALI_OSK_LIST_FOREACHENTRY_REVERSE(iter, tmp, list,
+ struct mali_gp_job, list) {
+
+ /* A span is used to handle job ID wrapping. */
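+		/*
+		 * Unsigned subtraction makes this comparison robust against
+		 * u32 wrap-around: e.g. for job id 0x00000001 and iter id
+		 * 0xFFFFFFFE the difference is 3, so the new job is still
+		 * ordered after iter as long as the difference is below
+		 * MALI_SCHEDULER_JOB_ID_SPAN.
+		 */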
+ bool job_is_after = (mali_gp_job_get_id(job) -
+ mali_gp_job_get_id(iter)) <
+ MALI_SCHEDULER_JOB_ID_SPAN;
+
+ if (job_is_after) {
+ break;
+ }
+ }
+
+ _mali_osk_list_add(&job->list, &iter->list);
+}
+
+u32 mali_gp_job_get_gp_counter_src0(void)
+{
+ return gp_counter_src0;
+}
+
+void mali_gp_job_set_gp_counter_src0(u32 counter)
+{
+ gp_counter_src0 = counter;
+}
+
+u32 mali_gp_job_get_gp_counter_src1(void)
+{
+ return gp_counter_src1;
+}
+
+void mali_gp_job_set_gp_counter_src1(u32 counter)
+{
+ gp_counter_src1 = counter;
+}
+
+mali_scheduler_mask mali_gp_job_signal_pp_tracker(struct mali_gp_job *job, mali_bool success)
+{
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ if (NULL != job->pp_tracker) {
+ schedule_mask |= mali_timeline_system_tracker_put(job->session->timeline_system, job->pp_tracker, MALI_FALSE == success);
+ job->pp_tracker = NULL;
+ }
+
+ return schedule_mask;
+}
diff --git a/drivers/gpu/arm/utgard/common/mali_gp_job.h b/drivers/gpu/arm/utgard/common/mali_gp_job.h
new file mode 100644
index 000000000000..f249439c7155
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_gp_job.h
@@ -0,0 +1,325 @@
+/*
+ * Copyright (C) 2011-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_GP_JOB_H__
+#define __MALI_GP_JOB_H__
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_uk_types.h"
+#include "mali_session.h"
+#include "mali_timeline.h"
+#include "mali_scheduler_types.h"
+#include "mali_scheduler.h"
+#include "mali_executor.h"
+#include "mali_timeline.h"
+
+struct mali_defer_mem;
+/**
+ * This structure represents a GP job
+ *
+ * The GP job object itself is not protected by any single lock,
+ * but relies on other locks instead (scheduler, executor and timeline lock).
+ * Think of the job object as moving between these subsystems throughout
+ * its lifetime. Different parts of the GP job struct are used by different
+ * subsystems. Accessor functions ensure that the correct lock is taken.
+ * Do NOT access any data members directly from outside this module!
+ */
+struct mali_gp_job {
+ /*
+ * These members are typically only set at creation,
+ * and only read later on.
+ * They do not require any lock protection.
+ */
+ _mali_uk_gp_start_job_s uargs; /**< Arguments from user space */
+ struct mali_session_data *session; /**< Session which submitted this job */
+ u32 pid; /**< Process ID of submitting process */
+ u32 tid; /**< Thread ID of submitting thread */
+ u32 id; /**< Identifier for this job in kernel space (sequential numbering) */
+ u32 cache_order; /**< Cache order used for L2 cache flushing (sequential numbering) */
+ struct mali_timeline_tracker tracker; /**< Timeline tracker for this job */
+ struct mali_timeline_tracker *pp_tracker; /**< Pointer to Timeline tracker for PP job that depends on this job. */
+ _mali_osk_notification_t *finished_notification; /**< Notification sent back to userspace on job complete */
+
+ /*
+ * These members are used by the scheduler,
+ * protected by scheduler lock
+ */
+ _mali_osk_list_t list; /**< Used to link jobs together in the scheduler queue */
+
+ /*
+ * These members are used by the executor and/or group,
+ * protected by executor lock
+ */
+ _mali_osk_notification_t *oom_notification; /**< Notification sent back to userspace on OOM */
+
+ /*
+ * Set by executor/group on job completion, read by scheduler when
+ * returning job to user. Hold executor lock when setting,
+ * no lock needed when reading
+ */
+	u32 heap_base_addr; /**< Holds the base Mali address of the memory handle used for the new heap */
+	u32 heap_current_addr; /**< Holds the current HEAP address when the job has completed */
+	u32 heap_grow_size; /**< Holds the HEAP grow size used on HEAP OOM */
+	u32 perf_counter_value0; /**< Value of performance counter 0 (to be returned to user space) */
+	u32 perf_counter_value1; /**< Value of performance counter 1 (to be returned to user space) */
+	struct mali_defer_mem *dmem; /**< Used by defer bind to store dmem info */
+	struct list_head varying_alloc; /**< Holds the list of varying allocations */
+	u32 bind_flag; /**< Flag for defer bind */
+	u32 *varying_list; /**< Varying memory list that needs defer binding */
+	struct list_head vary_todo; /**< List of backends that need defer binding */
+	u32 big_job; /**< Set if the GP job has a large varying output and may take a long time */
+};
+
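+/* Defer-bind state flags (see mali_gp_job::bind_flag) */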
+#define MALI_DEFER_BIND_MEMORY_PREPARED (0x1 << 0)
+#define MALI_DEFER_BIND_MEMORY_BINDED (0x1 << 2)
+
+struct mali_gp_allocation_node {
+ struct list_head node;
+ mali_mem_allocation *alloc;
+};
+
+struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_uk_gp_start_job_s *uargs, u32 id, struct mali_timeline_tracker *pp_tracker);
+void mali_gp_job_delete(struct mali_gp_job *job);
+
+u32 mali_gp_job_get_gp_counter_src0(void);
+void mali_gp_job_set_gp_counter_src0(u32 counter);
+u32 mali_gp_job_get_gp_counter_src1(void);
+void mali_gp_job_set_gp_counter_src1(u32 counter);
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_id(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return (NULL == job) ? 0 : job->id;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_cache_order(struct mali_gp_job *job,
+ u32 cache_order)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ job->cache_order = cache_order;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_cache_order(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return (NULL == job) ? 0 : job->cache_order;
+}
+
+MALI_STATIC_INLINE u64 mali_gp_job_get_user_id(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.user_job_ptr;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_frame_builder_id(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.frame_builder_id;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_flush_id(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.flush_id;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_pid(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->pid;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_tid(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->tid;
+}
+
+MALI_STATIC_INLINE u32 *mali_gp_job_get_frame_registers(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.frame_registers;
+}
+
+MALI_STATIC_INLINE struct mali_session_data *mali_gp_job_get_session(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->session;
+}
+
+MALI_STATIC_INLINE mali_bool mali_gp_job_has_vs_job(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return (job->uargs.frame_registers[0] != job->uargs.frame_registers[1]) ? MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE mali_bool mali_gp_job_has_plbu_job(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return (job->uargs.frame_registers[2] != job->uargs.frame_registers[3]) ? MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_current_heap_addr(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->heap_current_addr;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_current_heap_addr(struct mali_gp_job *job, u32 heap_addr)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ job->heap_current_addr = heap_addr;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_flag(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.perf_counter_flag;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_src0(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.perf_counter_src0;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_src1(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.perf_counter_src1;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_value0(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->perf_counter_value0;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_value1(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->perf_counter_value1;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_src0(struct mali_gp_job *job, u32 src)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ job->uargs.perf_counter_src0 = src;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_src1(struct mali_gp_job *job, u32 src)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ job->uargs.perf_counter_src1 = src;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_value0(struct mali_gp_job *job, u32 value)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ job->perf_counter_value0 = value;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_value1(struct mali_gp_job *job, u32 value)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ job->perf_counter_value1 = value;
+}
+
+void mali_gp_job_list_add(struct mali_gp_job *job, _mali_osk_list_t *list);
+
+MALI_STATIC_INLINE void mali_gp_job_list_move(struct mali_gp_job *job,
+ _mali_osk_list_t *list)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ MALI_DEBUG_ASSERT(!_mali_osk_list_empty(&job->list));
+ _mali_osk_list_move(&job->list, list);
+}
+
+MALI_STATIC_INLINE void mali_gp_job_list_remove(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ _mali_osk_list_delinit(&job->list);
+}
+
+MALI_STATIC_INLINE _mali_osk_notification_t *
+mali_gp_job_get_finished_notification(struct mali_gp_job *job)
+{
+ _mali_osk_notification_t *notification;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_POINTER(job->finished_notification);
+
+ notification = job->finished_notification;
+ job->finished_notification = NULL;
+
+ return notification;
+}
+
+MALI_STATIC_INLINE _mali_osk_notification_t *mali_gp_job_get_oom_notification(
+ struct mali_gp_job *job)
+{
+ _mali_osk_notification_t *notification;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT_POINTER(job->oom_notification);
+
+ notification = job->oom_notification;
+ job->oom_notification = NULL;
+
+ return notification;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_oom_notification(
+ struct mali_gp_job *job,
+ _mali_osk_notification_t *notification)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(NULL == job->oom_notification);
+ job->oom_notification = notification;
+}
+
+MALI_STATIC_INLINE struct mali_timeline_tracker *mali_gp_job_get_tracker(
+ struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return &(job->tracker);
+}
+
+
+MALI_STATIC_INLINE u32 __user *mali_gp_job_get_timeline_point_ptr(
+ struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return (u32 __user *)(uintptr_t)job->uargs.timeline_point_ptr;
+}
+
+
+/**
+ * Release reference on tracker for PP job that depends on this GP job.
+ *
+ * @note If GP job has a reference on tracker, this function MUST be called before the GP job is
+ * deleted.
+ *
+ * @param job GP job that is done.
+ * @param success MALI_TRUE if job completed successfully, MALI_FALSE if not.
+ * @return A scheduling bitmask indicating whether scheduling needs to be done.
+ */
+mali_scheduler_mask mali_gp_job_signal_pp_tracker(struct mali_gp_job *job, mali_bool success);
+
+#endif /* __MALI_GP_JOB_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_group.c b/drivers/gpu/arm/utgard/common/mali_group.c
new file mode 100644
index 000000000000..b4cd3a1fcd8c
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_group.c
@@ -0,0 +1,1816 @@
+/*
+ * Copyright (C) 2011-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "mali_kernel_common.h"
+#include "mali_group.h"
+#include "mali_osk.h"
+#include "mali_l2_cache.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_mmu.h"
+#include "mali_dlbu.h"
+#include "mali_broadcast.h"
+#include "mali_scheduler.h"
+#include "mali_osk_profiling.h"
+#include "mali_pm_domain.h"
+#include "mali_pm.h"
+#include "mali_executor.h"
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+#include <linux/sched.h>
+#include <trace/events/gpu.h>
+#endif
+
+#define MALI_MAX_NUM_DOMAIN_REFS (MALI_MAX_NUMBER_OF_GROUPS * 2)
+
+#if defined(CONFIG_MALI400_PROFILING)
+static void mali_group_report_l2_cache_counters_per_core(struct mali_group *group, u32 core_num);
+#endif /* #if defined(CONFIG_MALI400_PROFILING) */
+
+static struct mali_group *mali_global_groups[MALI_MAX_NUMBER_OF_GROUPS] = { NULL, };
+static u32 mali_global_num_groups = 0;
+
+/* SW timer for job execution */
+int mali_max_job_runtime = MALI_MAX_JOB_RUNTIME_DEFAULT;
+
+/* local helper functions */
+static void mali_group_bottom_half_mmu(void *data);
+static void mali_group_bottom_half_gp(void *data);
+static void mali_group_bottom_half_pp(void *data);
+static void mali_group_timeout(void *data);
+static void mali_group_out_of_memory(void *data);
+
+static void mali_group_reset_pp(struct mali_group *group);
+static void mali_group_reset_mmu(struct mali_group *group);
+
+static void mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session);
+static void mali_group_recovery_reset(struct mali_group *group);
+
+struct mali_group *mali_group_create(struct mali_l2_cache_core *core,
+ struct mali_dlbu_core *dlbu,
+ struct mali_bcast_unit *bcast,
+ u32 domain_index)
+{
+ struct mali_group *group = NULL;
+
+ if (mali_global_num_groups >= MALI_MAX_NUMBER_OF_GROUPS) {
+ MALI_PRINT_ERROR(("Mali group: Too many group objects created\n"));
+ return NULL;
+ }
+
+ group = _mali_osk_calloc(1, sizeof(struct mali_group));
+ if (NULL != group) {
+ group->timeout_timer = _mali_osk_timer_init();
+ if (NULL != group->timeout_timer) {
+ _mali_osk_timer_setcallback(group->timeout_timer, mali_group_timeout, (void *)group);
+
+ group->l2_cache_core[0] = core;
+ _mali_osk_list_init(&group->group_list);
+ _mali_osk_list_init(&group->executor_list);
+ _mali_osk_list_init(&group->pm_domain_list);
+ group->bcast_core = bcast;
+ group->dlbu_core = dlbu;
+
+ /* register this object as a part of the correct power domain */
+ if ((NULL != core) || (NULL != dlbu) || (NULL != bcast))
+ group->pm_domain = mali_pm_register_group(domain_index, group);
+
+ mali_global_groups[mali_global_num_groups] = group;
+ mali_global_num_groups++;
+
+ return group;
+ }
+ _mali_osk_free(group);
+ }
+
+ return NULL;
+}
+
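+/*
+ * Delete a group and all resources it owns (GP/PP core, MMU, and for a
+ * virtual group also its children, DLBU and broadcast unit), then
+ * remove it from the global group array by moving the last entry into
+ * the freed slot.
+ */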
+void mali_group_delete(struct mali_group *group)
+{
+ u32 i;
+
+ MALI_DEBUG_PRINT(4, ("Deleting group %s\n",
+ mali_group_core_description(group)));
+
+ MALI_DEBUG_ASSERT(NULL == group->parent_group);
+ MALI_DEBUG_ASSERT((MALI_GROUP_STATE_INACTIVE == group->state) || ((MALI_GROUP_STATE_ACTIVATION_PENDING == group->state)));
+
+ /* Delete the resources that this group owns */
+ if (NULL != group->gp_core) {
+ mali_gp_delete(group->gp_core);
+ }
+
+ if (NULL != group->pp_core) {
+ mali_pp_delete(group->pp_core);
+ }
+
+ if (NULL != group->mmu) {
+ mali_mmu_delete(group->mmu);
+ }
+
+ if (mali_group_is_virtual(group)) {
+ /* Remove all groups from virtual group */
+ struct mali_group *child;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+ child->parent_group = NULL;
+ mali_group_delete(child);
+ }
+
+ mali_dlbu_delete(group->dlbu_core);
+
+ if (NULL != group->bcast_core) {
+ mali_bcast_unit_delete(group->bcast_core);
+ }
+ }
+
+ for (i = 0; i < mali_global_num_groups; i++) {
+ if (mali_global_groups[i] == group) {
+ mali_global_groups[i] = NULL;
+ mali_global_num_groups--;
+
+ if (i != mali_global_num_groups) {
+ /* We removed a group from the middle of the array -- move the last
+ * group to the current position to close the gap */
+ mali_global_groups[i] = mali_global_groups[mali_global_num_groups];
+ mali_global_groups[mali_global_num_groups] = NULL;
+ }
+
+ break;
+ }
+ }
+
+ if (NULL != group->timeout_timer) {
+ _mali_osk_timer_del(group->timeout_timer);
+ _mali_osk_timer_term(group->timeout_timer);
+ }
+
+ if (NULL != group->bottom_half_work_mmu) {
+ _mali_osk_wq_delete_work(group->bottom_half_work_mmu);
+ }
+
+ if (NULL != group->bottom_half_work_gp) {
+ _mali_osk_wq_delete_work(group->bottom_half_work_gp);
+ }
+
+ if (NULL != group->bottom_half_work_pp) {
+ _mali_osk_wq_delete_work(group->bottom_half_work_pp);
+ }
+
+ _mali_osk_free(group);
+}
+
+_mali_osk_errcode_t mali_group_add_mmu_core(struct mali_group *group, struct mali_mmu_core *mmu_core)
+{
+ /* This group object now owns the MMU core object */
+ group->mmu = mmu_core;
+ group->bottom_half_work_mmu = _mali_osk_wq_create_work(mali_group_bottom_half_mmu, group);
+ if (NULL == group->bottom_half_work_mmu) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_group_remove_mmu_core(struct mali_group *group)
+{
+ /* This group object no longer owns the MMU core object */
+ group->mmu = NULL;
+ if (NULL != group->bottom_half_work_mmu) {
+ _mali_osk_wq_delete_work(group->bottom_half_work_mmu);
+ }
+}
+
+_mali_osk_errcode_t mali_group_add_gp_core(struct mali_group *group, struct mali_gp_core *gp_core)
+{
+ /* This group object now owns the GP core object */
+ group->gp_core = gp_core;
+ group->bottom_half_work_gp = _mali_osk_wq_create_work(mali_group_bottom_half_gp, group);
+ if (NULL == group->bottom_half_work_gp) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ group->oom_work_handler = _mali_osk_wq_create_work(mali_group_out_of_memory, group);
+	if (NULL == group->oom_work_handler) {
+		_mali_osk_wq_delete_work(group->bottom_half_work_gp);
+		group->bottom_half_work_gp = NULL;
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	return _MALI_OSK_ERR_OK;
+}
+
+void mali_group_remove_gp_core(struct mali_group *group)
+{
+ /* This group object no longer owns the GP core object */
+ group->gp_core = NULL;
+ if (NULL != group->bottom_half_work_gp) {
+ _mali_osk_wq_delete_work(group->bottom_half_work_gp);
+ }
+
+ if (NULL != group->oom_work_handler) {
+ _mali_osk_wq_delete_work(group->oom_work_handler);
+ }
+}
+
+_mali_osk_errcode_t mali_group_add_pp_core(struct mali_group *group, struct mali_pp_core *pp_core)
+{
+ /* This group object now owns the PP core object */
+ group->pp_core = pp_core;
+ group->bottom_half_work_pp = _mali_osk_wq_create_work(mali_group_bottom_half_pp, group);
+ if (NULL == group->bottom_half_work_pp) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_group_remove_pp_core(struct mali_group *group)
+{
+ /* This group object no longer owns the PP core object */
+ group->pp_core = NULL;
+ if (NULL != group->bottom_half_work_pp) {
+ _mali_osk_wq_delete_work(group->bottom_half_work_pp);
+ }
+}
+
+enum mali_group_state mali_group_activate(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ MALI_DEBUG_PRINT(4, ("Group: Activating group %s\n",
+ mali_group_core_description(group)));
+
+ if (MALI_GROUP_STATE_INACTIVE == group->state) {
+ /* Group is inactive, get PM refs in order to power up */
+
+ /*
+		 * We'll take a maximum of 2 power domain references per group:
+		 * one for the group itself, and one for its L2 cache.
+ */
+ struct mali_pm_domain *domains[MALI_MAX_NUM_DOMAIN_REFS];
+ struct mali_group *groups[MALI_MAX_NUM_DOMAIN_REFS];
+ u32 num_domains = 0;
+ mali_bool all_groups_on;
+
+ /* Deal with child groups first */
+ if (mali_group_is_virtual(group)) {
+ /*
+ * The virtual group might have 0, 1 or 2 L2s in
+ * its l2_cache_core array, but we ignore these and
+ * let the child groups take the needed L2 cache ref
+ * on behalf of the virtual group.
+ * In other words; The L2 refs are taken in pair with
+ * the physical group which the L2 is attached to.
+ */
+ struct mali_group *child;
+ struct mali_group *temp;
+
+ /*
+ * Child group is inactive, get PM
+ * refs in order to power up.
+ */
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp,
+ &group->group_list,
+ struct mali_group, group_list) {
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_INACTIVE
+ == child->state);
+
+ child->state = MALI_GROUP_STATE_ACTIVATION_PENDING;
+
+ MALI_DEBUG_ASSERT_POINTER(
+ child->pm_domain);
+ domains[num_domains] = child->pm_domain;
+ groups[num_domains] = child;
+ num_domains++;
+
+ /*
+ * Take L2 domain ref for child group.
+ */
+ MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS
+ > num_domains);
+ domains[num_domains] = mali_l2_cache_get_pm_domain(
+ child->l2_cache_core[0]);
+ groups[num_domains] = NULL;
+ MALI_DEBUG_ASSERT(NULL ==
+ child->l2_cache_core[1]);
+ num_domains++;
+ }
+ } else {
+ /* Take L2 domain ref for physical groups. */
+ MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS >
+ num_domains);
+
+ domains[num_domains] = mali_l2_cache_get_pm_domain(
+ group->l2_cache_core[0]);
+ groups[num_domains] = NULL;
+ MALI_DEBUG_ASSERT(NULL == group->l2_cache_core[1]);
+ num_domains++;
+ }
+
+		/* Do the group itself last (its dependencies first) */
+
+ group->state = MALI_GROUP_STATE_ACTIVATION_PENDING;
+
+ MALI_DEBUG_ASSERT_POINTER(group->pm_domain);
+ domains[num_domains] = group->pm_domain;
+ groups[num_domains] = group;
+ num_domains++;
+
+ all_groups_on = mali_pm_get_domain_refs(domains, groups,
+ num_domains);
+
+ /*
+ * Complete activation for group, include
+ * virtual group or physical group.
+ */
+ if (MALI_TRUE == all_groups_on) {
+
+ mali_group_set_active(group);
+ }
+ } else if (MALI_GROUP_STATE_ACTIVE == group->state) {
+ /* Already active */
+ MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
+ } else {
+ /*
+ * Activation already pending, group->power_is_on could
+ * be both true or false. We need to wait for power up
+ * notification anyway.
+ */
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVATION_PENDING
+ == group->state);
+ }
+
+ MALI_DEBUG_PRINT(4, ("Group: group %s activation result: %s\n",
+ mali_group_core_description(group),
+ MALI_GROUP_STATE_ACTIVE == group->state ?
+ "ACTIVE" : "PENDING"));
+
+ return group->state;
+}
+
+mali_bool mali_group_set_active(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVATION_PENDING == group->state);
+ MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
+
+ MALI_DEBUG_PRINT(4, ("Group: Activation completed for %s\n",
+ mali_group_core_description(group)));
+
+ if (mali_group_is_virtual(group)) {
+ struct mali_group *child;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list,
+ struct mali_group, group_list) {
+ if (MALI_TRUE != child->power_is_on) {
+ return MALI_FALSE;
+ }
+
+ child->state = MALI_GROUP_STATE_ACTIVE;
+ }
+
+ mali_group_reset(group);
+ }
+
+ /* Go to ACTIVE state */
+ group->state = MALI_GROUP_STATE_ACTIVE;
+
+ return MALI_TRUE;
+}
+
+mali_bool mali_group_deactivate(struct mali_group *group)
+{
+ struct mali_pm_domain *domains[MALI_MAX_NUM_DOMAIN_REFS];
+ u32 num_domains = 0;
+ mali_bool power_down = MALI_FALSE;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_INACTIVE != group->state);
+
+ MALI_DEBUG_PRINT(3, ("Group: Deactivating group %s\n",
+ mali_group_core_description(group)));
+
+ group->state = MALI_GROUP_STATE_INACTIVE;
+
+ MALI_DEBUG_ASSERT_POINTER(group->pm_domain);
+ domains[num_domains] = group->pm_domain;
+ num_domains++;
+
+ if (mali_group_is_virtual(group)) {
+ /* Release refs for all child groups */
+ struct mali_group *child;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp,
+ &group->group_list,
+ struct mali_group, group_list) {
+ child->state = MALI_GROUP_STATE_INACTIVE;
+
+ MALI_DEBUG_ASSERT_POINTER(child->pm_domain);
+ domains[num_domains] = child->pm_domain;
+ num_domains++;
+
+ /* Release L2 cache domain for child groups */
+ MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS >
+ num_domains);
+ domains[num_domains] = mali_l2_cache_get_pm_domain(
+ child->l2_cache_core[0]);
+ MALI_DEBUG_ASSERT(NULL == child->l2_cache_core[1]);
+ num_domains++;
+ }
+
+ /*
+ * Must do mali_group_power_down() steps right here for
+ * virtual group, because virtual group itself is likely to
+ * stay powered on, however child groups are now very likely
+ * to be powered off (and thus lose their state).
+ */
+
+ mali_group_clear_session(group);
+ /*
+		 * Disable the broadcast unit (clear its mask).
+ * This is needed in case the GPU isn't actually
+ * powered down at this point and groups are
+ * removed from an inactive virtual group.
+ * If not, then the broadcast unit will intercept
+ * their interrupts!
+ */
+ mali_bcast_disable(group->bcast_core);
+ } else {
+ /* Release L2 cache domain for physical groups */
+ MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS >
+ num_domains);
+ domains[num_domains] = mali_l2_cache_get_pm_domain(
+ group->l2_cache_core[0]);
+ MALI_DEBUG_ASSERT(NULL == group->l2_cache_core[1]);
+ num_domains++;
+ }
+
+ power_down = mali_pm_put_domain_refs(domains, num_domains);
+
+ return power_down;
+}
+
+void mali_group_power_up(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ MALI_DEBUG_PRINT(3, ("Group: Power up for %s\n",
+ mali_group_core_description(group)));
+
+ group->power_is_on = MALI_TRUE;
+
+ if (MALI_FALSE == mali_group_is_virtual(group)
+ && MALI_FALSE == mali_group_is_in_virtual(group)) {
+ mali_group_reset(group);
+ }
+
+ /*
+	 * When we acquire only one physical group from the virtual group,
+	 * we should remove the bcast & dlbu mask from the virtual group and
+	 * reset the bcast and dlbu cores, even though some PP cores in the
+	 * virtual group may not be powered on.
+ */
+ if (MALI_TRUE == mali_group_is_virtual(group)) {
+ mali_bcast_reset(group->bcast_core);
+ mali_dlbu_update_mask(group->dlbu_core);
+ }
+}
+
+void mali_group_power_down(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ MALI_DEBUG_PRINT(3, ("Group: Power down for %s\n",
+ mali_group_core_description(group)));
+
+ group->power_is_on = MALI_FALSE;
+
+ if (mali_group_is_virtual(group)) {
+ /*
+ * What we do for physical jobs in this function should
+ * already have been done in mali_group_deactivate()
+ * for virtual group.
+ */
+ MALI_DEBUG_ASSERT(NULL == group->session);
+ } else {
+ mali_group_clear_session(group);
+ }
+}
+
+MALI_DEBUG_CODE(static void mali_group_print_virtual(struct mali_group *vgroup)
+{
+ u32 i;
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ MALI_DEBUG_PRINT(4, ("Virtual group %s (%p)\n",
+ mali_group_core_description(vgroup),
+ vgroup));
+ MALI_DEBUG_PRINT(4, ("l2_cache_core[0] = %p, ref = %d\n", vgroup->l2_cache_core[0], vgroup->l2_cache_core_ref_count[0]));
+ MALI_DEBUG_PRINT(4, ("l2_cache_core[1] = %p, ref = %d\n", vgroup->l2_cache_core[1], vgroup->l2_cache_core_ref_count[1]));
+
+ i = 0;
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &vgroup->group_list, struct mali_group, group_list) {
+ MALI_DEBUG_PRINT(4, ("[%d] %s (%p), l2_cache_core[0] = %p\n",
+ i, mali_group_core_description(group),
+ group, group->l2_cache_core[0]));
+ i++;
+ }
+})
+
+static void mali_group_dump_core_status(struct mali_group *group)
+{
+ u32 i;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT(NULL != group->gp_core || (NULL != group->pp_core && !mali_group_is_virtual(group)));
+
+ if (NULL != group->gp_core) {
+ MALI_PRINT(("Dump Group %s\n", group->gp_core->hw_core.description));
+
+ for (i = 0; i < 0xA8; i += 0x10) {
+ MALI_PRINT(("0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i, mali_hw_core_register_read(&group->gp_core->hw_core, i),
+ mali_hw_core_register_read(&group->gp_core->hw_core, i + 4),
+ mali_hw_core_register_read(&group->gp_core->hw_core, i + 8),
+ mali_hw_core_register_read(&group->gp_core->hw_core, i + 12)));
+ }
+
+
+ } else {
+ MALI_PRINT(("Dump Group %s\n", group->pp_core->hw_core.description));
+
+ for (i = 0; i < 0x5c; i += 0x10) {
+ MALI_PRINT(("0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i, mali_hw_core_register_read(&group->pp_core->hw_core, i),
+ mali_hw_core_register_read(&group->pp_core->hw_core, i + 4),
+ mali_hw_core_register_read(&group->pp_core->hw_core, i + 8),
+ mali_hw_core_register_read(&group->pp_core->hw_core, i + 12)));
+ }
+
+		/* Skip some minor registers and continue dumping at 0x1000 */
+ for (i = 0x1000; i < 0x1068; i += 0x10) {
+ MALI_PRINT(("0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i, mali_hw_core_register_read(&group->pp_core->hw_core, i),
+ mali_hw_core_register_read(&group->pp_core->hw_core, i + 4),
+ mali_hw_core_register_read(&group->pp_core->hw_core, i + 8),
+ mali_hw_core_register_read(&group->pp_core->hw_core, i + 12)));
+ }
+ }
+
+ MALI_PRINT(("Dump Group MMU\n"));
+ for (i = 0; i < 0x24; i += 0x10) {
+ MALI_PRINT(("0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i, mali_hw_core_register_read(&group->mmu->hw_core, i),
+ mali_hw_core_register_read(&group->mmu->hw_core, i + 4),
+ mali_hw_core_register_read(&group->mmu->hw_core, i + 8),
+ mali_hw_core_register_read(&group->mmu->hw_core, i + 12)));
+ }
+}
+
+
+/**
+ * @brief Dump group status
+ */
+void mali_group_dump_status(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ if (mali_group_is_virtual(group)) {
+ struct mali_group *group_c;
+ struct mali_group *temp;
+ _MALI_OSK_LIST_FOREACHENTRY(group_c, temp, &group->group_list, struct mali_group, group_list) {
+ mali_group_dump_core_status(group_c);
+ }
+ } else {
+ mali_group_dump_core_status(group);
+ }
+}
+
+/**
+ * @brief Add child group to virtual group parent
+ */
+void mali_group_add_group(struct mali_group *parent, struct mali_group *child)
+{
+ mali_bool found;
+ u32 i;
+
+ MALI_DEBUG_PRINT(3, ("Adding group %s to virtual group %s\n",
+ mali_group_core_description(child),
+ mali_group_core_description(parent)));
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
+ MALI_DEBUG_ASSERT(!mali_group_is_virtual(child));
+ MALI_DEBUG_ASSERT(NULL == child->parent_group);
+
+ _mali_osk_list_addtail(&child->group_list, &parent->group_list);
+
+ child->parent_group = parent;
+
+ MALI_DEBUG_ASSERT_POINTER(child->l2_cache_core[0]);
+
+ MALI_DEBUG_PRINT(4, ("parent->l2_cache_core: [0] = %p, [1] = %p\n", parent->l2_cache_core[0], parent->l2_cache_core[1]));
+ MALI_DEBUG_PRINT(4, ("child->l2_cache_core: [0] = %p, [1] = %p\n", child->l2_cache_core[0], child->l2_cache_core[1]));
+
+ /* Keep track of the L2 cache cores of child groups */
+ found = MALI_FALSE;
+ for (i = 0; i < 2; i++) {
+ if (parent->l2_cache_core[i] == child->l2_cache_core[0]) {
+ MALI_DEBUG_ASSERT(parent->l2_cache_core_ref_count[i] > 0);
+ parent->l2_cache_core_ref_count[i]++;
+ found = MALI_TRUE;
+ }
+ }
+
+ if (!found) {
+ /* First time we see this L2 cache, add it to our list */
+ i = (NULL == parent->l2_cache_core[0]) ? 0 : 1;
+
+ MALI_DEBUG_PRINT(4, ("First time we see l2_cache %p. Adding to [%d] = %p\n", child->l2_cache_core[0], i, parent->l2_cache_core[i]));
+
+ MALI_DEBUG_ASSERT(NULL == parent->l2_cache_core[i]);
+
+ parent->l2_cache_core[i] = child->l2_cache_core[0];
+ parent->l2_cache_core_ref_count[i]++;
+ }
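+
+	/*
+	 * Example (assuming a Mali-450 MP8-style layout with the PP cores split
+	 * across two L2 caches): the first child seen from each L2 claims a free
+	 * slot in parent->l2_cache_core[], and every further child from the same
+	 * L2 only increments the matching reference count.
+	 */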
+
+ /* Update Broadcast Unit and DLBU */
+ mali_bcast_add_group(parent->bcast_core, child);
+ mali_dlbu_add_group(parent->dlbu_core, child);
+
+ if (MALI_TRUE == parent->power_is_on) {
+ mali_bcast_reset(parent->bcast_core);
+ mali_dlbu_update_mask(parent->dlbu_core);
+ }
+
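+	/*
+	 * If the child is powered on, reconcile its MMU with the parent's
+	 * session; the three cases below are: no parent session, a shared
+	 * session, and a different session.
+	 */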
+ if (MALI_TRUE == child->power_is_on) {
+ if (NULL == parent->session) {
+ if (NULL != child->session) {
+ /*
+ * Parent has no session, so clear
+ * child session as well.
+ */
+ mali_mmu_activate_empty_page_directory(child->mmu);
+ }
+ } else {
+ if (parent->session == child->session) {
+				/* The child already has the same session as the
+				 * parent, so a simple TLB zap is enough.
+				 */
+ mali_mmu_zap_tlb(child->mmu);
+ } else {
+				/*
+				 * Parent has a different session, so we must
+				 * switch to that session's page table.
+				 */
+ mali_mmu_activate_page_directory(child->mmu, mali_session_get_page_directory(parent->session));
+ }
+
+ /* It is the parent which keeps the session from now on */
+ child->session = NULL;
+ }
+ } else {
+ /* should have been cleared when child was powered down */
+ MALI_DEBUG_ASSERT(NULL == child->session);
+ }
+
+ /* Start job on child when parent is active */
+ if (NULL != parent->pp_running_job) {
+ struct mali_pp_job *job = parent->pp_running_job;
+
+		MALI_DEBUG_PRINT(3, ("Group %p joining running job %d on virtual group %p\n",
+				     child, mali_pp_job_get_id(job), parent));
+
+ /* Only allowed to add active child to an active parent */
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE == parent->state);
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE == child->state);
+
+ mali_pp_job_start(child->pp_core, job, mali_pp_core_get_id(child->pp_core), MALI_TRUE);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+ MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+ mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+ MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
+ mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+ trace_gpu_sched_switch(
+			mali_pp_core_description(child->pp_core),
+ sched_clock(), mali_pp_job_get_tid(job),
+ 0, mali_pp_job_get_id(job));
+#endif
+
+#if defined(CONFIG_MALI400_PROFILING)
+ trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core),
+ mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job));
+#endif
+ }
+
+ MALI_DEBUG_CODE(mali_group_print_virtual(parent);)
+}
+
+/**
+ * @brief Remove child group from virtual group parent
+ */
+void mali_group_remove_group(struct mali_group *parent, struct mali_group *child)
+{
+ u32 i;
+
+ MALI_DEBUG_PRINT(3, ("Removing group %s from virtual group %s\n",
+ mali_group_core_description(child),
+ mali_group_core_description(parent)));
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
+ MALI_DEBUG_ASSERT(!mali_group_is_virtual(child));
+ MALI_DEBUG_ASSERT(parent == child->parent_group);
+
+ /* Update Broadcast Unit and DLBU */
+ mali_bcast_remove_group(parent->bcast_core, child);
+ mali_dlbu_remove_group(parent->dlbu_core, child);
+
+ if (MALI_TRUE == parent->power_is_on) {
+ mali_bcast_reset(parent->bcast_core);
+ mali_dlbu_update_mask(parent->dlbu_core);
+ }
+
+ child->session = parent->session;
+ child->parent_group = NULL;
+
+ _mali_osk_list_delinit(&child->group_list);
+ if (_mali_osk_list_empty(&parent->group_list)) {
+ parent->session = NULL;
+ }
+
+ /* Keep track of the L2 cache cores of child groups */
+ i = (child->l2_cache_core[0] == parent->l2_cache_core[0]) ? 0 : 1;
+
+ MALI_DEBUG_ASSERT(child->l2_cache_core[0] == parent->l2_cache_core[i]);
+
+ parent->l2_cache_core_ref_count[i]--;
+ if (parent->l2_cache_core_ref_count[i] == 0) {
+ parent->l2_cache_core[i] = NULL;
+ }
+
+ MALI_DEBUG_CODE(mali_group_print_virtual(parent));
+}
+
+struct mali_group *mali_group_acquire_group(struct mali_group *parent)
+{
+ struct mali_group *child = NULL;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
+
+ if (!_mali_osk_list_empty(&parent->group_list)) {
+ child = _MALI_OSK_LIST_ENTRY(parent->group_list.prev, struct mali_group, group_list);
+ mali_group_remove_group(parent, child);
+ }
+
+ if (NULL != child) {
+ if (MALI_GROUP_STATE_ACTIVE != parent->state
+ && MALI_TRUE == child->power_is_on) {
+ mali_group_reset(child);
+ }
+ }
+
+ return child;
+}
+
+void mali_group_reset(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(NULL == group->gp_running_job);
+ MALI_DEBUG_ASSERT(NULL == group->pp_running_job);
+ MALI_DEBUG_ASSERT(NULL == group->session);
+
+ MALI_DEBUG_PRINT(3, ("Group: reset of %s\n",
+ mali_group_core_description(group)));
+
+ if (NULL != group->dlbu_core) {
+ mali_dlbu_reset(group->dlbu_core);
+ }
+
+ if (NULL != group->bcast_core) {
+ mali_bcast_reset(group->bcast_core);
+ }
+
+ MALI_DEBUG_ASSERT(NULL != group->mmu);
+ mali_group_reset_mmu(group);
+
+ if (NULL != group->gp_core) {
+ MALI_DEBUG_ASSERT(NULL == group->pp_core);
+ mali_gp_reset(group->gp_core);
+ } else {
+ MALI_DEBUG_ASSERT(NULL != group->pp_core);
+ mali_group_reset_pp(group);
+ }
+}
+
+void mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job)
+{
+ struct mali_session_data *session;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ MALI_DEBUG_PRINT(3, ("Group: Starting GP job 0x%08X on group %s\n",
+ job,
+ mali_group_core_description(group)));
+
+ session = mali_gp_job_get_session(job);
+
+ MALI_DEBUG_ASSERT_POINTER(group->l2_cache_core[0]);
+ mali_l2_cache_invalidate_conditional(group->l2_cache_core[0], mali_gp_job_get_cache_order(job));
+
+ mali_group_activate_page_directory(group, session);
+
+ mali_gp_job_start(group->gp_core, job);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0) |
+ MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+ mali_gp_job_get_frame_builder_id(job), mali_gp_job_get_flush_id(job), 0, 0, 0);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
+ mali_gp_job_get_pid(job), mali_gp_job_get_tid(job), 0, 0, 0);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ trace_mali_core_active(mali_gp_job_get_pid(job), 1 /* active */, 1 /* GP */, 0 /* core */,
+ mali_gp_job_get_frame_builder_id(job), mali_gp_job_get_flush_id(job));
+#endif
+
+#if defined(CONFIG_MALI400_PROFILING)
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
+ mali_group_report_l2_cache_counters_per_core(group, 0);
+ }
+#endif /* #if defined(CONFIG_MALI400_PROFILING) */
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+ trace_gpu_sched_switch(mali_gp_core_description(group->gp_core),
+ sched_clock(), mali_gp_job_get_tid(job),
+ 0, mali_gp_job_get_id(job));
+#endif
+
+ group->gp_running_job = job;
+ group->is_working = MALI_TRUE;
+
+ /* Setup SW timer and record start time */
+ group->start_time = _mali_osk_time_tickcount();
+ _mali_osk_timer_mod(group->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));
+
+ MALI_DEBUG_PRINT(4, ("Group: Started GP job 0x%08X on group %s at %u\n",
+ job,
+ mali_group_core_description(group),
+ group->start_time));
+}
+
+/* Used to set all the registers except the frame renderer list address and
+ * the fragment shader stack address. The caller must program those two
+ * registers before calling this function; see the sketch after this function.
+ */
+void mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job, u32 sub_job)
+{
+ struct mali_session_data *session;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ MALI_DEBUG_PRINT(3, ("Group: Starting PP job 0x%08X part %u/%u on group %s\n",
+ job, sub_job + 1,
+ mali_pp_job_get_sub_job_count(job),
+ mali_group_core_description(group)));
+
+ session = mali_pp_job_get_session(job);
+
+ if (NULL != group->l2_cache_core[0]) {
+ mali_l2_cache_invalidate_conditional(group->l2_cache_core[0], mali_pp_job_get_cache_order(job));
+ }
+
+ if (NULL != group->l2_cache_core[1]) {
+ mali_l2_cache_invalidate_conditional(group->l2_cache_core[1], mali_pp_job_get_cache_order(job));
+ }
+
+ mali_group_activate_page_directory(group, session);
+
+ if (mali_group_is_virtual(group)) {
+ struct mali_group *child;
+ struct mali_group *temp;
+ u32 core_num = 0;
+
+ MALI_DEBUG_ASSERT(mali_pp_job_is_virtual(job));
+
+ /* Configure DLBU for the job */
+ mali_dlbu_config_job(group->dlbu_core, job);
+
+ /* Write stack address for each child group */
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+ mali_pp_write_addr_stack(child->pp_core, job);
+ core_num++;
+ }
+
+ mali_pp_job_start(group->pp_core, job, sub_job, MALI_FALSE);
+ } else {
+ mali_pp_job_start(group->pp_core, job, sub_job, MALI_FALSE);
+ }
+
+	/* If the group is virtual, loop through the physical groups that belong
+	 * to it and emit the profiling events for their cores as virtual cores */
+ if (MALI_TRUE == mali_group_is_virtual(group)) {
+ struct mali_group *child;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+ MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+ mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+ MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
+ mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core),
+ mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job));
+#endif
+ }
+
+#if defined(CONFIG_MALI400_PROFILING)
+ if (0 != group->l2_cache_core_ref_count[0]) {
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
+ mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
+ }
+ }
+ if (0 != group->l2_cache_core_ref_count[1]) {
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[1])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[1]))) {
+ mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[1]));
+ }
+ }
+#endif /* #if defined(CONFIG_MALI400_PROFILING) */
+
+ } else { /* group is physical - call profiling events for physical cores */
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) |
+ MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+ mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) |
+ MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL,
+ mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(group->pp_core),
+ mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job));
+#endif
+
+#if defined(CONFIG_MALI400_PROFILING)
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
+ mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
+ }
+#endif /* #if defined(CONFIG_MALI400_PROFILING) */
+ }
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+ trace_gpu_sched_switch(mali_pp_core_description(group->pp_core),
+ sched_clock(), mali_pp_job_get_tid(job),
+ 0, mali_pp_job_get_id(job));
+#endif
+
+ group->pp_running_job = job;
+ group->pp_running_sub_job = sub_job;
+ group->is_working = MALI_TRUE;
+
+ /* Setup SW timer and record start time */
+ group->start_time = _mali_osk_time_tickcount();
+ _mali_osk_timer_mod(group->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));
+
+ MALI_DEBUG_PRINT(4, ("Group: Started PP job 0x%08X part %u/%u on group %s at %u\n",
+ job, sub_job + 1,
+ mali_pp_job_get_sub_job_count(job),
+ mali_group_core_description(group),
+ group->start_time));
+
+}
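+
+/* A minimal sketch of the expected caller sequence for a physical group.
+ * mali_pp_write_addr_stack() is the helper used above for virtual children;
+ * the frame-address helper name is an assumption for illustration only:
+ *
+ *	mali_pp_write_addr_frame(group->pp_core, job, sub_job);	// hypothetical helper
+ *	mali_pp_write_addr_stack(group->pp_core, job);
+ *	mali_group_start_pp_job(group, job, sub_job);
+ */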
+
+void mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr)
+{
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ MALI_DEBUG_ASSERT_POINTER(group->l2_cache_core[0]);
+ mali_l2_cache_invalidate(group->l2_cache_core[0]);
+
+ mali_mmu_zap_tlb_without_stall(group->mmu);
+
+ mali_gp_resume_with_new_heap(group->gp_core, start_addr, end_addr);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
+ 0, 0, 0, 0, 0);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ trace_mali_core_active(mali_gp_job_get_pid(group->gp_running_job), 1 /* active */, 1 /* GP */, 0 /* core */,
+ mali_gp_job_get_frame_builder_id(group->gp_running_job), mali_gp_job_get_flush_id(group->gp_running_job));
+#endif
+}
+
+static void mali_group_reset_mmu(struct mali_group *group)
+{
+ struct mali_group *child;
+ struct mali_group *temp;
+ _mali_osk_errcode_t err;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ if (!mali_group_is_virtual(group)) {
+		/* This is a physical group -- simply reset the MMU
+		 * and wait for the reset to complete. */
+ err = mali_mmu_reset(group->mmu);
+ MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err);
+ } else { /* virtual group */
+ /* Loop through all members of this virtual group and wait
+ * until they are done resetting.
+ */
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+ err = mali_mmu_reset(child->mmu);
+ MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err);
+ }
+ }
+}
+
+static void mali_group_reset_pp(struct mali_group *group)
+{
+ struct mali_group *child;
+ struct mali_group *temp;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ mali_pp_reset_async(group->pp_core);
+
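+	/* For a busy virtual group, the async reset above is assumed to reach
+	 * all member cores through the broadcast unit, which is why only the
+	 * waits below are done per child. */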
+ if (!mali_group_is_virtual(group) || NULL == group->pp_running_job) {
+ /* This is a physical group or an idle virtual group -- simply wait for
+ * the reset to complete. */
+ mali_pp_reset_wait(group->pp_core);
+ } else {
+ /* Loop through all members of this virtual group and wait until they
+ * are done resetting.
+ */
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+ mali_pp_reset_wait(child->pp_core);
+ }
+ }
+}
+
+struct mali_pp_job *mali_group_complete_pp(struct mali_group *group, mali_bool success, u32 *sub_job)
+{
+ struct mali_pp_job *pp_job_to_return;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+ MALI_DEBUG_ASSERT_POINTER(group->pp_running_job);
+ MALI_DEBUG_ASSERT_POINTER(sub_job);
+ MALI_DEBUG_ASSERT(MALI_TRUE == group->is_working);
+
+ /* Stop/clear the timeout timer. */
+ _mali_osk_timer_del_async(group->timeout_timer);
+
+ if (NULL != group->pp_running_job) {
+
+ /* Deal with HW counters and profiling */
+
+ if (MALI_TRUE == mali_group_is_virtual(group)) {
+ struct mali_group *child;
+ struct mali_group *temp;
+
+ /* update performance counters from each physical pp core within this virtual group */
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+ mali_pp_update_performance_counters(group->pp_core, child->pp_core, group->pp_running_job, mali_pp_core_get_id(child->pp_core));
+ }
+
+#if defined(CONFIG_MALI400_PROFILING)
+ /* send profiling data per physical core */
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+ MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
+ mali_pp_job_get_perf_counter_value0(group->pp_running_job, mali_pp_core_get_id(child->pp_core)),
+ mali_pp_job_get_perf_counter_value1(group->pp_running_job, mali_pp_core_get_id(child->pp_core)),
+ mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8),
+ 0, 0);
+
+ trace_mali_core_active(mali_pp_job_get_pid(group->pp_running_job),
+ 0 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core),
+ mali_pp_job_get_frame_builder_id(group->pp_running_job),
+ mali_pp_job_get_flush_id(group->pp_running_job));
+ }
+ if (0 != group->l2_cache_core_ref_count[0]) {
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
+ mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
+ }
+ }
+ if (0 != group->l2_cache_core_ref_count[1]) {
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[1])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[1]))) {
+ mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[1]));
+ }
+ }
+
+#endif
+ } else {
+ /* update performance counters for a physical group's pp core */
+ mali_pp_update_performance_counters(group->pp_core, group->pp_core, group->pp_running_job, group->pp_running_sub_job);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) |
+ MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL,
+ mali_pp_job_get_perf_counter_value0(group->pp_running_job, group->pp_running_sub_job),
+ mali_pp_job_get_perf_counter_value1(group->pp_running_job, group->pp_running_sub_job),
+ mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8),
+ 0, 0);
+
+ trace_mali_core_active(mali_pp_job_get_pid(group->pp_running_job),
+ 0 /* active */, 0 /* PP */, mali_pp_core_get_id(group->pp_core),
+ mali_pp_job_get_frame_builder_id(group->pp_running_job),
+ mali_pp_job_get_flush_id(group->pp_running_job));
+
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
+ mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
+ }
+#endif
+ }
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+ trace_gpu_sched_switch(
+			mali_pp_core_description(group->pp_core),
+ sched_clock(), 0, 0, 0);
+#endif
+
+ }
+
+ if (success) {
+	/* Only do a soft reset for successful jobs; a full recovery
+	 * reset will be done for failed jobs. */
+ mali_pp_reset_async(group->pp_core);
+ }
+
+ pp_job_to_return = group->pp_running_job;
+ group->pp_running_job = NULL;
+ group->is_working = MALI_FALSE;
+ *sub_job = group->pp_running_sub_job;
+
+ if (!success) {
+ MALI_DEBUG_PRINT(2, ("Mali group: Executing recovery reset due to job failure\n"));
+ mali_group_recovery_reset(group);
+ } else if (_MALI_OSK_ERR_OK != mali_pp_reset_wait(group->pp_core)) {
+ MALI_PRINT_ERROR(("Mali group: Executing recovery reset due to reset failure\n"));
+ mali_group_recovery_reset(group);
+ }
+
+ return pp_job_to_return;
+}
+
+struct mali_gp_job *mali_group_complete_gp(struct mali_group *group, mali_bool success)
+{
+ struct mali_gp_job *gp_job_to_return;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ MALI_DEBUG_ASSERT_POINTER(group->gp_running_job);
+ MALI_DEBUG_ASSERT(MALI_TRUE == group->is_working);
+
+ /* Stop/clear the timeout timer. */
+ _mali_osk_timer_del_async(group->timeout_timer);
+
+ if (NULL != group->gp_running_job) {
+ mali_gp_update_performance_counters(group->gp_core, group->gp_running_job);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
+ mali_gp_job_get_perf_counter_value0(group->gp_running_job),
+ mali_gp_job_get_perf_counter_value1(group->gp_running_job),
+ mali_gp_job_get_perf_counter_src0(group->gp_running_job) | (mali_gp_job_get_perf_counter_src1(group->gp_running_job) << 8),
+ 0, 0);
+
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0])))
+ mali_group_report_l2_cache_counters_per_core(group, 0);
+#endif
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+ trace_gpu_sched_switch(
+			mali_gp_core_description(group->gp_core),
+ sched_clock(), 0, 0, 0);
+#endif
+
+#if defined(CONFIG_MALI400_PROFILING)
+ trace_mali_core_active(mali_gp_job_get_pid(group->gp_running_job), 0 /* active */, 1 /* GP */, 0 /* core */,
+ mali_gp_job_get_frame_builder_id(group->gp_running_job), mali_gp_job_get_flush_id(group->gp_running_job));
+#endif
+
+ mali_gp_job_set_current_heap_addr(group->gp_running_job,
+ mali_gp_read_plbu_alloc_start_addr(group->gp_core));
+ }
+
+ if (success) {
+	/* Only do a soft reset for successful jobs; a full recovery
+	 * reset will be done for failed jobs. */
+ mali_gp_reset_async(group->gp_core);
+ }
+
+ gp_job_to_return = group->gp_running_job;
+ group->gp_running_job = NULL;
+ group->is_working = MALI_FALSE;
+
+ if (!success) {
+ MALI_DEBUG_PRINT(2, ("Mali group: Executing recovery reset due to job failure\n"));
+ mali_group_recovery_reset(group);
+ } else if (_MALI_OSK_ERR_OK != mali_gp_reset_wait(group->gp_core)) {
+ MALI_PRINT_ERROR(("Mali group: Executing recovery reset due to reset failure\n"));
+ mali_group_recovery_reset(group);
+ }
+
+ return gp_job_to_return;
+}
+
+struct mali_group *mali_group_get_glob_group(u32 index)
+{
+ if (mali_global_num_groups > index) {
+ return mali_global_groups[index];
+ }
+
+ return NULL;
+}
+
+u32 mali_group_get_glob_num_groups(void)
+{
+ return mali_global_num_groups;
+}
+
+static void mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session)
+{
+ MALI_DEBUG_PRINT(5, ("Mali group: Activating page directory 0x%08X from session 0x%08X on group %s\n",
+ mali_session_get_page_directory(session), session,
+ mali_group_core_description(group)));
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ if (group->session != session) {
+ /* Different session than last time, so we need to do some work */
+ MALI_DEBUG_PRINT(5, ("Mali group: Activate session: %08x previous: %08x on group %s\n",
+ session, group->session,
+ mali_group_core_description(group)));
+ mali_mmu_activate_page_directory(group->mmu, mali_session_get_page_directory(session));
+ group->session = session;
+ } else {
+ /* Same session as last time, so no work required */
+ MALI_DEBUG_PRINT(4, ("Mali group: Activate existing session 0x%08X on group %s\n",
+ session->page_directory,
+ mali_group_core_description(group)));
+ mali_mmu_zap_tlb_without_stall(group->mmu);
+ }
+}
+
+static void mali_group_recovery_reset(struct mali_group *group)
+{
+ _mali_osk_errcode_t err;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ /* Stop cores, bus stop */
+ if (NULL != group->pp_core) {
+ mali_pp_stop_bus(group->pp_core);
+ } else {
+ mali_gp_stop_bus(group->gp_core);
+ }
+
+ /* Flush MMU and clear page fault (if any) */
+ mali_mmu_activate_fault_flush_page_directory(group->mmu);
+ mali_mmu_page_fault_done(group->mmu);
+
+ /* Wait for cores to stop bus, then do a hard reset on them */
+ if (NULL != group->pp_core) {
+ if (mali_group_is_virtual(group)) {
+ struct mali_group *child, *temp;
+
+ /* Disable the broadcast unit while we do reset directly on the member cores. */
+ mali_bcast_disable(group->bcast_core);
+
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+ mali_pp_stop_bus_wait(child->pp_core);
+ mali_pp_hard_reset(child->pp_core);
+ }
+
+ mali_bcast_enable(group->bcast_core);
+ } else {
+ mali_pp_stop_bus_wait(group->pp_core);
+ mali_pp_hard_reset(group->pp_core);
+ }
+ } else {
+ mali_gp_stop_bus_wait(group->gp_core);
+ mali_gp_hard_reset(group->gp_core);
+ }
+
+ /* Reset MMU */
+ err = mali_mmu_reset(group->mmu);
+ MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err);
+ MALI_IGNORE(err);
+
+ group->session = NULL;
+}
+
+#if MALI_STATE_TRACKING
+u32 mali_group_dump_state(struct mali_group *group, char *buf, u32 size)
+{
+ int n = 0;
+ int i;
+ struct mali_group *child;
+ struct mali_group *temp;
+
+ if (mali_group_is_virtual(group)) {
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Virtual PP Group: %p\n", group);
+ } else if (mali_group_is_in_virtual(group)) {
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Child PP Group: %p\n", group);
+ } else if (NULL != group->pp_core) {
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Physical PP Group: %p\n", group);
+ } else {
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "GP Group: %p\n", group);
+ }
+
+ switch (group->state) {
+ case MALI_GROUP_STATE_INACTIVE:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tstate: INACTIVE\n");
+ break;
+ case MALI_GROUP_STATE_ACTIVATION_PENDING:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tstate: ACTIVATION_PENDING\n");
+ break;
+ case MALI_GROUP_STATE_ACTIVE:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tstate: MALI_GROUP_STATE_ACTIVE\n");
+ break;
+ default:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tstate: UNKNOWN (%d)\n", group->state);
+ MALI_DEBUG_ASSERT(0);
+ break;
+ }
+
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tSW power: %s\n",
+ group->power_is_on ? "On" : "Off");
+
+ n += mali_pm_dump_state_domain(group->pm_domain, buf + n, size - n);
+
+ for (i = 0; i < 2; i++) {
+ if (NULL != group->l2_cache_core[i]) {
+ struct mali_pm_domain *domain;
+ domain = mali_l2_cache_get_pm_domain(
+ group->l2_cache_core[i]);
+ n += mali_pm_dump_state_domain(domain,
+ buf + n, size - n);
+ }
+ }
+
+ if (group->gp_core) {
+ n += mali_gp_dump_state(group->gp_core, buf + n, size - n);
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tGP running job: %p\n", group->gp_running_job);
+ }
+
+ if (group->pp_core) {
+ n += mali_pp_dump_state(group->pp_core, buf + n, size - n);
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tPP running job: %p, subjob %d \n",
+ group->pp_running_job,
+ group->pp_running_sub_job);
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list,
+ struct mali_group, group_list) {
+ n += mali_group_dump_state(child, buf + n, size - n);
+ }
+
+ return n;
+}
+#endif
+
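+/*
+ * The interrupt handlers below are split in two halves: the upper halves run
+ * in IRQ context (no pid/tid available, so the profiling events carry 0/0)
+ * and call into the executor with MALI_TRUE, while the bottom halves run
+ * from a workqueue (scheduled via _mali_osk_wq_schedule_work()) and call the
+ * executor with MALI_FALSE.
+ */
+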
+_mali_osk_errcode_t mali_group_upper_half_mmu(void *data)
+{
+ struct mali_group *group = (struct mali_group *)data;
+ _mali_osk_errcode_t ret;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->mmu);
+
+#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ mali_executor_lock();
+ if (!mali_group_is_working(group)) {
+ /* Not working, so nothing to do */
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+#endif
+ if (NULL != group->gp_core) {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, /* No pid and tid for interrupt handler */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
+ mali_mmu_get_rawstat(group->mmu), 0);
+ } else {
+ MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, /* No pid and tid for interrupt handler */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
+ mali_pp_core_get_id(group->pp_core)),
+ mali_mmu_get_rawstat(group->mmu), 0);
+ }
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ mali_executor_unlock();
+#endif
+#endif
+
+ ret = mali_executor_interrupt_mmu(group, MALI_TRUE);
+
+#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ mali_executor_lock();
+ if (!mali_group_is_working(group)) {
+ /* Not working, so nothing to do */
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+#endif
+
+ if (NULL != group->gp_core) {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, /* No pid and tid for interrupt handler */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
+ mali_mmu_get_rawstat(group->mmu), 0);
+ } else {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, /* No pid and tid for interrupt handler */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
+ mali_pp_core_get_id(group->pp_core)),
+ mali_mmu_get_rawstat(group->mmu), 0);
+ }
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ mali_executor_unlock();
+#endif
+#endif
+
+ return ret;
+}
+
+static void mali_group_bottom_half_mmu(void *data)
+{
+ struct mali_group *group = (struct mali_group *)data;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->mmu);
+
+ if (NULL != group->gp_core) {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), /* pid and tid */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
+ mali_mmu_get_rawstat(group->mmu), 0);
+ } else {
+ MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), /* pid and tid */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
+ mali_pp_core_get_id(group->pp_core)),
+ mali_mmu_get_rawstat(group->mmu), 0);
+ }
+
+ mali_executor_interrupt_mmu(group, MALI_FALSE);
+
+ if (NULL != group->gp_core) {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), /* pid and tid */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
+ mali_mmu_get_rawstat(group->mmu), 0);
+ } else {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), /* pid and tid */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
+ mali_pp_core_get_id(group->pp_core)),
+ mali_mmu_get_rawstat(group->mmu), 0);
+ }
+}
+
+_mali_osk_errcode_t mali_group_upper_half_gp(void *data)
+{
+ struct mali_group *group = (struct mali_group *)data;
+ _mali_osk_errcode_t ret;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ MALI_DEBUG_ASSERT_POINTER(group->mmu);
+
+#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ mali_executor_lock();
+ if (!mali_group_is_working(group)) {
+ /* Not working, so nothing to do */
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+#endif
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, /* No pid and tid for interrupt handler */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
+ mali_gp_get_rawstat(group->gp_core), 0);
+
+ MALI_DEBUG_PRINT(4, ("Group: Interrupt 0x%08X from %s\n",
+ mali_gp_get_rawstat(group->gp_core),
+ mali_group_core_description(group)));
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ mali_executor_unlock();
+#endif
+#endif
+ ret = mali_executor_interrupt_gp(group, MALI_TRUE);
+
+#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ mali_executor_lock();
+ if (!mali_group_is_working(group)) {
+ /* Not working, so nothing to do */
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+#endif
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, /* No pid and tid for interrupt handler */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
+ mali_gp_get_rawstat(group->gp_core), 0);
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ mali_executor_unlock();
+#endif
+#endif
+ return ret;
+}
+
+static void mali_group_bottom_half_gp(void *data)
+{
+ struct mali_group *group = (struct mali_group *)data;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ MALI_DEBUG_ASSERT_POINTER(group->mmu);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), /* pid and tid */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
+ mali_gp_get_rawstat(group->gp_core), 0);
+
+ mali_executor_interrupt_gp(group, MALI_FALSE);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), /* pid and tid */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
+ mali_gp_get_rawstat(group->gp_core), 0);
+}
+
+_mali_osk_errcode_t mali_group_upper_half_pp(void *data)
+{
+ struct mali_group *group = (struct mali_group *)data;
+ _mali_osk_errcode_t ret;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+ MALI_DEBUG_ASSERT_POINTER(group->mmu);
+
+#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ mali_executor_lock();
+ if (!mali_group_is_working(group)) {
+ /* Not working, so nothing to do */
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+#endif
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, /* No pid and tid for interrupt handler */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
+ mali_pp_core_get_id(group->pp_core)),
+ mali_pp_get_rawstat(group->pp_core), 0);
+
+ MALI_DEBUG_PRINT(4, ("Group: Interrupt 0x%08X from %s\n",
+ mali_pp_get_rawstat(group->pp_core),
+ mali_group_core_description(group)));
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ mali_executor_unlock();
+#endif
+#endif
+
+ ret = mali_executor_interrupt_pp(group, MALI_TRUE);
+
+#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ mali_executor_lock();
+ if (!mali_group_is_working(group)) {
+ /* Not working, so nothing to do */
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+#endif
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, /* No pid and tid for interrupt handler */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
+ mali_pp_core_get_id(group->pp_core)),
+ mali_pp_get_rawstat(group->pp_core), 0);
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ mali_executor_unlock();
+#endif
+#endif
+ return ret;
+}
+
+static void mali_group_bottom_half_pp(void *data)
+{
+ struct mali_group *group = (struct mali_group *)data;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+ MALI_DEBUG_ASSERT_POINTER(group->mmu);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), /* pid and tid */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
+ mali_pp_core_get_id(group->pp_core)),
+ mali_pp_get_rawstat(group->pp_core), 0);
+
+ mali_executor_interrupt_pp(group, MALI_FALSE);
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), /* pid and tid */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
+ mali_pp_core_get_id(group->pp_core)),
+ mali_pp_get_rawstat(group->pp_core), 0);
+}
+
+static void mali_group_timeout(void *data)
+{
+ struct mali_group *group = (struct mali_group *)data;
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ MALI_DEBUG_PRINT(2, ("Group: timeout handler for %s at %u\n",
+ mali_group_core_description(group),
+ _mali_osk_time_tickcount()));
+
+ if (NULL != group->gp_core) {
+ mali_group_schedule_bottom_half_gp(group);
+ } else {
+ MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+ mali_group_schedule_bottom_half_pp(group);
+ }
+}
+
+static void mali_group_out_of_memory(void *data)
+{
+ struct mali_group *group = (struct mali_group *)data;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ MALI_DEBUG_ASSERT_POINTER(group->mmu);
+
+ mali_executor_group_oom(group);
+}
+
+mali_bool mali_group_zap_session(struct mali_group *group,
+ struct mali_session_data *session)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ if (group->session != session) {
+ /* not running from this session */
+ return MALI_TRUE; /* success */
+ }
+
+ if (group->is_working) {
+ /* The Zap also does the stall and disable_stall */
+ mali_bool zap_success = mali_mmu_zap_tlb(group->mmu);
+ return zap_success;
+ } else {
+ /* Just remove the session instead of zapping */
+ mali_group_clear_session(group);
+ return MALI_TRUE; /* success */
+ }
+}
+
+#if defined(CONFIG_MALI400_PROFILING)
+static void mali_group_report_l2_cache_counters_per_core(struct mali_group *group, u32 core_num)
+{
+ u32 source0 = 0;
+ u32 value0 = 0;
+ u32 source1 = 0;
+ u32 value1 = 0;
+ u32 profiling_channel = 0;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ switch (core_num) {
+ case 0:
+ profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS;
+ break;
+ case 1:
+ profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L21_COUNTERS;
+ break;
+ case 2:
+ profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L22_COUNTERS;
+ break;
+ default:
+ profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS;
+ break;
+ }
+
+ if (0 == core_num) {
+ mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1);
+ }
+ if (1 == core_num) {
+ if (1 == mali_l2_cache_get_id(group->l2_cache_core[0])) {
+ mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1);
+ } else if (1 == mali_l2_cache_get_id(group->l2_cache_core[1])) {
+ mali_l2_cache_core_get_counter_values(group->l2_cache_core[1], &source0, &value0, &source1, &value1);
+ }
+ }
+ if (2 == core_num) {
+ if (2 == mali_l2_cache_get_id(group->l2_cache_core[0])) {
+ mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1);
+ } else if (2 == mali_l2_cache_get_id(group->l2_cache_core[1])) {
+ mali_l2_cache_core_get_counter_values(group->l2_cache_core[1], &source0, &value0, &source1, &value1);
+ }
+ }
+
+ _mali_osk_profiling_add_event(profiling_channel, source1 << 8 | source0, value0, value1, 0, 0);
+}
+#endif /* #if defined(CONFIG_MALI400_PROFILING) */
diff --git a/drivers/gpu/arm/utgard/common/mali_group.h b/drivers/gpu/arm/utgard/common/mali_group.h
new file mode 100644
index 000000000000..705605ec1d9f
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_group.h
@@ -0,0 +1,467 @@
+/*
+ * Copyright (C) 2011-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_GROUP_H__
+#define __MALI_GROUP_H__
+
+#include "mali_osk.h"
+#include "mali_l2_cache.h"
+#include "mali_mmu.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_session.h"
+#include "mali_osk_profiling.h"
+
+/**
+ * @brief Default max runtime [ms] for a core job - used by timeout timers
+ */
+#define MALI_MAX_JOB_RUNTIME_DEFAULT 5000
+
+extern int mali_max_job_runtime;
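+
+/*
+ * mali_max_job_runtime is presumably wired up as a writable module parameter
+ * by the platform glue (an assumption), so the default above can be
+ * overridden at module load time.
+ */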
+
+#define MALI_MAX_NUMBER_OF_GROUPS 10
+#define MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS 8
+
+enum mali_group_state {
+ MALI_GROUP_STATE_INACTIVE,
+ MALI_GROUP_STATE_ACTIVATION_PENDING,
+ MALI_GROUP_STATE_ACTIVE,
+};
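+
+/*
+ * Expected state transitions, as inferred from mali_group_activate() and
+ * mali_group_set_active() below:
+ *
+ *	INACTIVE -> ACTIVATION_PENDING -> ACTIVE -> INACTIVE
+ *
+ * ACTIVATION_PENDING covers the window where the group cannot become active
+ * immediately, e.g. while waiting for power-up (an inference, not spelled
+ * out in this header).
+ */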
+
+/**
+ * This structure represents a render group.
+ * A render group is defined by all the cores that share the same Mali MMU.
+ */
+
+struct mali_group {
+ struct mali_mmu_core *mmu;
+ struct mali_session_data *session;
+
+ enum mali_group_state state;
+ mali_bool power_is_on;
+
+ mali_bool is_working;
+ unsigned long start_time; /* in ticks */
+
+ struct mali_gp_core *gp_core;
+ struct mali_gp_job *gp_running_job;
+
+ struct mali_pp_core *pp_core;
+ struct mali_pp_job *pp_running_job;
+ u32 pp_running_sub_job;
+
+ struct mali_pm_domain *pm_domain;
+
+ struct mali_l2_cache_core *l2_cache_core[2];
+ u32 l2_cache_core_ref_count[2];
+
+ /* Parent virtual group (if any) */
+ struct mali_group *parent_group;
+
+ struct mali_dlbu_core *dlbu_core;
+ struct mali_bcast_unit *bcast_core;
+
+	/* Used for working groups which need to be disabled */
+ mali_bool disable_requested;
+
+ /* Used by group to link child groups (for virtual group) */
+ _mali_osk_list_t group_list;
+
+	/* Used by the executor module to link groups in the same state */
+ _mali_osk_list_t executor_list;
+
+	/* Used by PM domains to link groups of the same domain */
+ _mali_osk_list_t pm_domain_list;
+
+ _mali_osk_wq_work_t *bottom_half_work_mmu;
+ _mali_osk_wq_work_t *bottom_half_work_gp;
+ _mali_osk_wq_work_t *bottom_half_work_pp;
+
+ _mali_osk_wq_work_t *oom_work_handler;
+ _mali_osk_timer_t *timeout_timer;
+};
+
+/** @brief Create a new Mali group object
+ *
+ * @return A pointer to a new group object
+ */
+struct mali_group *mali_group_create(struct mali_l2_cache_core *core,
+ struct mali_dlbu_core *dlbu,
+ struct mali_bcast_unit *bcast,
+ u32 domain_index);
+
+void mali_group_dump_status(struct mali_group *group);
+
+void mali_group_delete(struct mali_group *group);
+
+_mali_osk_errcode_t mali_group_add_mmu_core(struct mali_group *group,
+ struct mali_mmu_core *mmu_core);
+void mali_group_remove_mmu_core(struct mali_group *group);
+
+_mali_osk_errcode_t mali_group_add_gp_core(struct mali_group *group,
+ struct mali_gp_core *gp_core);
+void mali_group_remove_gp_core(struct mali_group *group);
+
+_mali_osk_errcode_t mali_group_add_pp_core(struct mali_group *group,
+ struct mali_pp_core *pp_core);
+void mali_group_remove_pp_core(struct mali_group *group);
+
+MALI_STATIC_INLINE const char *mali_group_core_description(
+ struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ if (NULL != group->pp_core) {
+ return mali_pp_core_description(group->pp_core);
+ } else {
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ return mali_gp_core_description(group->gp_core);
+ }
+}
+
+MALI_STATIC_INLINE mali_bool mali_group_is_virtual(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470))
+ return (NULL != group->dlbu_core);
+#else
+ return MALI_FALSE;
+#endif
+}
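+
+/*
+ * A group is virtual exactly when it owns a DLBU core; per the #if above this
+ * is only possible on Mali-450/470, where the DLBU distributes a single PP
+ * job across the physical member groups.
+ */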
+
+/** @brief Check if a group is a part of a virtual group or not
+ */
+MALI_STATIC_INLINE mali_bool mali_group_is_in_virtual(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470))
+ return (NULL != group->parent_group) ? MALI_TRUE : MALI_FALSE;
+#else
+ return MALI_FALSE;
+#endif
+}
+
+/** @brief Reset group
+ *
+ * This function will reset the entire group,
+ * including all the cores present in the group.
+ *
+ * @param group Pointer to the group to reset
+ */
+void mali_group_reset(struct mali_group *group);
+
+MALI_STATIC_INLINE struct mali_session_data *mali_group_get_session(
+ struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ return group->session;
+}
+
+MALI_STATIC_INLINE void mali_group_clear_session(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ if (NULL != group->session) {
+ mali_mmu_activate_empty_page_directory(group->mmu);
+ group->session = NULL;
+ }
+}
+
+enum mali_group_state mali_group_activate(struct mali_group *group);
+
+/*
+ * Change state from ACTIVATION_PENDING to ACTIVE.
+ * For a virtual group, all children need to be ACTIVE first.
+ */
+mali_bool mali_group_set_active(struct mali_group *group);
+
+/*
+ * @return MALI_TRUE means one or more domains can now be powered off,
+ * and caller should call either mali_pm_update_async() or
+ * mali_pm_update_sync() in order to do so.
+ */
+mali_bool mali_group_deactivate(struct mali_group *group);
+
+MALI_STATIC_INLINE enum mali_group_state mali_group_get_state(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return group->state;
+}
+
+MALI_STATIC_INLINE mali_bool mali_group_power_is_on(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ return group->power_is_on;
+}
+
+void mali_group_power_up(struct mali_group *group);
+void mali_group_power_down(struct mali_group *group);
+
+MALI_STATIC_INLINE void mali_group_set_disable_request(
+ struct mali_group *group, mali_bool disable)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ group->disable_requested = disable;
+
+	/*
+	 * When the disable_requested flag of a child group is set to TRUE, the
+	 * disable_requested flag of its parent group must be set to TRUE as well.
+	 * Conversely, the parent group's flag may only be cleared to FALSE once
+	 * all of its child groups have cleared their flags to FALSE.
+	 */
+ if (NULL != group->parent_group && MALI_TRUE == disable) {
+ group->parent_group->disable_requested = disable;
+ }
+}
+
+MALI_STATIC_INLINE mali_bool mali_group_disable_requested(
+ struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return group->disable_requested;
+}
+
+/** @brief Virtual groups */
+void mali_group_add_group(struct mali_group *parent, struct mali_group *child);
+struct mali_group *mali_group_acquire_group(struct mali_group *parent);
+void mali_group_remove_group(struct mali_group *parent, struct mali_group *child);
+
+/** @brief Checks if the group is working.
+ */
+MALI_STATIC_INLINE mali_bool mali_group_is_working(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ if (mali_group_is_in_virtual(group)) {
+ struct mali_group *tmp_group = mali_executor_get_virtual_group();
+ return tmp_group->is_working;
+ }
+ return group->is_working;
+}
+
+MALI_STATIC_INLINE struct mali_gp_job *mali_group_get_running_gp_job(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return group->gp_running_job;
+}
+
+/** @brief Zap MMU TLB on all groups
+ *
+ * Zap TLB on group if \a session is active.
+ */
+mali_bool mali_group_zap_session(struct mali_group *group,
+ struct mali_session_data *session);
+
+/** @brief Get pointer to GP core object
+ */
+MALI_STATIC_INLINE struct mali_gp_core *mali_group_get_gp_core(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ return group->gp_core;
+}
+
+/** @brief Get pointer to PP core object
+ */
+MALI_STATIC_INLINE struct mali_pp_core *mali_group_get_pp_core(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ return group->pp_core;
+}
+
+/** @brief Start GP job
+ */
+void mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job);
+
+void mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job, u32 sub_job);
+
+/** @brief Start a virtual PP job on a virtual group
+*/
+void mali_group_start_job_on_virtual(struct mali_group *group, struct mali_pp_job *job, u32 first_subjob, u32 last_subjob);
+
+
+/** @brief Start a particular subjob of a PP job on a specific PP group
+*/
+void mali_group_start_job_on_group(struct mali_group *group, struct mali_pp_job *job, u32 subjob);
+
+
+/** @brief Remove all unused groups from the tmp_unused group list, leaving the
+ * virtual group in a consistent state.
+ */
+void mali_group_non_dlbu_job_done_virtual(struct mali_group *group);
+
+
+/** @brief Resume GP job that suspended waiting for more heap memory
+ */
+void mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr);
+
+MALI_STATIC_INLINE enum mali_interrupt_result mali_group_get_interrupt_result_gp(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return mali_gp_get_interrupt_result(group->gp_core);
+}
+
+MALI_STATIC_INLINE enum mali_interrupt_result mali_group_get_interrupt_result_pp(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return mali_pp_get_interrupt_result(group->pp_core);
+}
+
+MALI_STATIC_INLINE enum mali_interrupt_result mali_group_get_interrupt_result_mmu(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->mmu);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return mali_mmu_get_interrupt_result(group->mmu);
+}
+
+MALI_STATIC_INLINE mali_bool mali_group_gp_is_active(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return mali_gp_is_active(group->gp_core);
+}
+
+MALI_STATIC_INLINE mali_bool mali_group_pp_is_active(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return mali_pp_is_active(group->pp_core);
+}
+
+MALI_STATIC_INLINE mali_bool mali_group_has_timed_out(struct mali_group *group)
+{
+ unsigned long time_cost;
+ struct mali_group *tmp_group = group;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+	/* If the group is in a virtual group, use the virtual group's start time */
+ if (mali_group_is_in_virtual(group)) {
+ tmp_group = mali_executor_get_virtual_group();
+ }
+
+ time_cost = _mali_osk_time_tickcount() - tmp_group->start_time;
+ if (_mali_osk_time_mstoticks(mali_max_job_runtime) <= time_cost) {
+ /*
+ * current tick is at or after timeout end time,
+ * so this is a valid timeout
+ */
+ return MALI_TRUE;
+ } else {
+ /*
+ * Not a valid timeout. A HW interrupt probably beat
+ * us to it, and the timer wasn't properly deleted
+ * (async deletion used due to atomic context).
+ */
+ return MALI_FALSE;
+ }
+}
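+
+/*
+ * Worked example (assuming HZ == 100 and mali_max_job_runtime == 5000 ms,
+ * i.e. a 500-tick timeout): if the HW interrupt completed the job after 480
+ * ticks and the async timer deletion lost the race, time_cost is 480 < 500
+ * and the stale timeout is correctly ignored.
+ */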
+
+MALI_STATIC_INLINE void mali_group_mask_all_interrupts_gp(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return mali_gp_mask_all_interrupts(group->gp_core);
+}
+
+MALI_STATIC_INLINE void mali_group_mask_all_interrupts_pp(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return mali_pp_mask_all_interrupts(group->pp_core);
+}
+
+MALI_STATIC_INLINE void mali_group_enable_interrupts_gp(
+ struct mali_group *group,
+ enum mali_interrupt_result exceptions)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ mali_gp_enable_interrupts(group->gp_core, exceptions);
+}
+
+MALI_STATIC_INLINE void mali_group_schedule_bottom_half_gp(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ _mali_osk_wq_schedule_work(group->bottom_half_work_gp);
+}
+
+MALI_STATIC_INLINE void mali_group_schedule_oom_work_handler(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ _mali_osk_wq_schedule_work(group->oom_work_handler);
+}
+
+MALI_STATIC_INLINE void mali_group_schedule_bottom_half_pp(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+ _mali_osk_wq_schedule_work(group->bottom_half_work_pp);
+}
+
+MALI_STATIC_INLINE void mali_group_schedule_bottom_half_mmu(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->mmu);
+ _mali_osk_wq_schedule_work(group->bottom_half_work_mmu);
+}
+
+struct mali_pp_job *mali_group_complete_pp(struct mali_group *group, mali_bool success, u32 *sub_job);
+
+struct mali_gp_job *mali_group_complete_gp(struct mali_group *group, mali_bool success);
+
+#if defined(CONFIG_MALI400_PROFILING)
+MALI_STATIC_INLINE void mali_group_oom(struct mali_group *group)
+{
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SUSPEND |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
+ 0, 0, 0, 0, 0);
+}
+#endif
+
+struct mali_group *mali_group_get_glob_group(u32 index);
+u32 mali_group_get_glob_num_groups(void);
+
+u32 mali_group_dump_state(struct mali_group *group, char *buf, u32 size);
+
+
+_mali_osk_errcode_t mali_group_upper_half_mmu(void *data);
+_mali_osk_errcode_t mali_group_upper_half_gp(void *data);
+_mali_osk_errcode_t mali_group_upper_half_pp(void *data);
+
+MALI_STATIC_INLINE mali_bool mali_group_is_empty(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT(mali_group_is_virtual(group));
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return _mali_osk_list_empty(&group->group_list);
+}
+
+#endif /* __MALI_GROUP_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_hw_core.c b/drivers/gpu/arm/utgard/common/mali_hw_core.c
new file mode 100644
index 000000000000..c90cf38d8516
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_hw_core.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2011-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_hw_core.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_osk_mali.h"
+
+_mali_osk_errcode_t mali_hw_core_create(struct mali_hw_core *core, const _mali_osk_resource_t *resource, u32 reg_size)
+{
+ core->phys_addr = resource->base;
+ core->phys_offset = resource->base - _mali_osk_resource_base_address();
+ core->description = resource->description;
+ core->size = reg_size;
+
+ MALI_DEBUG_ASSERT(core->phys_offset < core->phys_addr);
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_mem_reqregion(core->phys_addr, core->size, core->description)) {
+ core->mapped_registers = _mali_osk_mem_mapioregion(core->phys_addr, core->size, core->description);
+ if (NULL != core->mapped_registers) {
+ return _MALI_OSK_ERR_OK;
+ } else {
+ MALI_PRINT_ERROR(("Failed to map memory region for core %s at phys_addr 0x%08X\n", core->description, core->phys_addr));
+ }
+ _mali_osk_mem_unreqregion(core->phys_addr, core->size);
+ } else {
+ MALI_PRINT_ERROR(("Failed to request memory region for core %s at phys_addr 0x%08X\n", core->description, core->phys_addr));
+ }
+
+ return _MALI_OSK_ERR_FAULT;
+}
+
+void mali_hw_core_delete(struct mali_hw_core *core)
+{
+ if (NULL != core->mapped_registers) {
+ _mali_osk_mem_unmapioregion(core->phys_addr, core->size, core->mapped_registers);
+ core->mapped_registers = NULL;
+ }
+ _mali_osk_mem_unreqregion(core->phys_addr, core->size);
+}
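+
+/* Illustrative lifecycle sketch (not part of the driver; the type name and
+ * register size are hypothetical): core-specific modules embed
+ * struct mali_hw_core and bracket its life with the two functions above.
+ *
+ * struct mali_example_core { struct mali_hw_core hw_core; };
+ *
+ * _mali_osk_errcode_t mali_example_core_init(struct mali_example_core *c,
+ *   const _mali_osk_resource_t *res)
+ * {
+ *  return mali_hw_core_create(&c->hw_core, res, 0x1000);
+ * }
+ *
+ * void mali_example_core_term(struct mali_example_core *c)
+ * {
+ *  mali_hw_core_delete(&c->hw_core);
+ * }
+ */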
diff --git a/drivers/gpu/arm/utgard/common/mali_hw_core.h b/drivers/gpu/arm/utgard/common/mali_hw_core.h
new file mode 100644
index 000000000000..ac2ffbedf308
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_hw_core.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2011-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_HW_CORE_H__
+#define __MALI_HW_CORE_H__
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/**
+ * The common parts for all Mali HW cores (GP, PP, MMU, L2 and PMU)
+ * This struct is embedded inside all core specific structs.
+ */
+struct mali_hw_core {
+ uintptr_t phys_addr; /**< Physical address of the registers */
+ u32 phys_offset; /**< Offset from start of Mali to registers */
+ u32 size; /**< Size of registers */
+ mali_io_address mapped_registers; /**< Virtual mapping of the registers */
+ const char *description; /**< Name of unit (as specified in device configuration) */
+};
+
+#define MALI_REG_POLL_COUNT_FAST 1000000
+#define MALI_REG_POLL_COUNT_SLOW 1000000
+
+/*
+ * GP and PP core translate their int_stat/rawstat into one of these
+ */
+enum mali_interrupt_result {
+ MALI_INTERRUPT_RESULT_NONE,
+ MALI_INTERRUPT_RESULT_SUCCESS,
+ MALI_INTERRUPT_RESULT_SUCCESS_VS,
+ MALI_INTERRUPT_RESULT_SUCCESS_PLBU,
+ MALI_INTERRUPT_RESULT_OOM,
+ MALI_INTERRUPT_RESULT_ERROR
+};
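+
+/* Illustrative sketch (the IRQ bits are hypothetical, not this driver's
+ * register layout): a core-specific handler folds its raw interrupt status
+ * into the enum above roughly like this:
+ *
+ * if (rawstat & EXAMPLE_IRQ_JOB_DONE_BIT)
+ *  return MALI_INTERRUPT_RESULT_SUCCESS;
+ * if (rawstat & EXAMPLE_IRQ_OOM_BIT)
+ *  return MALI_INTERRUPT_RESULT_OOM;
+ * if (0 != rawstat)
+ *  return MALI_INTERRUPT_RESULT_ERROR;
+ * return MALI_INTERRUPT_RESULT_NONE;
+ */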
+
+_mali_osk_errcode_t mali_hw_core_create(struct mali_hw_core *core, const _mali_osk_resource_t *resource, u32 reg_size);
+void mali_hw_core_delete(struct mali_hw_core *core);
+
+MALI_STATIC_INLINE u32 mali_hw_core_register_read(struct mali_hw_core *core, u32 relative_address)
+{
+ u32 read_val;
+ read_val = _mali_osk_mem_ioread32(core->mapped_registers, relative_address);
+ MALI_DEBUG_PRINT(6, ("register_read for core %s, relative addr=0x%04X, val=0x%08X\n",
+ core->description, relative_address, read_val));
+ return read_val;
+}
+
+MALI_STATIC_INLINE void mali_hw_core_register_write_relaxed(struct mali_hw_core *core, u32 relative_address, u32 new_val)
+{
+ MALI_DEBUG_PRINT(6, ("register_write_relaxed for core %s, relative addr=0x%04X, val=0x%08X\n",
+ core->description, relative_address, new_val));
+ _mali_osk_mem_iowrite32_relaxed(core->mapped_registers, relative_address, new_val);
+}
+
+/* Conditionally write a register.
+ * The register will only be written if the new value is different from the old_value.
+ * If the new value is different, the old value will also be updated */
+MALI_STATIC_INLINE void mali_hw_core_register_write_relaxed_conditional(struct mali_hw_core *core, u32 relative_address, u32 new_val, const u32 old_val)
+{
+ MALI_DEBUG_PRINT(6, ("register_write_relaxed for core %s, relative addr=0x%04X, val=0x%08X\n",
+ core->description, relative_address, new_val));
+ if (old_val != new_val) {
+ _mali_osk_mem_iowrite32_relaxed(core->mapped_registers, relative_address, new_val);
+ }
+}
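+
+/* Usage sketch (illustrative; the register offset and the shadow variable
+ * are hypothetical): callers that shadow a register value in memory can use
+ * the conditional variant to skip redundant bus writes:
+ *
+ * mali_hw_core_register_write_relaxed_conditional(core,
+ *   EXAMPLE_REG_IRQ_MASK, new_mask, state->cached_irq_mask);
+ * state->cached_irq_mask = new_mask;
+ */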
+
+MALI_STATIC_INLINE void mali_hw_core_register_write(struct mali_hw_core *core, u32 relative_address, u32 new_val)
+{
+ MALI_DEBUG_PRINT(6, ("register_write for core %s, relative addr=0x%04X, val=0x%08X\n",
+ core->description, relative_address, new_val));
+ _mali_osk_mem_iowrite32(core->mapped_registers, relative_address, new_val);
+}
+
+MALI_STATIC_INLINE void mali_hw_core_register_write_array_relaxed(struct mali_hw_core *core, u32 relative_address, u32 *write_array, u32 nr_of_regs)
+{
+ u32 i;
+ MALI_DEBUG_PRINT(6, ("register_write_array: for core %s, relative addr=0x%04X, nr of regs=%u\n",
+ core->description, relative_address, nr_of_regs));
+
+ /* Do not use burst writes against the registers */
+ for (i = 0; i < nr_of_regs; i++) {
+ mali_hw_core_register_write_relaxed(core, relative_address + i * 4, write_array[i]);
+ }
+}
+
+/* Conditionally write a set of registers.
+ * The register will only be written if the new value is different from the old_value.
+ * If the new value is different, the old value will also be updated */
+MALI_STATIC_INLINE void mali_hw_core_register_write_array_relaxed_conditional(struct mali_hw_core *core, u32 relative_address, u32 *write_array, u32 nr_of_regs, const u32 *old_array)
+{
+ u32 i;
+ MALI_DEBUG_PRINT(6, ("register_write_array: for core %s, relative addr=0x%04X, nr of regs=%u\n",
+ core->description, relative_address, nr_of_regs));
+
+ /* Do not use burst writes against the registers */
+ for (i = 0; i < nr_of_regs; i++) {
+ if (old_array[i] != write_array[i]) {
+ mali_hw_core_register_write_relaxed(core, relative_address + i * 4, write_array[i]);
+ }
+ }
+}
+
+#endif /* __MALI_HW_CORE_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_kernel_common.h b/drivers/gpu/arm/utgard/common/mali_kernel_common.h
new file mode 100644
index 000000000000..990cf3ac78e0
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_kernel_common.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2010, 2012-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_COMMON_H__
+#define __MALI_KERNEL_COMMON_H__
+
+#include "mali_osk.h"
+
+/* Make sure debug is defined when it should be */
+#ifndef DEBUG
+#if defined(_DEBUG)
+#define DEBUG
+#endif
+#endif
+
+/* This file includes several useful macros for error checking, debugging and printing.
+ * - MALI_PRINTF(...) Do not use this function: it will be included in Release builds.
+ * - MALI_DEBUG_PRINT(nr, (X)) Prints the second argument if nr <= mali_debug_level.
+ * - MALI_DEBUG_PRINT_ERROR( (X) ) Prints an error text, a source trace, and the given error message.
+ * - MALI_DEBUG_ASSERT(condition) Halts in the debugger if the asserted condition is false.
+ * - MALI_DEBUG_PRINT_ASSERT(condition, (X)) As above, but also prints the given message.
+ * - MALI_DEBUG_ASSERT_POINTER(pointer) Triggers if the pointer is a NULL pointer.
+ * - MALI_DEBUG_CODE( X ) The code inside the macro is only compiled in Debug builds.
+ *
+ * The (X) means that you must add an extra parenthesis around the argument list.
+ *
+ * The printf function MALI_PRINTF(...) is routed to _mali_osk_dbgmsg.
+ *
+ * Suggested range for the debug level is [1:6] where
+ * [1:2] are messages with highest priority, indicating possible errors.
+ * [3:4] are messages with medium priority, outputting important variables.
+ * [5:6] are messages with low priority, used during extensive debugging.
+ */
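+
+/* Example usage of the macros above (values are illustrative only):
+ *
+ * MALI_DEBUG_PRINT(3, ("Core %s: version 0x%08X\n", name, version));
+ * MALI_DEBUG_ASSERT_POINTER(session);
+ * MALI_DEBUG_ASSERT(0 < num_cores);
+ * MALI_DEBUG_CODE(u32 debug_only_counter = 0);
+ */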
+
+/**
+ * Fundamental error macro. Reports an error code. This is abstracted to allow us to
+ * easily switch to a different error reporting method if we want, and also to allow
+ * us to search for error returns easily.
+ *
+ * Note no closing semicolon - this is supplied in typical usage:
+ *
+ * MALI_ERROR(MALI_ERROR_OUT_OF_MEMORY);
+ */
+#define MALI_ERROR(error_code) return (error_code)
+
+/**
+ * Basic error macro, to indicate success.
+ * Note no closing semicolon - this is supplied in typical usage:
+ *
+ * MALI_SUCCESS;
+ */
+#define MALI_SUCCESS MALI_ERROR(_MALI_OSK_ERR_OK)
+
+/**
+ * Basic error macro. This checks whether the given condition is true, and if not returns
+ * from this function with the supplied error code. This is a macro so that we can override it
+ * for stress testing.
+ *
+ * Note that this uses the do-while-0 wrapping to ensure that we don't get problems with dangling
+ * else clauses. Note also no closing semicolon - this is supplied in typical usage:
+ *
+ * MALI_CHECK((p!=NULL), ERROR_NO_OBJECT);
+ */
+#define MALI_CHECK(condition, error_code) do { if(!(condition)) MALI_ERROR(error_code); } while(0)
+
+/**
+ * Error propagation macro. If the expression given is anything other than
+ * _MALI_OSK_ERR_OK, then the value is returned from the enclosing function
+ * as an error code. This effectively acts as a guard clause, and propagates
+ * error values up the call stack. This uses a temporary value to ensure that
+ * the error expression is not evaluated twice.
+ * If the counter for forcing a failure has been set using _mali_force_error,
+ * this error will be returned without evaluating the expression in
+ * MALI_CHECK_NO_ERROR
+ */
+#define MALI_CHECK_NO_ERROR(expression) \
+ do { _mali_osk_errcode_t _check_no_error_result=(expression); \
+ if(_check_no_error_result != _MALI_OSK_ERR_OK) \
+ MALI_ERROR(_check_no_error_result); \
+ } while(0)
+
+/**
+ * Pointer check macro. Checks non-null pointer.
+ */
+#define MALI_CHECK_NON_NULL(pointer, error_code) MALI_CHECK( ((pointer)!=NULL), (error_code) )
+
+/**
+ * Error macro with goto. This checks whether the given condition is true, and if not jumps
+ * to the specified label using a goto. The label must therefore be local to the function in
+ * which this macro appears. This is most usually used to execute some clean-up code before
+ * exiting with a call to ERROR.
+ *
+ * Like the other macros, this is a macro to allow us to override the condition if we wish,
+ * e.g. to force an error during stress testing.
+ */
+#define MALI_CHECK_GOTO(condition, label) do { if(!(condition)) goto label; } while(0)
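+
+/* Typical cleanup pattern with MALI_CHECK_GOTO (sketch; names hypothetical):
+ *
+ * a = create_a();
+ * MALI_CHECK_GOTO(NULL != a, fail_a);
+ * b = create_b();
+ * MALI_CHECK_GOTO(NULL != b, fail_b);
+ * return _MALI_OSK_ERR_OK;
+ * fail_b:
+ * destroy_a(a);
+ * fail_a:
+ * return _MALI_OSK_ERR_NOMEM;
+ */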
+
+/**
+ * Explicitly ignore a parameter passed into a function, to suppress compiler warnings.
+ * Should only be used with parameter names.
+ */
+#define MALI_IGNORE(x) x=x
+
+#if defined(CONFIG_MALI_QUIET)
+#define MALI_PRINTF(args)
+#else
+#define MALI_PRINTF(args) _mali_osk_dbgmsg args;
+#endif
+
+#define MALI_PRINT_ERROR(args) do { \
+ MALI_PRINTF(("Mali: ERR: %s\n", __FILE__)); \
+ MALI_PRINTF((" %s()%4d\n ", __FUNCTION__, __LINE__)); \
+ MALI_PRINTF(args); \
+ MALI_PRINTF(("\n")); \
+ } while(0)
+
+#define MALI_PRINT(args) do{ \
+ MALI_PRINTF(("Mali: ")); \
+ MALI_PRINTF(args); \
+ } while (0)
+
+#ifdef DEBUG
+#ifndef mali_debug_level
+extern int mali_debug_level;
+#endif
+
+#define MALI_DEBUG_CODE(code) code
+#define MALI_DEBUG_PRINT(level, args) do { \
+ if((level) <= mali_debug_level)\
+ {MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); } \
+ } while (0)
+
+#define MALI_DEBUG_PRINT_ERROR(args) MALI_PRINT_ERROR(args)
+
+#define MALI_DEBUG_PRINT_IF(level,condition,args) \
+ if((condition)&&((level) <= mali_debug_level))\
+ {MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); }
+
+#define MALI_DEBUG_PRINT_ELSE(level, args)\
+ else if((level) <= mali_debug_level)\
+ { MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); }
+
+/**
+ * @note these variants of DEBUG ASSERTS will cause a debugger breakpoint
+ * to be entered (see _mali_osk_break() ). An alternative would be to call
+ * _mali_osk_abort(), on OSs that support it.
+ */
+#define MALI_DEBUG_PRINT_ASSERT(condition, args) do {if( !(condition)) { MALI_PRINT_ERROR(args); _mali_osk_break(); } } while(0)
+#define MALI_DEBUG_ASSERT_POINTER(pointer) do {if( (pointer)== NULL) {MALI_PRINT_ERROR(("NULL pointer " #pointer)); _mali_osk_break();} } while(0)
+#define MALI_DEBUG_ASSERT(condition) do {if( !(condition)) {MALI_PRINT_ERROR(("ASSERT failed: " #condition )); _mali_osk_break();} } while(0)
+
+#else /* DEBUG */
+
+#define MALI_DEBUG_CODE(code)
+#define MALI_DEBUG_PRINT(level, args) do {} while(0)
+#define MALI_DEBUG_PRINT_ERROR(args) do {} while(0)
+#define MALI_DEBUG_PRINT_IF(level, condition, args) do {} while(0)
+#define MALI_DEBUG_PRINT_ELSE(level, args) do {} while(0)
+#define MALI_DEBUG_PRINT_ASSERT(condition,args) do {} while(0)
+#define MALI_DEBUG_ASSERT_POINTER(pointer) do {} while(0)
+#define MALI_DEBUG_ASSERT(condition) do {} while(0)
+
+#endif /* DEBUG */
+
+/**
+ * Variables from user space cannot be dereferenced from kernel space; tagging them
+ * with __user allows the GCC compiler to generate a warning. Other compilers may
+ * not support this, so we define it here as an empty macro if the compiler doesn't
+ * define it.
+ */
+#ifndef __user
+#define __user
+#endif
+
+#endif /* __MALI_KERNEL_COMMON_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_kernel_core.c b/drivers/gpu/arm/utgard/common/mali_kernel_core.c
new file mode 100644
index 000000000000..b9a6b7a0a277
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_kernel_core.c
@@ -0,0 +1,1314 @@
+/*
+ * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_ukk.h"
+#include "mali_kernel_core.h"
+#include "mali_memory.h"
+#include "mali_mem_validation.h"
+#include "mali_mmu.h"
+#include "mali_mmu_page_directory.h"
+#include "mali_dlbu.h"
+#include "mali_broadcast.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_executor.h"
+#include "mali_pp_job.h"
+#include "mali_group.h"
+#include "mali_pm.h"
+#include "mali_pmu.h"
+#include "mali_scheduler.h"
+#include "mali_kernel_utilization.h"
+#include "mali_l2_cache.h"
+#include "mali_timeline.h"
+#include "mali_soft_job.h"
+#include "mali_pm_domain.h"
+#if defined(CONFIG_MALI400_PROFILING)
+#include "mali_osk_profiling.h"
+#endif
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+#include "mali_profiling_internal.h"
+#endif
+#include "mali_control_timer.h"
+#include "mali_dvfs_policy.h"
+#include <linux/sched.h>
+
+#define MALI_SHARED_MEMORY_DEFAULT_SIZE 0xffffffff
+
+/* Mali GPU memory. Real values come from module parameter or from device specific data */
+unsigned int mali_dedicated_mem_start = 0;
+unsigned int mali_dedicated_mem_size = 0;
+
+/* Default shared memory size is 0xffffffff bytes, i.e. just under 4GB (the largest value a u32 can hold). */
+unsigned int mali_shared_mem_size = MALI_SHARED_MEMORY_DEFAULT_SIZE;
+
+/* Frame buffer memory to be accessible by Mali GPU */
+int mali_fb_start = 0;
+int mali_fb_size = 0;
+
+/* Mali max job runtime */
+extern int mali_max_job_runtime;
+
+/** Start profiling from module load? */
+int mali_boot_profiling = 0;
+
+/** Limits for the number of PP cores behind each L2 cache. */
+int mali_max_pp_cores_group_1 = 0xFF;
+int mali_max_pp_cores_group_2 = 0xFF;
+
+int mali_inited_pp_cores_group_1 = 0;
+int mali_inited_pp_cores_group_2 = 0;
+
+static _mali_product_id_t global_product_id = _MALI_PRODUCT_ID_UNKNOWN;
+static uintptr_t global_gpu_base_address = 0;
+static u32 global_gpu_major_version = 0;
+static u32 global_gpu_minor_version = 0;
+
+mali_bool mali_gpu_class_is_mali450 = MALI_FALSE;
+mali_bool mali_gpu_class_is_mali470 = MALI_FALSE;
+
+static _mali_osk_errcode_t mali_set_global_gpu_base_address(void)
+{
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+
+ global_gpu_base_address = _mali_osk_resource_base_address();
+ if (0 == global_gpu_base_address) {
+ err = _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ }
+
+ return err;
+}
+
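+/* Map a PP core's register offset (relative to the GPU base address) to its
+ * one-hot broadcast id, so that ids can be OR-ed together into a broadcast
+ * mask. Each core matches two offsets because the PP register ranges also
+ * appear at aliased addresses (the fall-through cases below); e.g. the PP
+ * core at offset 0x0A000 or its alias 0x22000 gets id 0x02. */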
+static u32 mali_get_bcast_id(_mali_osk_resource_t *resource_pp)
+{
+ switch (resource_pp->base - global_gpu_base_address) {
+ case 0x08000:
+ case 0x20000: /* fall-through for aliased mapping */
+ return 0x01;
+ case 0x0A000:
+ case 0x22000: /* fall-through for aliased mapping */
+ return 0x02;
+ case 0x0C000:
+ case 0x24000: /* fall-through for aliased mapping */
+ return 0x04;
+ case 0x0E000:
+ case 0x26000: /* fall-through for aliased mapping */
+ return 0x08;
+ case 0x28000:
+ return 0x10;
+ case 0x2A000:
+ return 0x20;
+ case 0x2C000:
+ return 0x40;
+ case 0x2E000:
+ return 0x80;
+ default:
+ return 0;
+ }
+}
+
+static _mali_osk_errcode_t mali_parse_product_info(void)
+{
+ _mali_osk_resource_t first_pp_resource;
+
+ /* Find the first PP core resource (again) */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_PP0, &first_pp_resource)) {
+ /* Create a dummy PP object for this core so that we can read the version register */
+ struct mali_group *group = mali_group_create(NULL, NULL, NULL, MALI_DOMAIN_INDEX_PP0);
+ if (NULL != group) {
+ struct mali_pp_core *pp_core = mali_pp_create(&first_pp_resource, group, MALI_FALSE, mali_get_bcast_id(&first_pp_resource));
+ if (NULL != pp_core) {
+ u32 pp_version;
+
+ pp_version = mali_pp_core_get_version(pp_core);
+
+ mali_group_delete(group);
+
+ global_gpu_major_version = (pp_version >> 8) & 0xFF;
+ global_gpu_minor_version = pp_version & 0xFF;
+
+ switch (pp_version >> 16) {
+ case MALI200_PP_PRODUCT_ID:
+ global_product_id = _MALI_PRODUCT_ID_MALI200;
+ MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-200 r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+ MALI_PRINT_ERROR(("Mali-200 is not supported by this driver.\n"));
+ _mali_osk_abort();
+ break;
+ case MALI300_PP_PRODUCT_ID:
+ global_product_id = _MALI_PRODUCT_ID_MALI300;
+ MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-300 r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+ break;
+ case MALI400_PP_PRODUCT_ID:
+ global_product_id = _MALI_PRODUCT_ID_MALI400;
+ MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-400 MP r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+ break;
+ case MALI450_PP_PRODUCT_ID:
+ global_product_id = _MALI_PRODUCT_ID_MALI450;
+ MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-450 MP r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+ break;
+ case MALI470_PP_PRODUCT_ID:
+ global_product_id = _MALI_PRODUCT_ID_MALI470;
+ MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-470 MP r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+ break;
+ default:
+ MALI_DEBUG_PRINT(2, ("Found unknown Mali GPU (r%up%u)\n", global_gpu_major_version, global_gpu_minor_version));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ return _MALI_OSK_ERR_OK;
+ } else {
+ MALI_PRINT_ERROR(("Failed to create initial PP object\n"));
+ }
+ } else {
+ MALI_PRINT_ERROR(("Failed to create initial group object\n"));
+ }
+ } else {
+ MALI_PRINT_ERROR(("First PP core not specified in config file\n"));
+ }
+
+ return _MALI_OSK_ERR_FAULT;
+}
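+
+/* Worked example of the decode above: a pp_version equal to
+ * (MALI450_PP_PRODUCT_ID << 16) | (1 << 8) | 2 identifies a Mali-450 with
+ * major version 1 and minor version 2, reported in the logs as "r1p2". */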
+
+static void mali_delete_groups(void)
+{
+ struct mali_group *group;
+
+ group = mali_group_get_glob_group(0);
+ while (NULL != group) {
+ mali_group_delete(group);
+ group = mali_group_get_glob_group(0);
+ }
+
+ MALI_DEBUG_ASSERT(0 == mali_group_get_glob_num_groups());
+}
+
+static void mali_delete_l2_cache_cores(void)
+{
+ struct mali_l2_cache_core *l2;
+
+ l2 = mali_l2_cache_core_get_glob_l2_core(0);
+ while (NULL != l2) {
+ mali_l2_cache_delete(l2);
+ l2 = mali_l2_cache_core_get_glob_l2_core(0);
+ }
+
+ MALI_DEBUG_ASSERT(0 == mali_l2_cache_core_get_glob_num_l2_cores());
+}
+
+static struct mali_l2_cache_core *mali_create_l2_cache_core(_mali_osk_resource_t *resource, u32 domain_index)
+{
+ struct mali_l2_cache_core *l2_cache = NULL;
+
+ if (NULL != resource) {
+
+ MALI_DEBUG_PRINT(3, ("Found L2 cache %s\n", resource->description));
+
+ l2_cache = mali_l2_cache_create(resource, domain_index);
+ if (NULL == l2_cache) {
+ MALI_PRINT_ERROR(("Failed to create L2 cache object\n"));
+ return NULL;
+ }
+ }
+ MALI_DEBUG_PRINT(3, ("Created L2 cache core object\n"));
+
+ return l2_cache;
+}
+
+static _mali_osk_errcode_t mali_parse_config_l2_cache(void)
+{
+ struct mali_l2_cache_core *l2_cache = NULL;
+
+ if (mali_is_mali400()) {
+ _mali_osk_resource_t l2_resource;
+ if (_MALI_OSK_ERR_OK != _mali_osk_resource_find(MALI400_OFFSET_L2_CACHE0, &l2_resource)) {
+ MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache in config file\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ l2_cache = mali_create_l2_cache_core(&l2_resource, MALI_DOMAIN_INDEX_L20);
+ if (NULL == l2_cache) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ } else if (mali_is_mali450()) {
+ /*
+ * L2 for GP at 0x10000
+ * L2 for PP0-3 at 0x01000
+ * L2 for PP4-7 at 0x11000 (optional)
+ */
+
+ _mali_osk_resource_t l2_gp_resource;
+ _mali_osk_resource_t l2_pp_grp0_resource;
+ _mali_osk_resource_t l2_pp_grp1_resource;
+
+ /* Make cluster for GP's L2 */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI450_OFFSET_L2_CACHE0, &l2_gp_resource)) {
+ MALI_DEBUG_PRINT(3, ("Creating Mali-450 L2 cache core for GP\n"));
+ l2_cache = mali_create_l2_cache_core(&l2_gp_resource, MALI_DOMAIN_INDEX_L20);
+ if (NULL == l2_cache) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ } else {
+ MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache for GP in config file\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Find corresponding l2 domain */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI450_OFFSET_L2_CACHE1, &l2_pp_grp0_resource)) {
+ MALI_DEBUG_PRINT(3, ("Creating Mali-450 L2 cache core for PP group 0\n"));
+ l2_cache = mali_create_l2_cache_core(&l2_pp_grp0_resource, MALI_DOMAIN_INDEX_L21);
+ if (NULL == l2_cache) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ } else {
+ MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache for PP group 0 in config file\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Second PP core group is optional, don't fail if we don't find it */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI450_OFFSET_L2_CACHE2, &l2_pp_grp1_resource)) {
+ MALI_DEBUG_PRINT(3, ("Creating Mali-450 L2 cache core for PP group 1\n"));
+ l2_cache = mali_create_l2_cache_core(&l2_pp_grp1_resource, MALI_DOMAIN_INDEX_L22);
+ if (NULL == l2_cache) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+ } else if (mali_is_mali470()) {
+ _mali_osk_resource_t l2c1_resource;
+
+ /* Make cluster for L2C1 */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI470_OFFSET_L2_CACHE1, &l2c1_resource)) {
+ MALI_DEBUG_PRINT(3, ("Creating Mali-470 L2 cache 1\n"));
+ l2_cache = mali_create_l2_cache_core(&l2c1_resource, MALI_DOMAIN_INDEX_L21);
+ if (NULL == l2_cache) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ } else {
+ MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache for L2C1\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+static struct mali_group *mali_create_group(struct mali_l2_cache_core *cache,
+ _mali_osk_resource_t *resource_mmu,
+ _mali_osk_resource_t *resource_gp,
+ _mali_osk_resource_t *resource_pp,
+ u32 domain_index)
+{
+ struct mali_mmu_core *mmu;
+ struct mali_group *group;
+
+ MALI_DEBUG_PRINT(3, ("Starting new group for MMU %s\n", resource_mmu->description));
+
+ /* Create the group object */
+ group = mali_group_create(cache, NULL, NULL, domain_index);
+ if (NULL == group) {
+ MALI_PRINT_ERROR(("Failed to create group object for MMU %s\n", resource_mmu->description));
+ return NULL;
+ }
+
+ /* Create the MMU object inside group */
+ mmu = mali_mmu_create(resource_mmu, group, MALI_FALSE);
+ if (NULL == mmu) {
+ MALI_PRINT_ERROR(("Failed to create MMU object\n"));
+ mali_group_delete(group);
+ return NULL;
+ }
+
+ if (NULL != resource_gp) {
+ /* Create the GP core object inside this group */
+ struct mali_gp_core *gp_core = mali_gp_create(resource_gp, group);
+ if (NULL == gp_core) {
+ /* No explicit core cleanup needed here: mali_group_delete() below tears down everything already attached to the group */
+ MALI_PRINT_ERROR(("Failed to create GP object\n"));
+ mali_group_delete(group);
+ return NULL;
+ }
+ }
+
+ if (NULL != resource_pp) {
+ struct mali_pp_core *pp_core;
+
+ /* Create the PP core object inside this group */
+ pp_core = mali_pp_create(resource_pp, group, MALI_FALSE, mali_get_bcast_id(resource_pp));
+ if (NULL == pp_core) {
+ /* No explicit core cleanup needed here: mali_group_delete() below tears down everything already attached to the group */
+ MALI_PRINT_ERROR(("Failed to create PP object\n"));
+ mali_group_delete(group);
+ return NULL;
+ }
+ }
+
+ return group;
+}
+
+static _mali_osk_errcode_t mali_create_virtual_group(_mali_osk_resource_t *resource_mmu_pp_bcast,
+ _mali_osk_resource_t *resource_pp_bcast,
+ _mali_osk_resource_t *resource_dlbu,
+ _mali_osk_resource_t *resource_bcast)
+{
+ struct mali_mmu_core *mmu_pp_bcast_core;
+ struct mali_pp_core *pp_bcast_core;
+ struct mali_dlbu_core *dlbu_core;
+ struct mali_bcast_unit *bcast_core;
+ struct mali_group *group;
+
+ MALI_DEBUG_PRINT(2, ("Starting new virtual group for MMU PP broadcast core %s\n", resource_mmu_pp_bcast->description));
+
+ /* Create the DLBU core object */
+ dlbu_core = mali_dlbu_create(resource_dlbu);
+ if (NULL == dlbu_core) {
+ MALI_PRINT_ERROR(("Failed to create DLBU object \n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Create the Broadcast unit core */
+ bcast_core = mali_bcast_unit_create(resource_bcast);
+ if (NULL == bcast_core) {
+ MALI_PRINT_ERROR(("Failed to create Broadcast unit object!\n"));
+ mali_dlbu_delete(dlbu_core);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Create the group object */
+#if defined(DEBUG)
+ /* Get a physical PP group to temporarily add to broadcast unit. IRQ
+ * verification needs a physical group in the broadcast unit to test
+ * the broadcast unit interrupt line. */
+ {
+ struct mali_group *phys_group = NULL;
+ int i;
+ for (i = 0; i < mali_group_get_glob_num_groups(); i++) {
+ phys_group = mali_group_get_glob_group(i);
+ if (NULL != mali_group_get_pp_core(phys_group)) break;
+ }
+ MALI_DEBUG_ASSERT(NULL != mali_group_get_pp_core(phys_group));
+
+ /* Add the group temporarily to the broadcast, and update the
+ * broadcast HW. Since the HW is not updated when removing the
+ * group the IRQ check will work when the virtual PP is created
+ * later.
+ *
+ * When the virtual group gets populated, the actually used
+ * groups will be added to the broadcast unit and the HW will
+ * be updated.
+ */
+ mali_bcast_add_group(bcast_core, phys_group);
+ mali_bcast_reset(bcast_core);
+ mali_bcast_remove_group(bcast_core, phys_group);
+ }
+#endif /* DEBUG */
+ group = mali_group_create(NULL, dlbu_core, bcast_core, MALI_DOMAIN_INDEX_DUMMY);
+ if (NULL == group) {
+ MALI_PRINT_ERROR(("Failed to create group object for MMU PP broadcast core %s\n", resource_mmu_pp_bcast->description));
+ mali_bcast_unit_delete(bcast_core);
+ mali_dlbu_delete(dlbu_core);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Create the MMU object inside group */
+ mmu_pp_bcast_core = mali_mmu_create(resource_mmu_pp_bcast, group, MALI_TRUE);
+ if (NULL == mmu_pp_bcast_core) {
+ MALI_PRINT_ERROR(("Failed to create MMU PP broadcast object\n"));
+ mali_group_delete(group);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Create the PP core object inside this group */
+ pp_bcast_core = mali_pp_create(resource_pp_bcast, group, MALI_TRUE, 0);
+ if (NULL == pp_bcast_core) {
+ /* No explicit core cleanup needed here: mali_group_delete() below tears down everything already attached to the group */
+ MALI_PRINT_ERROR(("Failed to create PP object\n"));
+ mali_group_delete(group);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+static _mali_osk_errcode_t mali_parse_config_groups(void)
+{
+ struct mali_group *group;
+ int cluster_id_gp = 0;
+ int cluster_id_pp_grp0 = 0;
+ int cluster_id_pp_grp1 = 0;
+ int i;
+
+ _mali_osk_resource_t resource_gp;
+ _mali_osk_resource_t resource_gp_mmu;
+ _mali_osk_resource_t resource_pp[8];
+ _mali_osk_resource_t resource_pp_mmu[8];
+ _mali_osk_resource_t resource_pp_mmu_bcast;
+ _mali_osk_resource_t resource_pp_bcast;
+ _mali_osk_resource_t resource_dlbu;
+ _mali_osk_resource_t resource_bcast;
+ _mali_osk_errcode_t resource_gp_found;
+ _mali_osk_errcode_t resource_gp_mmu_found;
+ _mali_osk_errcode_t resource_pp_found[8];
+ _mali_osk_errcode_t resource_pp_mmu_found[8];
+ _mali_osk_errcode_t resource_pp_mmu_bcast_found;
+ _mali_osk_errcode_t resource_pp_bcast_found;
+ _mali_osk_errcode_t resource_dlbu_found;
+ _mali_osk_errcode_t resource_bcast_found;
+
+ if (!(mali_is_mali400() || mali_is_mali450() || mali_is_mali470())) {
+ /* No known HW core */
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ if (MALI_MAX_JOB_RUNTIME_DEFAULT == mali_max_job_runtime) {
+ /* Max job runtime is not overridden by a module parameter, so use the device setting (if defined) */
+ _mali_osk_device_data data = { 0, };
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+ /* Use device specific settings (if defined) */
+ if (0 != data.max_job_runtime) {
+ mali_max_job_runtime = data.max_job_runtime;
+ }
+ }
+ }
+
+ if (mali_is_mali450()) {
+ /* Mali-450 has separate L2 caches for the GP and the PP core group(s) */
+ cluster_id_pp_grp0 = 1;
+ cluster_id_pp_grp1 = 2;
+ }
+
+ resource_gp_found = _mali_osk_resource_find(MALI_OFFSET_GP, &resource_gp);
+ resource_gp_mmu_found = _mali_osk_resource_find(MALI_OFFSET_GP_MMU, &resource_gp_mmu);
+ resource_pp_found[0] = _mali_osk_resource_find(MALI_OFFSET_PP0, &(resource_pp[0]));
+ resource_pp_found[1] = _mali_osk_resource_find(MALI_OFFSET_PP1, &(resource_pp[1]));
+ resource_pp_found[2] = _mali_osk_resource_find(MALI_OFFSET_PP2, &(resource_pp[2]));
+ resource_pp_found[3] = _mali_osk_resource_find(MALI_OFFSET_PP3, &(resource_pp[3]));
+ resource_pp_found[4] = _mali_osk_resource_find(MALI_OFFSET_PP4, &(resource_pp[4]));
+ resource_pp_found[5] = _mali_osk_resource_find(MALI_OFFSET_PP5, &(resource_pp[5]));
+ resource_pp_found[6] = _mali_osk_resource_find(MALI_OFFSET_PP6, &(resource_pp[6]));
+ resource_pp_found[7] = _mali_osk_resource_find(MALI_OFFSET_PP7, &(resource_pp[7]));
+ resource_pp_mmu_found[0] = _mali_osk_resource_find(MALI_OFFSET_PP0_MMU, &(resource_pp_mmu[0]));
+ resource_pp_mmu_found[1] = _mali_osk_resource_find(MALI_OFFSET_PP1_MMU, &(resource_pp_mmu[1]));
+ resource_pp_mmu_found[2] = _mali_osk_resource_find(MALI_OFFSET_PP2_MMU, &(resource_pp_mmu[2]));
+ resource_pp_mmu_found[3] = _mali_osk_resource_find(MALI_OFFSET_PP3_MMU, &(resource_pp_mmu[3]));
+ resource_pp_mmu_found[4] = _mali_osk_resource_find(MALI_OFFSET_PP4_MMU, &(resource_pp_mmu[4]));
+ resource_pp_mmu_found[5] = _mali_osk_resource_find(MALI_OFFSET_PP5_MMU, &(resource_pp_mmu[5]));
+ resource_pp_mmu_found[6] = _mali_osk_resource_find(MALI_OFFSET_PP6_MMU, &(resource_pp_mmu[6]));
+ resource_pp_mmu_found[7] = _mali_osk_resource_find(MALI_OFFSET_PP7_MMU, &(resource_pp_mmu[7]));
+
+ if (mali_is_mali450() || mali_is_mali470()) {
+ resource_bcast_found = _mali_osk_resource_find(MALI_OFFSET_BCAST, &resource_bcast);
+ resource_dlbu_found = _mali_osk_resource_find(MALI_OFFSET_DLBU, &resource_dlbu);
+ resource_pp_mmu_bcast_found = _mali_osk_resource_find(MALI_OFFSET_PP_BCAST_MMU, &resource_pp_mmu_bcast);
+ resource_pp_bcast_found = _mali_osk_resource_find(MALI_OFFSET_PP_BCAST, &resource_pp_bcast);
+
+ if (_MALI_OSK_ERR_OK != resource_bcast_found ||
+ _MALI_OSK_ERR_OK != resource_dlbu_found ||
+ _MALI_OSK_ERR_OK != resource_pp_mmu_bcast_found ||
+ _MALI_OSK_ERR_OK != resource_pp_bcast_found) {
+ /* Missing mandatory core(s) for Mali-450 or Mali-470 */
+ MALI_DEBUG_PRINT(2, ("Missing mandatory resources, Mali-450 needs DLBU, Broadcast unit, virtual PP core and virtual MMU\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+
+ if (_MALI_OSK_ERR_OK != resource_gp_found ||
+ _MALI_OSK_ERR_OK != resource_gp_mmu_found ||
+ _MALI_OSK_ERR_OK != resource_pp_found[0] ||
+ _MALI_OSK_ERR_OK != resource_pp_mmu_found[0]) {
+ /* Missing mandatory core(s) */
+ MALI_DEBUG_PRINT(2, ("Missing mandatory resource, need at least one GP and one PP, both with a separate MMU\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ MALI_DEBUG_ASSERT(1 <= mali_l2_cache_core_get_glob_num_l2_cores());
+ group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_gp), &resource_gp_mmu, &resource_gp, NULL, MALI_DOMAIN_INDEX_GP);
+ if (NULL == group) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Create group for first (and mandatory) PP core */
+ MALI_DEBUG_ASSERT(mali_l2_cache_core_get_glob_num_l2_cores() >= (cluster_id_pp_grp0 + 1)); /* >= 1 on Mali-300 and Mali-400, >= 2 on Mali-450 */
+ group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp0), &resource_pp_mmu[0], NULL, &resource_pp[0], MALI_DOMAIN_INDEX_PP0);
+ if (NULL == group) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ mali_inited_pp_cores_group_1++;
+
+ /* Create groups for rest of the cores in the first PP core group */
+ for (i = 1; i < 4; i++) { /* First half of the PP cores belong to first core group */
+ if (mali_inited_pp_cores_group_1 < mali_max_pp_cores_group_1) {
+ if (_MALI_OSK_ERR_OK == resource_pp_found[i] && _MALI_OSK_ERR_OK == resource_pp_mmu_found[i]) {
+ group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp0), &resource_pp_mmu[i], NULL, &resource_pp[i], MALI_DOMAIN_INDEX_PP0 + i);
+ if (NULL == group) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ mali_inited_pp_cores_group_1++;
+ }
+ }
+ }
+
+ /* Create groups for cores in the second PP core group */
+ for (i = 4; i < 8; i++) { /* Second half of the PP cores belong to second core group */
+ if (mali_inited_pp_cores_group_2 < mali_max_pp_cores_group_2) {
+ if (_MALI_OSK_ERR_OK == resource_pp_found[i] && _MALI_OSK_ERR_OK == resource_pp_mmu_found[i]) {
+ MALI_DEBUG_ASSERT(mali_l2_cache_core_get_glob_num_l2_cores() >= 2); /* Only Mali-450 has a second core group */
+ group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp1), &resource_pp_mmu[i], NULL, &resource_pp[i], MALI_DOMAIN_INDEX_PP0 + i);
+ if (NULL == group) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ mali_inited_pp_cores_group_2++;
+ }
+ }
+ }
+
+ if (mali_is_mali450() || mali_is_mali470()) {
+ _mali_osk_errcode_t err = mali_create_virtual_group(&resource_pp_mmu_bcast, &resource_pp_bcast, &resource_dlbu, &resource_bcast);
+ if (_MALI_OSK_ERR_OK != err) {
+ return err;
+ }
+ }
+
+ mali_max_pp_cores_group_1 = mali_inited_pp_cores_group_1;
+ mali_max_pp_cores_group_2 = mali_inited_pp_cores_group_2;
+ MALI_DEBUG_PRINT(2, ("%d+%d PP cores initialized\n", mali_inited_pp_cores_group_1, mali_inited_pp_cores_group_2));
+
+ return _MALI_OSK_ERR_OK;
+}
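+
+/* Resulting topology (summary sketch): one group holds the GP and its MMU,
+ * each physical PP core gets its own group and MMU, and on Mali-450/470 an
+ * additional virtual group (DLBU + broadcast unit + virtual PP and MMU)
+ * fans work out to the physical PP groups. */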
+
+static _mali_osk_errcode_t mali_check_shared_interrupts(void)
+{
+#if !defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ if (MALI_TRUE == _mali_osk_shared_interrupts()) {
+ MALI_PRINT_ERROR(("Shared interrupts detected, but driver support is not enabled\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+#endif /* !defined(CONFIG_MALI_SHARED_INTERRUPTS) */
+
+ /* It is OK to compile support for shared interrupts even if Mali is not using it. */
+ return _MALI_OSK_ERR_OK;
+}
+
+static _mali_osk_errcode_t mali_parse_config_pmu(void)
+{
+ _mali_osk_resource_t resource_pmu;
+
+ MALI_DEBUG_ASSERT(0 != global_gpu_base_address);
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_PMU, &resource_pmu)) {
+ struct mali_pmu_core *pmu;
+
+ pmu = mali_pmu_create(&resource_pmu);
+ if (NULL == pmu) {
+ MALI_PRINT_ERROR(("Failed to create PMU\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+
+ /* It's ok if the PMU doesn't exist */
+ return _MALI_OSK_ERR_OK;
+}
+
+static _mali_osk_errcode_t mali_parse_config_memory(void)
+{
+ _mali_osk_device_data data = { 0, };
+ _mali_osk_errcode_t ret;
+
+ /* The priority of setting the value of mali_shared_mem_size,
+ * mali_dedicated_mem_start and mali_dedicated_mem_size:
+ * 1. module parameter;
+ * 2. platform data;
+ * 3. default value;
+ */
+ if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+ /* Memory settings are not overridden by module parameters, so use device settings */
+ if (0 == mali_dedicated_mem_start && 0 == mali_dedicated_mem_size) {
+ /* Use device specific settings (if defined) */
+ mali_dedicated_mem_start = data.dedicated_mem_start;
+ mali_dedicated_mem_size = data.dedicated_mem_size;
+ }
+
+ if (MALI_SHARED_MEMORY_DEFAULT_SIZE == mali_shared_mem_size &&
+ 0 != data.shared_mem_size) {
+ mali_shared_mem_size = data.shared_mem_size;
+ }
+ }
+
+ if (0 < mali_dedicated_mem_size && 0 != mali_dedicated_mem_start) {
+ MALI_DEBUG_PRINT(2, ("Mali memory settings (dedicated: 0x%08X@0x%08X)\n",
+ mali_dedicated_mem_size, mali_dedicated_mem_start));
+
+ /* Dedicated memory */
+ ret = mali_memory_core_resource_dedicated_memory(mali_dedicated_mem_start, mali_dedicated_mem_size);
+ if (_MALI_OSK_ERR_OK != ret) {
+ MALI_PRINT_ERROR(("Failed to register dedicated memory\n"));
+ mali_memory_terminate();
+ return ret;
+ }
+ }
+
+ if (0 < mali_shared_mem_size) {
+ MALI_DEBUG_PRINT(2, ("Mali memory settings (shared: 0x%08X)\n", mali_shared_mem_size));
+
+ /* Shared OS memory */
+ ret = mali_memory_core_resource_os_memory(mali_shared_mem_size);
+ if (_MALI_OSK_ERR_OK != ret) {
+ MALI_PRINT_ERROR(("Failed to register shared OS memory\n"));
+ mali_memory_terminate();
+ return ret;
+ }
+ }
+
+ if (0 == mali_fb_start && 0 == mali_fb_size) {
+ /* Frame buffer settings are not overridden by module parameters, so use device settings */
+ _mali_osk_device_data data = { 0, };
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+ /* Use device specific settings (if defined) */
+ mali_fb_start = data.fb_start;
+ mali_fb_size = data.fb_size;
+ }
+
+ MALI_DEBUG_PRINT(2, ("Using device defined frame buffer settings (0x%08X@0x%08X)\n",
+ mali_fb_size, mali_fb_start));
+ } else {
+ MALI_DEBUG_PRINT(2, ("Using module defined frame buffer settings (0x%08X@0x%08X)\n",
+ mali_fb_size, mali_fb_start));
+ }
+
+ if (0 != mali_fb_size) {
+ /* Register frame buffer */
+ ret = mali_mem_validation_add_range(mali_fb_start, mali_fb_size);
+ if (_MALI_OSK_ERR_OK != ret) {
+ MALI_PRINT_ERROR(("Failed to register frame buffer memory region\n"));
+ mali_memory_terminate();
+ return ret;
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
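+
+/* Illustrative override (this assumes the globals above are exposed as
+ * module parameters by the Linux entry code, which is outside this file):
+ *
+ * insmod mali.ko mali_dedicated_mem_start=0x80000000 \
+ *   mali_dedicated_mem_size=0x10000000 mali_shared_mem_size=0x20000000
+ */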
+
+static void mali_detect_gpu_class(void)
+{
+ u32 gpu_class = _mali_osk_identify_gpu_resource();
+
+ if (0x450 == gpu_class)
+  mali_gpu_class_is_mali450 = MALI_TRUE;
+
+ if (0x470 == gpu_class)
+  mali_gpu_class_is_mali470 = MALI_TRUE;
+}
+
+static _mali_osk_errcode_t mali_init_hw_reset(void)
+{
+#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470))
+ _mali_osk_resource_t resource_bcast;
+
+ /* Ensure broadcast unit is in a good state before we start creating
+ * groups and cores.
+ */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_BCAST, &resource_bcast)) {
+ struct mali_bcast_unit *bcast_core;
+
+ bcast_core = mali_bcast_unit_create(&resource_bcast);
+ if (NULL == bcast_core) {
+ MALI_PRINT_ERROR(("Failed to create Broadcast unit object!\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+ mali_bcast_unit_delete(bcast_core);
+ }
+#endif /* (defined(CONFIG_MALI450) || defined(CONFIG_MALI470)) */
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_initialize_subsystems(void)
+{
+ _mali_osk_errcode_t err;
+
+#if defined(CONFIG_MALI_DT) && !defined(CONFIG_MALI_PLAT_SPECIFIC_DT)
+ err = _mali_osk_resource_initialize();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
+#endif
+
+ mali_pp_job_initialize();
+
+ mali_timeline_initialize();
+
+ err = mali_session_initialize();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
+
+#if defined(CONFIG_MALI400_PROFILING)
+ err = _mali_osk_profiling_init(mali_boot_profiling ? MALI_TRUE : MALI_FALSE);
+ if (_MALI_OSK_ERR_OK != err) {
+ /* Not fatal: the driver works without profiling */
+ MALI_PRINT_ERROR(("Failed to initialize profiling, feature will be unavailable\n"));
+ }
+#endif
+
+ err = mali_memory_initialize();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
+
+ err = mali_executor_initialize();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
+
+ err = mali_scheduler_initialize();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
+
+ /* Configure memory early, needed by mali_mmu_initialize. */
+ err = mali_parse_config_memory();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
+
+ err = mali_set_global_gpu_base_address();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
+
+ /* Detect GPU class (Mali-450/470 or plain Mali-400) from the platform resources */
+ mali_detect_gpu_class();
+
+ err = mali_check_shared_interrupts();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
+
+ /* Initialize the MALI PMU (will not touch HW!) */
+ err = mali_parse_config_pmu();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
+
+ /* Initialize the power management module */
+ err = mali_pm_initialize();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
+
+ /* Make sure the entire GPU stays on for the rest of this function */
+ mali_pm_init_begin();
+
+ /* Ensure HW is in a good state before starting to access cores. */
+ err = mali_init_hw_reset();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
+
+ /* Detect which Mali GPU we are dealing with */
+ err = mali_parse_product_info();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_pm_init_end();
+ mali_terminate_subsystems();
+ return err;
+ }
+
+ /* The global_product_id is now populated with the correct Mali GPU */
+
+ /* Start configuring the actual Mali hardware. */
+
+ err = mali_mmu_initialize();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_pm_init_end();
+ mali_terminate_subsystems();
+ return err;
+ }
+
+ if (mali_is_mali450() || mali_is_mali470()) {
+ err = mali_dlbu_initialize();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_pm_init_end();
+ mali_terminate_subsystems();
+ return err;
+ }
+ }
+
+ err = mali_parse_config_l2_cache();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_pm_init_end();
+ mali_terminate_subsystems();
+ return err;
+ }
+
+ err = mali_parse_config_groups();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_pm_init_end();
+ mali_terminate_subsystems();
+ return err;
+ }
+
+ /* Move groups into executor */
+ mali_executor_populate();
+
+ /* Must be called after every group has been assigned a power domain */
+ mali_pm_power_cost_setup();
+
+ /* Initialize the GPU timer */
+ err = mali_control_timer_init();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_pm_init_end();
+ mali_terminate_subsystems();
+ return err;
+ }
+
+ /* Initialize the GPU utilization tracking */
+ err = mali_utilization_init();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_pm_init_end();
+ mali_terminate_subsystems();
+ return err;
+ }
+
+#if defined(CONFIG_MALI_DVFS)
+ err = mali_dvfs_policy_init();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_pm_init_end();
+ mali_terminate_subsystems();
+ return err;
+ }
+#endif
+
+ /* Allowing the system to be turned off */
+ mali_pm_init_end();
+
+ return _MALI_OSK_ERR_OK; /* all ok */
+}
+
+void mali_terminate_subsystems(void)
+{
+ struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+ MALI_DEBUG_PRINT(2, ("terminate_subsystems() called\n"));
+
+ mali_utilization_term();
+ mali_control_timer_term();
+
+ mali_executor_depopulate();
+ mali_delete_groups(); /* Delete groups not added to executor */
+ mali_executor_terminate();
+
+ mali_scheduler_terminate();
+ mali_pp_job_terminate();
+ mali_delete_l2_cache_cores();
+ mali_mmu_terminate();
+
+ if (mali_is_mali450() || mali_is_mali470()) {
+ mali_dlbu_terminate();
+ }
+
+ mali_pm_terminate();
+
+ if (NULL != pmu) {
+ mali_pmu_delete(pmu);
+ }
+
+#if defined(CONFIG_MALI400_PROFILING)
+ _mali_osk_profiling_term();
+#endif
+
+ mali_memory_terminate();
+
+ mali_session_terminate();
+
+ mali_timeline_terminate();
+
+ global_gpu_base_address = 0;
+}
+
+_mali_product_id_t mali_kernel_core_get_product_id(void)
+{
+ return global_product_id;
+}
+
+u32 mali_kernel_core_get_gpu_major_version(void)
+{
+ return global_gpu_major_version;
+}
+
+u32 mali_kernel_core_get_gpu_minor_version(void)
+{
+ return global_gpu_minor_version;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_api_version(_mali_uk_get_api_version_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+
+ /* check compatibility */
+ if (args->version == _MALI_UK_API_VERSION) {
+ args->compatible = 1;
+ } else {
+ args->compatible = 0;
+ }
+
+ args->version = _MALI_UK_API_VERSION; /* report our version */
+
+ /* success regardless of being compatible or not */
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_api_version_v2(_mali_uk_get_api_version_v2_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+
+ /* check compatibility */
+ if (args->version == _MALI_UK_API_VERSION) {
+ args->compatible = 1;
+ } else {
+ args->compatible = 0;
+ }
+
+ args->version = _MALI_UK_API_VERSION; /* report our version */
+
+ /* success regardless of being compatible or not */
+ return _MALI_OSK_ERR_OK;
+}
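+
+/* Handshake sketch (illustrative, kernel-side pseudo-call): userspace sends
+ * the API version it was built against, the function above echoes the
+ * kernel's version back and flags compatibility:
+ *
+ * _mali_uk_get_api_version_v2_s args = { 0, };
+ * args.ctx = (uintptr_t)session;
+ * args.version = _MALI_UK_API_VERSION;
+ * if (_MALI_OSK_ERR_OK == _mali_ukk_get_api_version_v2(&args) && args.compatible)
+ *  ... safe to use this kernel/userspace pairing ...
+ */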
+
+_mali_osk_errcode_t _mali_ukk_wait_for_notification(_mali_uk_wait_for_notification_s *args)
+{
+ _mali_osk_errcode_t err;
+ _mali_osk_notification_t *notification;
+ _mali_osk_notification_queue_t *queue;
+ struct mali_session_data *session;
+
+ /* check input */
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+
+ session = (struct mali_session_data *)(uintptr_t)args->ctx;
+ queue = session->ioctl_queue;
+
+ /* if the queue does not exist we're currently shutting down */
+ if (NULL == queue) {
+ MALI_DEBUG_PRINT(1, ("No notification queue registered with the session. Asking userspace to stop querying\n"));
+ args->type = _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS;
+ return _MALI_OSK_ERR_OK;
+ }
+
+ /* receive a notification, might sleep */
+ err = _mali_osk_notification_queue_receive(queue, &notification);
+ if (_MALI_OSK_ERR_OK != err) {
+ MALI_ERROR(err); /* errcode returned, pass on to caller */
+ }
+
+ /* copy the buffer to the user */
+ args->type = (_mali_uk_notification_type)notification->notification_type;
+ _mali_osk_memcpy(&args->data, notification->result_buffer, notification->result_buffer_size);
+
+ /* finished with the notification */
+ _mali_osk_notification_delete(notification);
+
+ return _MALI_OSK_ERR_OK; /* all ok */
+}
+
+_mali_osk_errcode_t _mali_ukk_post_notification(_mali_uk_post_notification_s *args)
+{
+ _mali_osk_notification_t *notification;
+ _mali_osk_notification_queue_t *queue;
+ struct mali_session_data *session;
+
+ /* check input */
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+
+ session = (struct mali_session_data *)(uintptr_t)args->ctx;
+ queue = session->ioctl_queue;
+
+ /* if the queue does not exist we're currently shutting down */
+ if (NULL == queue) {
+ MALI_DEBUG_PRINT(1, ("No notification queue registered with the session. Asking userspace to stop querying\n"));
+ return _MALI_OSK_ERR_OK;
+ }
+
+ notification = _mali_osk_notification_create(args->type, 0);
+ if (NULL == notification) {
+ MALI_PRINT_ERROR(("Failed to create notification object\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ _mali_osk_notification_queue_send(queue, notification);
+
+ return _MALI_OSK_ERR_OK; /* all ok */
+}
+
+_mali_osk_errcode_t _mali_ukk_pending_submit(_mali_uk_pending_submit_s *args)
+{
+ wait_queue_head_t *queue;
+
+ /* check input */
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+
+ queue = mali_session_get_wait_queue();
+
+ /* check pending big job number, might sleep if larger than MAX allowed number */
+ if (wait_event_interruptible(*queue, MALI_MAX_PENDING_BIG_JOB > mali_scheduler_job_gp_big_job_count())) {
+ return _MALI_OSK_ERR_RESTARTSYSCALL;
+ }
+
+ return _MALI_OSK_ERR_OK; /* all ok */
+}
+
+_mali_osk_errcode_t _mali_ukk_request_high_priority(_mali_uk_request_high_priority_s *args)
+{
+ struct mali_session_data *session;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+
+ session = (struct mali_session_data *)(uintptr_t)args->ctx;
+
+ if (!session->use_high_priority_job_queue) {
+ session->use_high_priority_job_queue = MALI_TRUE;
+ MALI_DEBUG_PRINT(2, ("Session 0x%08X with pid %d was granted higher priority.\n", session, _mali_osk_get_pid()));
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_open(void **context)
+{
+ u32 i;
+ struct mali_session_data *session;
+
+ /* allocate struct to track this session */
+ session = (struct mali_session_data *)_mali_osk_calloc(1, sizeof(struct mali_session_data));
+ MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_NOMEM);
+
+ MALI_DEBUG_PRINT(3, ("Session starting\n"));
+
+ /* create a response queue for this session */
+ session->ioctl_queue = _mali_osk_notification_queue_init();
+ if (NULL == session->ioctl_queue) {
+ goto err;
+ }
+
+ session->page_directory = mali_mmu_pagedir_alloc();
+ if (NULL == session->page_directory) {
+ goto err_mmu;
+ }
+
+ if (_MALI_OSK_ERR_OK != mali_mmu_pagedir_map(session->page_directory, MALI_DLBU_VIRT_ADDR, _MALI_OSK_MALI_PAGE_SIZE)) {
+ MALI_PRINT_ERROR(("Failed to map DLBU page into session\n"));
+ goto err_mmu;
+ }
+
+ if (0 != mali_dlbu_phys_addr) {
+ mali_mmu_pagedir_update(session->page_directory, MALI_DLBU_VIRT_ADDR, mali_dlbu_phys_addr,
+ _MALI_OSK_MALI_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT);
+ }
+
+ if (_MALI_OSK_ERR_OK != mali_memory_session_begin(session)) {
+ goto err_session;
+ }
+
+ /* Create soft system. */
+ session->soft_job_system = mali_soft_job_system_create(session);
+ if (NULL == session->soft_job_system) {
+ goto err_soft;
+ }
+
+ /* Create timeline system. */
+ session->timeline_system = mali_timeline_system_create(session);
+ if (NULL == session->timeline_system) {
+ goto err_time_line;
+ }
+
+#if defined(CONFIG_MALI_DVFS)
+ _mali_osk_atomic_init(&session->number_of_window_jobs, 0);
+#endif
+
+ session->use_high_priority_job_queue = MALI_FALSE;
+
+ /* Initialize list of PP jobs on this session. */
+ _MALI_OSK_INIT_LIST_HEAD(&session->pp_job_list);
+
+ /* Initialize the pp_job_fb_lookup_list array used to quickly lookup jobs from a given frame builder */
+ for (i = 0; i < MALI_PP_JOB_FB_LOOKUP_LIST_SIZE; ++i) {
+ _MALI_OSK_INIT_LIST_HEAD(&session->pp_job_fb_lookup_list[i]);
+ }
+
+ session->pid = _mali_osk_get_pid();
+ session->comm = _mali_osk_get_comm();
+ session->max_mali_mem_allocated_size = 0;
+ for (i = 0; i < MALI_MEM_TYPE_MAX; i ++) {
+ atomic_set(&session->mali_mem_array[i], 0);
+ }
+ atomic_set(&session->mali_mem_allocated_pages, 0);
+ *context = (void *)session;
+
+ /* Add session to the list of all sessions. */
+ mali_session_add(session);
+
+ MALI_DEBUG_PRINT(3, ("Session started\n"));
+ return _MALI_OSK_ERR_OK;
+
+err_time_line:
+ mali_soft_job_system_destroy(session->soft_job_system);
+err_soft:
+ mali_memory_session_end(session);
+err_session:
+ mali_mmu_pagedir_free(session->page_directory);
+err_mmu:
+ _mali_osk_notification_queue_term(session->ioctl_queue);
+err:
+ _mali_osk_free(session);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+}
+
+#if defined(DEBUG)
+/* parameter used for debug */
+extern u32 num_pm_runtime_resume;
+extern u32 num_pm_updates;
+extern u32 num_pm_updates_up;
+extern u32 num_pm_updates_down;
+#endif
+
+_mali_osk_errcode_t _mali_ukk_close(void **context)
+{
+ struct mali_session_data *session;
+ MALI_CHECK_NON_NULL(context, _MALI_OSK_ERR_INVALID_ARGS);
+ session = (struct mali_session_data *)*context;
+
+ MALI_DEBUG_PRINT(3, ("Session ending\n"));
+
+ MALI_DEBUG_ASSERT_POINTER(session->soft_job_system);
+ MALI_DEBUG_ASSERT_POINTER(session->timeline_system);
+
+ /* Remove session from list of all sessions. */
+ mali_session_remove(session);
+
+ /* This flag is used to prevent queueing of jobs due to activation. */
+ session->is_aborting = MALI_TRUE;
+
+ /* Stop the soft job timer. */
+ mali_timeline_system_stop_timer(session->timeline_system);
+
+ /* Abort queued jobs */
+ mali_scheduler_abort_session(session);
+
+ /* Abort executing jobs */
+ mali_executor_abort_session(session);
+
+ /* Abort the soft job system. */
+ mali_soft_job_system_abort(session->soft_job_system);
+
+ /* Force execution of all pending bottom half processing for GP and PP. */
+ _mali_osk_wq_flush();
+
+ /* The session PP list should now be empty. */
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&session->pp_job_list));
+
+ /* At this point the GP and PP scheduler no longer has any jobs queued or running from this
+ * session, and all soft jobs in the soft job system have been destroyed. */
+
+ /* Any trackers left in the timeline system are directly or indirectly waiting on external
+ * sync fences. Cancel all sync fence waiters to trigger activation of all remaining
+ * trackers. This call will sleep until all timelines are empty. */
+ mali_timeline_system_abort(session->timeline_system);
+
+ /* Flush pending work.
+ * Needed to make sure all bottom half processing related to this
+ * session has been completed, before we free internal data structures.
+ */
+ _mali_osk_wq_flush();
+
+ /* Destroy timeline system. */
+ mali_timeline_system_destroy(session->timeline_system);
+ session->timeline_system = NULL;
+
+ /* Destroy soft system. */
+ mali_soft_job_system_destroy(session->soft_job_system);
+ session->soft_job_system = NULL;
+
+ MALI_DEBUG_CODE({
+ /* Check that the pp_job_fb_lookup_list array is empty. */
+ u32 i;
+ for (i = 0; i < MALI_PP_JOB_FB_LOOKUP_LIST_SIZE; ++i)
+ {
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&session->pp_job_fb_lookup_list[i]));
+ }
+ });
+
+ /* Free remaining memory allocated to this session */
+ mali_memory_session_end(session);
+
+#if defined(CONFIG_MALI_DVFS)
+ _mali_osk_atomic_term(&session->number_of_window_jobs);
+#endif
+
+#if defined(CONFIG_MALI400_PROFILING)
+ _mali_osk_profiling_stop_sampling(session->pid);
+#endif
+
+ /* Free session data structures */
+ mali_mmu_pagedir_unmap(session->page_directory, MALI_DLBU_VIRT_ADDR, _MALI_OSK_MALI_PAGE_SIZE);
+ mali_mmu_pagedir_free(session->page_directory);
+ _mali_osk_notification_queue_term(session->ioctl_queue);
+ _mali_osk_free(session);
+
+ *context = NULL;
+
+ MALI_DEBUG_PRINT(3, ("Session has ended\n"));
+
+#if defined(DEBUG)
+ MALI_DEBUG_PRINT(3, ("Stats: # runtime resumes: %u\n", num_pm_runtime_resume));
+ MALI_DEBUG_PRINT(3, (" # PM updates: .... %u (up %u, down %u)\n", num_pm_updates, num_pm_updates_up, num_pm_updates_down));
+
+ num_pm_runtime_resume = 0;
+ num_pm_updates = 0;
+ num_pm_updates_up = 0;
+ num_pm_updates_down = 0;
+#endif
+
+ return _MALI_OSK_ERR_OK;
+}
+
+#if MALI_STATE_TRACKING
+u32 _mali_kernel_core_dump_state(char *buf, u32 size)
+{
+ int n = 0; /* Number of bytes written to buf */
+
+ n += mali_scheduler_dump_state(buf + n, size - n);
+ n += mali_executor_dump_state(buf + n, size - n);
+
+ return n;
+}
+#endif
diff --git a/drivers/gpu/arm/utgard/common/mali_kernel_core.h b/drivers/gpu/arm/utgard/common/mali_kernel_core.h
new file mode 100644
index 000000000000..8cdbc5af3205
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_kernel_core.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_CORE_H__
+#define __MALI_KERNEL_CORE_H__
+
+#include "mali_osk.h"
+
+typedef enum {
+ _MALI_PRODUCT_ID_UNKNOWN,
+ _MALI_PRODUCT_ID_MALI200,
+ _MALI_PRODUCT_ID_MALI300,
+ _MALI_PRODUCT_ID_MALI400,
+ _MALI_PRODUCT_ID_MALI450,
+ _MALI_PRODUCT_ID_MALI470,
+} _mali_product_id_t;
+
+extern mali_bool mali_gpu_class_is_mali450;
+extern mali_bool mali_gpu_class_is_mali470;
+
+_mali_osk_errcode_t mali_initialize_subsystems(void);
+
+void mali_terminate_subsystems(void);
+
+_mali_product_id_t mali_kernel_core_get_product_id(void);
+
+u32 mali_kernel_core_get_gpu_major_version(void);
+
+u32 mali_kernel_core_get_gpu_minor_version(void);
+
+u32 _mali_kernel_core_dump_state(char *buf, u32 size);
+
+MALI_STATIC_INLINE mali_bool mali_is_mali470(void)
+{
+ return mali_gpu_class_is_mali470;
+}
+
+MALI_STATIC_INLINE mali_bool mali_is_mali450(void)
+{
+ return mali_gpu_class_is_mali450;
+}
+
+MALI_STATIC_INLINE mali_bool mali_is_mali400(void)
+{
+ if (mali_gpu_class_is_mali450 || mali_gpu_class_is_mali470)
+ return MALI_FALSE;
+
+ return MALI_TRUE;
+}
+#endif /* __MALI_KERNEL_CORE_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_kernel_utilization.c b/drivers/gpu/arm/utgard/common/mali_kernel_utilization.c
new file mode 100644
index 000000000000..63b941742249
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_kernel_utilization.c
@@ -0,0 +1,440 @@
+/*
+ * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_utilization.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_scheduler.h"
+
+#include "mali_executor.h"
+#include "mali_dvfs_policy.h"
+#include "mali_control_timer.h"
+
+/* Thresholds for GP bound detection. */
+#define MALI_GP_BOUND_GP_UTILIZATION_THRESHOLD 240
+#define MALI_GP_BOUND_PP_UTILIZATION_THRESHOLD 250
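+/*
+ * Utilization is reported in parts of 256 (see mali_utilization_calculate()),
+ * so GP bound here means GP load above 240/256 (~94%) while PP load stays
+ * below 250/256 (~98%).
+ */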
+
+static _mali_osk_spinlock_irq_t *utilization_data_lock;
+
+static u32 num_running_gp_cores = 0;
+static u32 num_running_pp_cores = 0;
+
+static u64 work_start_time_gpu = 0;
+static u64 work_start_time_gp = 0;
+static u64 work_start_time_pp = 0;
+static u64 accumulated_work_time_gpu = 0;
+static u64 accumulated_work_time_gp = 0;
+static u64 accumulated_work_time_pp = 0;
+
+static u32 last_utilization_gpu = 0;
+static u32 last_utilization_gp = 0;
+static u32 last_utilization_pp = 0;
+
+void (*mali_utilization_callback)(struct mali_gpu_utilization_data *data) = NULL;
+
+/* Timeout for the first control timer period, in milliseconds */
+static u32 mali_control_first_timeout = 100;
+static struct mali_gpu_utilization_data mali_util_data = {0, };
+
+struct mali_gpu_utilization_data *mali_utilization_calculate(u64 *start_time, u64 *time_period, mali_bool *need_add_timer)
+{
+ u64 time_now;
+ u32 leading_zeroes;
+ u32 shift_val;
+ u32 work_normalized_gpu;
+ u32 work_normalized_gp;
+ u32 work_normalized_pp;
+ u32 period_normalized;
+ u32 utilization_gpu;
+ u32 utilization_gp;
+ u32 utilization_pp;
+
+ mali_utilization_data_lock();
+
+ time_now = _mali_osk_time_get_ns();
+
+ *time_period = time_now - *start_time;
+
+ if (accumulated_work_time_gpu == 0 && work_start_time_gpu == 0) {
+ mali_control_timer_pause();
+ /*
+ * No work done for this period
+ * - No need to reschedule timer
+ * - Report zero usage
+ */
+ last_utilization_gpu = 0;
+ last_utilization_gp = 0;
+ last_utilization_pp = 0;
+
+ mali_util_data.utilization_gpu = last_utilization_gpu;
+ mali_util_data.utilization_gp = last_utilization_gp;
+ mali_util_data.utilization_pp = last_utilization_pp;
+
+ mali_utilization_data_unlock();
+
+ *need_add_timer = MALI_FALSE;
+
+ mali_executor_hint_disable(MALI_EXECUTOR_HINT_GP_BOUND);
+
+ MALI_DEBUG_PRINT(4, ("last_utilization_gpu = %d \n", last_utilization_gpu));
+ MALI_DEBUG_PRINT(4, ("last_utilization_gp = %d \n", last_utilization_gp));
+ MALI_DEBUG_PRINT(4, ("last_utilization_pp = %d \n", last_utilization_pp));
+
+ return &mali_util_data;
+ }
+
+ /* If we are currently busy, update working period up to now */
+ if (work_start_time_gpu != 0) {
+ accumulated_work_time_gpu += (time_now - work_start_time_gpu);
+ work_start_time_gpu = time_now;
+
+ /* GP and/or PP will also be busy if the GPU is busy at this point */
+
+ if (work_start_time_gp != 0) {
+ accumulated_work_time_gp += (time_now - work_start_time_gp);
+ work_start_time_gp = time_now;
+ }
+
+ if (work_start_time_pp != 0) {
+ accumulated_work_time_pp += (time_now - work_start_time_pp);
+ work_start_time_pp = time_now;
+ }
+ }
+
+ /*
+ * We have two 64-bit values: a dividend (accumulated busy time) and a
+ * divisor (the period). To avoid depending on a 64-bit divider, we
+ * first shift both values down equally so they fit in a 32-bit
+ * integer, and then shift the dividend up (or the divisor down) by 8
+ * bits so the result is expressed in parts of 256.
+ */
+
+ /* Shift the 64-bit values down so they fit inside a 32-bit integer */
+ leading_zeroes = _mali_osk_clz((u32)(*time_period >> 32));
+ shift_val = 32 - leading_zeroes;
+ work_normalized_gpu = (u32)(accumulated_work_time_gpu >> shift_val);
+ work_normalized_gp = (u32)(accumulated_work_time_gp >> shift_val);
+ work_normalized_pp = (u32)(accumulated_work_time_pp >> shift_val);
+ period_normalized = (u32)(*time_period >> shift_val);
+
+ /*
+ * The usage is reported in parts of 256, so we must shift the
+ * dividend up, or the divisor down, by 8 bits. (A combination of
+ * both would also work, but using just one keeps it simple, and
+ * the end result is accurate enough.)
+ */
+ if (period_normalized > 0x00FFFFFF) {
+ /* The divisor is so big that it is safe to shift it down */
+ period_normalized >>= 8;
+ } else {
+ /*
+ * The divisor is so small that we can shift up the dividend, without losing any data.
+ * (dividend is always smaller than the divisor)
+ */
+ work_normalized_gpu <<= 8;
+ work_normalized_gp <<= 8;
+ work_normalized_pp <<= 8;
+ }
+
+ utilization_gpu = work_normalized_gpu / period_normalized;
+ utilization_gp = work_normalized_gp / period_normalized;
+ utilization_pp = work_normalized_pp / period_normalized;
+
+ last_utilization_gpu = utilization_gpu;
+ last_utilization_gp = utilization_gp;
+ last_utilization_pp = utilization_pp;
+
+ if ((MALI_GP_BOUND_GP_UTILIZATION_THRESHOLD < last_utilization_gp) &&
+ (MALI_GP_BOUND_PP_UTILIZATION_THRESHOLD > last_utilization_pp)) {
+ mali_executor_hint_enable(MALI_EXECUTOR_HINT_GP_BOUND);
+ } else {
+ mali_executor_hint_disable(MALI_EXECUTOR_HINT_GP_BOUND);
+ }
+
+ /* starting a new period */
+ accumulated_work_time_gpu = 0;
+ accumulated_work_time_gp = 0;
+ accumulated_work_time_pp = 0;
+
+ *start_time = time_now;
+
+ mali_util_data.utilization_gp = last_utilization_gp;
+ mali_util_data.utilization_gpu = last_utilization_gpu;
+ mali_util_data.utilization_pp = last_utilization_pp;
+
+ mali_utilization_data_unlock();
+
+ *need_add_timer = MALI_TRUE;
+
+ MALI_DEBUG_PRINT(4, ("last_utilization_gpu = %d \n", last_utilization_gpu));
+ MALI_DEBUG_PRINT(4, ("last_utilization_gp = %d \n", last_utilization_gp));
+ MALI_DEBUG_PRINT(4, ("last_utilization_pp = %d \n", last_utilization_pp));
+
+ return &mali_util_data;
+}
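+
+/*
+ * A minimal standalone sketch of the fixed-point scheme above; the helper
+ * name utilization_256() is illustrative, not driver API:
+ *
+ *   static u32 utilization_256(u64 busy, u64 period)
+ *   {
+ *       u32 shift = (period >> 32) ?
+ *                   (32 - _mali_osk_clz((u32)(period >> 32))) : 0;
+ *       u32 b = (u32)(busy >> shift);
+ *       u32 p = (u32)(period >> shift);
+ *
+ *       if (p > 0x00FFFFFF)
+ *           p >>= 8; /* divisor is big enough to shift down */
+ *       else
+ *           b <<= 8; /* dividend is small enough to shift up */
+ *
+ *       return (0 < p) ? (b / p) : 0;
+ *   }
+ *
+ * E.g. busy = 50 ms and period = 100 ms (both in ns) yields 128, i.e. 50%.
+ */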
+
+_mali_osk_errcode_t mali_utilization_init(void)
+{
+#if USING_GPU_UTILIZATION
+ _mali_osk_device_data data;
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+ if (NULL != data.utilization_callback) {
+ mali_utilization_callback = data.utilization_callback;
+ MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: Utilization handler installed \n"));
+ }
+ }
+#endif /* USING_GPU_UTILIZATION */
+
+ if (NULL == mali_utilization_callback) {
+ MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: No platform utilization handler installed\n"));
+ }
+
+ utilization_data_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_UTILIZATION);
+ if (NULL == utilization_data_lock) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ num_running_gp_cores = 0;
+ num_running_pp_cores = 0;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_utilization_term(void)
+{
+ if (NULL != utilization_data_lock) {
+ _mali_osk_spinlock_irq_term(utilization_data_lock);
+ }
+}
+
+void mali_utilization_gp_start(void)
+{
+ mali_utilization_data_lock();
+
+ ++num_running_gp_cores;
+ if (1 == num_running_gp_cores) {
+ u64 time_now = _mali_osk_time_get_ns();
+
+ /* First GP core started, consider GP busy from now and onwards */
+ work_start_time_gp = time_now;
+
+ if (0 == num_running_pp_cores) {
+ mali_bool is_resume = MALI_FALSE;
+ /*
+ * There are no PP cores running, so this is also the point
+ * at which we consider the GPU to be busy as well.
+ */
+ work_start_time_gpu = time_now;
+
+ is_resume = mali_control_timer_resume(time_now);
+
+ mali_utilization_data_unlock();
+
+ if (is_resume) {
+ /* Do some policy in new period for performance consideration */
+#if defined(CONFIG_MALI_DVFS)
+ /* Clear session->number_of_window_jobs, prepare parameter for dvfs */
+ mali_session_max_window_num();
+ if (0 == last_utilization_gpu) {
+ /*
+ * mali_dev_pause() is called whenever the clock is changed,
+ * and every clock change first jumps to the highest step,
+ * even when scaling down. That is unnecessary, so we only
+ * start a new DVFS period when the last utilization was 0,
+ * i.e. when the timer was stopped and the GPU is now
+ * starting up again.
+ */
+ mali_dvfs_policy_new_period();
+ }
+#endif
+ /*
+ * Use a short interval for the first timeout, for power
+ * reasons: we give full power at the start of a new period,
+ * and if the job load is light (e.g. done within 10 ms),
+ * staying at high frequency for the whole period would just
+ * waste power.
+ */
+ mali_control_timer_add(mali_control_first_timeout);
+ }
+ } else {
+ mali_utilization_data_unlock();
+ }
+
+ } else {
+ /* Nothing to do */
+ mali_utilization_data_unlock();
+ }
+}
+
+void mali_utilization_pp_start(void)
+{
+ mali_utilization_data_lock();
+
+ ++num_running_pp_cores;
+ if (1 == num_running_pp_cores) {
+ u64 time_now = _mali_osk_time_get_ns();
+
+ /* First PP core started, consider PP busy from now and onwards */
+ work_start_time_pp = time_now;
+
+ if (0 == num_running_gp_cores) {
+ mali_bool is_resume = MALI_FALSE;
+ /*
+ * There are no GP cores running, so this is also the point
+ * at which we consider the GPU to be busy as well.
+ */
+ work_start_time_gpu = time_now;
+
+ /* Start a new period if stopped */
+ is_resume = mali_control_timer_resume(time_now);
+
+ mali_utilization_data_unlock();
+
+ if (is_resume) {
+#if defined(CONFIG_MALI_DVFS)
+ /* Clear session->number_of_window_jobs, prepare parameter for dvfs */
+ mali_session_max_window_num();
+ if (0 == last_utilization_gpu) {
+ /*
+ * mali_dev_pause() is called whenever the clock is changed,
+ * and every clock change first jumps to the highest step,
+ * even when scaling down. That is unnecessary, so we only
+ * start a new DVFS period when the last utilization was 0,
+ * i.e. when the timer was stopped and the GPU is now
+ * starting up again.
+ */
+ mali_dvfs_policy_new_period();
+ }
+#endif
+
+ /*
+ * Use a short interval for the first timeout, for power
+ * reasons: we give full power at the start of a new period,
+ * and if the job load is light (e.g. done within 10 ms),
+ * staying at high frequency for the whole period would just
+ * waste power.
+ */
+ mali_control_timer_add(mali_control_first_timeout);
+ }
+ } else {
+ mali_utilization_data_unlock();
+ }
+ } else {
+ /* Nothing to do */
+ mali_utilization_data_unlock();
+ }
+}
+
+void mali_utilization_gp_end(void)
+{
+ mali_utilization_data_lock();
+
+ --num_running_gp_cores;
+ if (0 == num_running_gp_cores) {
+ u64 time_now = _mali_osk_time_get_ns();
+
+ /* Last GP core ended, consider GP idle from now and onwards */
+ accumulated_work_time_gp += (time_now - work_start_time_gp);
+ work_start_time_gp = 0;
+
+ if (0 == num_running_pp_cores) {
+ /*
+ * There are no PP cores running, so this is also the point
+ * at which we consider the GPU to be idle as well.
+ */
+ accumulated_work_time_gpu += (time_now - work_start_time_gpu);
+ work_start_time_gpu = 0;
+ }
+ }
+
+ mali_utilization_data_unlock();
+}
+
+void mali_utilization_pp_end(void)
+{
+ mali_utilization_data_lock();
+
+ --num_running_pp_cores;
+ if (0 == num_running_pp_cores) {
+ u64 time_now = _mali_osk_time_get_ns();
+
+ /* Last PP core ended, consider PP idle from now and onwards */
+ accumulated_work_time_pp += (time_now - work_start_time_pp);
+ work_start_time_pp = 0;
+
+ if (0 == num_running_gp_cores) {
+ /*
+ * There are no GP cores running, so this is also the point
+ * at which we consider the GPU to be idle as well.
+ */
+ accumulated_work_time_gpu += (time_now - work_start_time_gpu);
+ work_start_time_gpu = 0;
+ }
+ }
+
+ mali_utilization_data_unlock();
+}
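+
+/*
+ * Note that the *_start()/*_end() calls above are reference counted per core
+ * type: only the 0 -> 1 and 1 -> 0 transitions of the running-core counters
+ * move the busy-time bookkeeping, so overlapping jobs on multiple cores are
+ * only counted once.
+ */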
+
+mali_bool mali_utilization_enabled(void)
+{
+#if defined(CONFIG_MALI_DVFS)
+ return mali_dvfs_policy_enabled();
+#else
+ return (NULL != mali_utilization_callback);
+#endif /* defined(CONFIG_MALI_DVFS) */
+}
+
+void mali_utilization_platform_realize(struct mali_gpu_utilization_data *util_data)
+{
+ MALI_DEBUG_ASSERT_POINTER(mali_utilization_callback);
+
+ mali_utilization_callback(util_data);
+}
+
+void mali_utilization_reset(void)
+{
+ accumulated_work_time_gpu = 0;
+ accumulated_work_time_gp = 0;
+ accumulated_work_time_pp = 0;
+
+ last_utilization_gpu = 0;
+ last_utilization_gp = 0;
+ last_utilization_pp = 0;
+}
+
+void mali_utilization_data_lock(void)
+{
+ _mali_osk_spinlock_irq_lock(utilization_data_lock);
+}
+
+void mali_utilization_data_unlock(void)
+{
+ _mali_osk_spinlock_irq_unlock(utilization_data_lock);
+}
+
+void mali_utilization_data_assert_locked(void)
+{
+ MALI_DEBUG_ASSERT_LOCK_HELD(utilization_data_lock);
+}
+
+u32 _mali_ukk_utilization_gp_pp(void)
+{
+ return last_utilization_gpu;
+}
+
+u32 _mali_ukk_utilization_gp(void)
+{
+ return last_utilization_gp;
+}
+
+u32 _mali_ukk_utilization_pp(void)
+{
+ return last_utilization_pp;
+}
diff --git a/drivers/gpu/arm/utgard/common/mali_kernel_utilization.h b/drivers/gpu/arm/utgard/common/mali_kernel_utilization.h
new file mode 100644
index 000000000000..3c20b1983762
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_kernel_utilization.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_UTILIZATION_H__
+#define __MALI_KERNEL_UTILIZATION_H__
+
+#include <linux/mali/mali_utgard.h>
+#include "mali_osk.h"
+
+/**
+ * Initialize/start the Mali GPU utilization metrics reporting.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t mali_utilization_init(void);
+
+/**
+ * Terminate the Mali GPU utilization metrics reporting
+ */
+void mali_utilization_term(void);
+
+/**
+ * Check if Mali utilization is enabled
+ */
+mali_bool mali_utilization_enabled(void);
+
+/**
+ * Should be called when a GP job is about to start executing
+ */
+void mali_utilization_gp_start(void);
+
+/**
+ * Should be called when a GP job has completed executing
+ */
+void mali_utilization_gp_end(void);
+
+/**
+ * Should be called when a PP job is about to start executing
+ */
+void mali_utilization_pp_start(void);
+
+/**
+ * Should be called when a PP job has completed executing
+ */
+void mali_utilization_pp_end(void);
+
+/**
+ * Should be called to calculate the GPU utilization
+ */
+struct mali_gpu_utilization_data *mali_utilization_calculate(u64 *start_time, u64 *time_period, mali_bool *need_add_timer);
+
+_mali_osk_spinlock_irq_t *mali_utilization_get_lock(void);
+
+void mali_utilization_platform_realize(struct mali_gpu_utilization_data *util_data);
+
+void mali_utilization_data_lock(void);
+
+void mali_utilization_data_unlock(void);
+
+void mali_utilization_data_assert_locked(void);
+
+void mali_utilization_reset(void);
+
+#endif /* __MALI_KERNEL_UTILIZATION_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_kernel_vsync.c b/drivers/gpu/arm/utgard/common/mali_kernel_vsync.c
new file mode 100644
index 000000000000..2eed4c88cf56
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_kernel_vsync.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2011-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_ukk.h"
+
+#include "mali_osk_profiling.h"
+
+_mali_osk_errcode_t _mali_ukk_vsync_event_report(_mali_uk_vsync_event_report_s *args)
+{
+ _mali_uk_vsync_event event = (_mali_uk_vsync_event)args->event;
+ MALI_IGNORE(event); /* event is not used in release builds, and that is OK */
+
+ /*
+ * Manually generate user space events in kernel space.
+ * This saves user space from calling kernel space twice in this case.
+ * We just need to remember to add pid and tid manually.
+ */
+ if (event == _MALI_UK_VSYNC_EVENT_BEGIN_WAIT) {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SUSPEND |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC,
+ _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
+ }
+
+ if (event == _MALI_UK_VSYNC_EVENT_END_WAIT) {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC,
+ _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
+ }
+
+ MALI_DEBUG_PRINT(4, ("Received VSYNC event: %d\n", event));
+ MALI_SUCCESS;
+}
+
diff --git a/drivers/gpu/arm/utgard/common/mali_l2_cache.c b/drivers/gpu/arm/utgard/common/mali_l2_cache.c
new file mode 100644
index 000000000000..494ba789cd08
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_l2_cache.c
@@ -0,0 +1,534 @@
+/*
+ * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_l2_cache.h"
+#include "mali_hw_core.h"
+#include "mali_scheduler.h"
+#include "mali_pm.h"
+#include "mali_pm_domain.h"
+
+/**
+ * Size of the Mali L2 cache registers in bytes
+ */
+#define MALI400_L2_CACHE_REGISTERS_SIZE 0x30
+
+/**
+ * Mali L2 cache register numbers
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register
+ */
+typedef enum mali_l2_cache_register {
+ MALI400_L2_CACHE_REGISTER_SIZE = 0x0004,
+ MALI400_L2_CACHE_REGISTER_STATUS = 0x0008,
+ /* unused = 0x000C */
+ MALI400_L2_CACHE_REGISTER_COMMAND = 0x0010,
+ MALI400_L2_CACHE_REGISTER_CLEAR_PAGE = 0x0014,
+ MALI400_L2_CACHE_REGISTER_MAX_READS = 0x0018,
+ MALI400_L2_CACHE_REGISTER_ENABLE = 0x001C,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0 = 0x0020,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0 = 0x0024,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1 = 0x0028,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1 = 0x002C,
+} mali_l2_cache_register;
+
+/**
+ * Mali L2 cache commands
+ * These are the commands that can be sent to the Mali L2 cache unit
+ */
+typedef enum mali_l2_cache_command {
+ MALI400_L2_CACHE_COMMAND_CLEAR_ALL = 0x01,
+} mali_l2_cache_command;
+
+/**
+ * Mali L2 cache enable options
+ * These are the values that can be written to the L2 cache enable register
+ */
+typedef enum mali_l2_cache_enable {
+ MALI400_L2_CACHE_ENABLE_DEFAULT = 0x0, /* Default */
+ MALI400_L2_CACHE_ENABLE_ACCESS = 0x01,
+ MALI400_L2_CACHE_ENABLE_READ_ALLOCATE = 0x02,
+} mali_l2_cache_enable;
+
+/**
+ * Mali L2 cache status bits
+ */
+typedef enum mali_l2_cache_status {
+ MALI400_L2_CACHE_STATUS_COMMAND_BUSY = 0x01,
+ MALI400_L2_CACHE_STATUS_DATA_BUSY = 0x02,
+} mali_l2_cache_status;
+
+#define MALI400_L2_MAX_READS_NOT_SET -1
+
+static struct mali_l2_cache_core *
+ mali_global_l2s[MALI_MAX_NUMBER_OF_L2_CACHE_CORES] = { NULL, };
+static u32 mali_global_num_l2s = 0;
+
+int mali_l2_max_reads = MALI400_L2_MAX_READS_NOT_SET;
+
+
+/* Local helper functions */
+
+static void mali_l2_cache_reset(struct mali_l2_cache_core *cache);
+
+static _mali_osk_errcode_t mali_l2_cache_send_command(
+ struct mali_l2_cache_core *cache, u32 reg, u32 val);
+
+static void mali_l2_cache_lock(struct mali_l2_cache_core *cache)
+{
+ MALI_DEBUG_ASSERT_POINTER(cache);
+ _mali_osk_spinlock_irq_lock(cache->lock);
+}
+
+static void mali_l2_cache_unlock(struct mali_l2_cache_core *cache)
+{
+ MALI_DEBUG_ASSERT_POINTER(cache);
+ _mali_osk_spinlock_irq_unlock(cache->lock);
+}
+
+/* Implementation of the L2 cache interface */
+
+struct mali_l2_cache_core *mali_l2_cache_create(
+ _mali_osk_resource_t *resource, u32 domain_index)
+{
+ struct mali_l2_cache_core *cache = NULL;
+#if defined(DEBUG)
+ u32 cache_size;
+#endif
+
+ MALI_DEBUG_PRINT(4, ("Mali L2 cache: Creating Mali L2 cache: %s\n",
+ resource->description));
+
+ if (mali_global_num_l2s >= MALI_MAX_NUMBER_OF_L2_CACHE_CORES) {
+ MALI_PRINT_ERROR(("Mali L2 cache: Too many L2 caches\n"));
+ return NULL;
+ }
+
+ cache = _mali_osk_malloc(sizeof(struct mali_l2_cache_core));
+ if (NULL == cache) {
+ MALI_PRINT_ERROR(("Mali L2 cache: Failed to allocate memory for L2 cache core\n"));
+ return NULL;
+ }
+
+ cache->core_id = mali_global_num_l2s;
+ cache->counter_src0 = MALI_HW_CORE_NO_COUNTER;
+ cache->counter_src1 = MALI_HW_CORE_NO_COUNTER;
+ cache->counter_value0_base = 0;
+ cache->counter_value1_base = 0;
+ cache->pm_domain = NULL;
+ cache->power_is_on = MALI_FALSE;
+ cache->last_invalidated_id = 0;
+
+ if (_MALI_OSK_ERR_OK != mali_hw_core_create(&cache->hw_core,
+ resource, MALI400_L2_CACHE_REGISTERS_SIZE)) {
+ _mali_osk_free(cache);
+ return NULL;
+ }
+
+#if defined(DEBUG)
+ cache_size = mali_hw_core_register_read(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_SIZE);
+ MALI_DEBUG_PRINT(2, ("Mali L2 cache: Created %s: % 3uK, %u-way, % 2ubyte cache line, % 3ubit external bus\n",
+ resource->description,
+ 1 << (((cache_size >> 16) & 0xff) - 10),
+ 1 << ((cache_size >> 8) & 0xff),
+ 1 << (cache_size & 0xff),
+ 1 << ((cache_size >> 24) & 0xff)));
+#endif
+
+ cache->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED,
+ _MALI_OSK_LOCK_ORDER_L2);
+ if (NULL == cache->lock) {
+ MALI_PRINT_ERROR(("Mali L2 cache: Failed to create counter lock for L2 cache core %s\n",
+ cache->hw_core.description));
+ mali_hw_core_delete(&cache->hw_core);
+ _mali_osk_free(cache);
+ return NULL;
+ }
+
+ /* register with correct power domain */
+ cache->pm_domain = mali_pm_register_l2_cache(
+ domain_index, cache);
+
+ mali_global_l2s[mali_global_num_l2s] = cache;
+ mali_global_num_l2s++;
+
+ return cache;
+}
+
+void mali_l2_cache_delete(struct mali_l2_cache_core *cache)
+{
+ u32 i;
+ for (i = 0; i < mali_global_num_l2s; i++) {
+ if (mali_global_l2s[i] != cache) {
+ continue;
+ }
+
+ mali_global_l2s[i] = NULL;
+ mali_global_num_l2s--;
+
+ if (i == mali_global_num_l2s) {
+ /* Removed last element, nothing more to do */
+ break;
+ }
+
+ /*
+ * We removed an L2 cache from the middle of the array,
+ * so move the last L2 cache into the vacated position
+ */
+ mali_global_l2s[i] = mali_global_l2s[mali_global_num_l2s];
+ mali_global_l2s[mali_global_num_l2s] = NULL;
+
+ /* All good */
+ break;
+ }
+
+ _mali_osk_spinlock_irq_term(cache->lock);
+ mali_hw_core_delete(&cache->hw_core);
+ _mali_osk_free(cache);
+}
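+
+/*
+ * The removal above is a plain swap-with-last compaction: removing index 1
+ * from [A, B, C] leaves [A, C], with C moved into the vacated slot. Order is
+ * not preserved, which is fine since the array is only ever iterated.
+ */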
+
+void mali_l2_cache_power_up(struct mali_l2_cache_core *cache)
+{
+ MALI_DEBUG_ASSERT_POINTER(cache);
+
+ mali_l2_cache_lock(cache);
+
+ mali_l2_cache_reset(cache);
+
+ if ((1 << MALI_DOMAIN_INDEX_DUMMY) != cache->pm_domain->pmu_mask)
+ MALI_DEBUG_ASSERT(MALI_FALSE == cache->power_is_on);
+ cache->power_is_on = MALI_TRUE;
+
+ mali_l2_cache_unlock(cache);
+}
+
+void mali_l2_cache_power_down(struct mali_l2_cache_core *cache)
+{
+ MALI_DEBUG_ASSERT_POINTER(cache);
+
+ mali_l2_cache_lock(cache);
+
+ MALI_DEBUG_ASSERT(MALI_TRUE == cache->power_is_on);
+
+ /*
+ * The HW counters will start from zero again when we resume,
+ * but we should report counters as always increasing.
+ * Take a copy of the HW values now in order to add this to
+ * the values we report after being powered up.
+ *
+ * The physical power off of the L2 cache might be outside our
+ * own control (e.g. runtime PM). That is why we must also
+ * manually set the counter values to zero here.
+ */
+
+ if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER) {
+ cache->counter_value0_base += mali_hw_core_register_read(
+ &cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0);
+ mali_hw_core_register_write(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0, 0);
+ }
+
+ if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER) {
+ cache->counter_value1_base += mali_hw_core_register_read(
+ &cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1);
+ mali_hw_core_register_write(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1, 0);
+ }
+
+ cache->power_is_on = MALI_FALSE;
+
+ mali_l2_cache_unlock(cache);
+}
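+
+/*
+ * A minimal sketch of the base/offset scheme above (illustrative, not driver
+ * API): the value reported to users is always hw + base, and power-down folds
+ * the live HW value into the base, so reported values stay monotonic across
+ * power cycles:
+ *
+ *   reported = base + hw;      while powered on
+ *   base += hw; hw = 0;        on power-down
+ *   hw counts from 0 again     after power-up
+ */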
+
+void mali_l2_cache_core_set_counter_src(
+ struct mali_l2_cache_core *cache, u32 source_id, u32 counter)
+{
+ u32 reg_offset_src;
+ u32 reg_offset_val;
+
+ MALI_DEBUG_ASSERT_POINTER(cache);
+ MALI_DEBUG_ASSERT(source_id <= 1); /* source_id is unsigned, so >= 0 is implicit */
+
+ mali_l2_cache_lock(cache);
+
+ if (0 == source_id) {
+ /* start counting from 0 */
+ cache->counter_value0_base = 0;
+ cache->counter_src0 = counter;
+ reg_offset_src = MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0;
+ reg_offset_val = MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0;
+ } else {
+ /* start counting from 0 */
+ cache->counter_value1_base = 0;
+ cache->counter_src1 = counter;
+ reg_offset_src = MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1;
+ reg_offset_val = MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1;
+ }
+
+ if (cache->power_is_on) {
+ u32 hw_src;
+
+ if (MALI_HW_CORE_NO_COUNTER != counter) {
+ hw_src = counter;
+ } else {
+ hw_src = 0; /* disable value for HW */
+ }
+
+ /* Set counter src */
+ mali_hw_core_register_write(&cache->hw_core,
+ reg_offset_src, hw_src);
+
+ /* Make sure the HW starts counting from 0 again */
+ mali_hw_core_register_write(&cache->hw_core,
+ reg_offset_val, 0);
+ }
+
+ mali_l2_cache_unlock(cache);
+}
+
+void mali_l2_cache_core_get_counter_values(
+ struct mali_l2_cache_core *cache,
+ u32 *src0, u32 *value0, u32 *src1, u32 *value1)
+{
+ MALI_DEBUG_ASSERT_POINTER(cache);
+ MALI_DEBUG_ASSERT(NULL != src0);
+ MALI_DEBUG_ASSERT(NULL != value0);
+ MALI_DEBUG_ASSERT(NULL != src1);
+ MALI_DEBUG_ASSERT(NULL != value1);
+
+ mali_l2_cache_lock(cache);
+
+ *src0 = cache->counter_src0;
+ *src1 = cache->counter_src1;
+
+ if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER) {
+ if (MALI_TRUE == cache->power_is_on) {
+ *value0 = mali_hw_core_register_read(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0);
+ } else {
+ *value0 = 0;
+ }
+
+ /* Add base offset value (in case we have been powered off) */
+ *value0 += cache->counter_value0_base;
+ }
+
+ if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER) {
+ if (MALI_TRUE == cache->power_is_on) {
+ *value1 = mali_hw_core_register_read(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1);
+ } else {
+ *value1 = 0;
+ }
+
+ /* Add base offset value (in case we have been powered off) */
+ *value1 += cache->counter_value1_base;
+ }
+
+ mali_l2_cache_unlock(cache);
+}
+
+struct mali_l2_cache_core *mali_l2_cache_core_get_glob_l2_core(u32 index)
+{
+ if (mali_global_num_l2s > index) {
+ return mali_global_l2s[index];
+ }
+
+ return NULL;
+}
+
+u32 mali_l2_cache_core_get_glob_num_l2_cores(void)
+{
+ return mali_global_num_l2s;
+}
+
+void mali_l2_cache_invalidate(struct mali_l2_cache_core *cache)
+{
+ MALI_DEBUG_ASSERT_POINTER(cache);
+
+ if (NULL == cache) {
+ return;
+ }
+
+ mali_l2_cache_lock(cache);
+
+ cache->last_invalidated_id = mali_scheduler_get_new_cache_order();
+ mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND,
+ MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+
+ mali_l2_cache_unlock(cache);
+}
+
+void mali_l2_cache_invalidate_conditional(
+ struct mali_l2_cache_core *cache, u32 id)
+{
+ MALI_DEBUG_ASSERT_POINTER(cache);
+
+ if (NULL == cache) {
+ return;
+ }
+
+ /*
+ * If the last cache invalidation was done by a job with a higher id we
+ * don't have to flush. Since user space will store jobs with their
+ * corresponding memory in sequence (first job #0, then job #1, ...),
+ * we don't have to flush for job n-1 if job n has already invalidated
+ * the cache since we know for sure that job n-1's memory was already
+ * written when job n was started.
+ */
+
+ mali_l2_cache_lock(cache);
+
+ if (((s32)id) > ((s32)cache->last_invalidated_id)) {
+ /* Set latest invalidated id to current "point in time" */
+ cache->last_invalidated_id =
+ mali_scheduler_get_new_cache_order();
+ mali_l2_cache_send_command(cache,
+ MALI400_L2_CACHE_REGISTER_COMMAND,
+ MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+ }
+
+ mali_l2_cache_unlock(cache);
+}
+
+void mali_l2_cache_invalidate_all(void)
+{
+ u32 i;
+ for (i = 0; i < mali_global_num_l2s; i++) {
+ struct mali_l2_cache_core *cache = mali_global_l2s[i];
+ _mali_osk_errcode_t ret;
+
+ MALI_DEBUG_ASSERT_POINTER(cache);
+
+ mali_l2_cache_lock(cache);
+
+ if (MALI_TRUE != cache->power_is_on) {
+ mali_l2_cache_unlock(cache);
+ continue;
+ }
+
+ cache->last_invalidated_id =
+ mali_scheduler_get_new_cache_order();
+
+ ret = mali_l2_cache_send_command(cache,
+ MALI400_L2_CACHE_REGISTER_COMMAND,
+ MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+ if (_MALI_OSK_ERR_OK != ret) {
+ MALI_PRINT_ERROR(("Failed to invalidate cache\n"));
+ }
+
+ mali_l2_cache_unlock(cache);
+ }
+}
+
+void mali_l2_cache_invalidate_all_pages(u32 *pages, u32 num_pages)
+{
+ u32 i;
+ for (i = 0; i < mali_global_num_l2s; i++) {
+ struct mali_l2_cache_core *cache = mali_global_l2s[i];
+ u32 j;
+
+ MALI_DEBUG_ASSERT_POINTER(cache);
+
+ mali_l2_cache_lock(cache);
+
+ if (MALI_TRUE != cache->power_is_on) {
+ mali_l2_cache_unlock(cache);
+ continue;
+ }
+
+ for (j = 0; j < num_pages; j++) {
+ _mali_osk_errcode_t ret;
+
+ ret = mali_l2_cache_send_command(cache,
+ MALI400_L2_CACHE_REGISTER_CLEAR_PAGE,
+ pages[j]);
+ if (_MALI_OSK_ERR_OK != ret) {
+ MALI_PRINT_ERROR(("Failed to invalidate cache (page)\n"));
+ }
+ }
+
+ mali_l2_cache_unlock(cache);
+ }
+}
+
+/* -------- local helper functions below -------- */
+
+static void mali_l2_cache_reset(struct mali_l2_cache_core *cache)
+{
+ MALI_DEBUG_ASSERT_POINTER(cache);
+ MALI_DEBUG_ASSERT_LOCK_HELD(cache->lock);
+
+ /* Invalidate cache (just to keep it in a known state at startup) */
+ mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND,
+ MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+
+ /* Enable cache */
+ mali_hw_core_register_write(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_ENABLE,
+ (u32)MALI400_L2_CACHE_ENABLE_ACCESS |
+ (u32)MALI400_L2_CACHE_ENABLE_READ_ALLOCATE);
+
+ if (MALI400_L2_MAX_READS_NOT_SET != mali_l2_max_reads) {
+ mali_hw_core_register_write(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_MAX_READS,
+ (u32)mali_l2_max_reads);
+ }
+
+ /* Restart any performance counters (if enabled) */
+ if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER) {
+
+ mali_hw_core_register_write(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0,
+ cache->counter_src0);
+ }
+
+ if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER) {
+ mali_hw_core_register_write(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1,
+ cache->counter_src1);
+ }
+}
+
+static _mali_osk_errcode_t mali_l2_cache_send_command(
+ struct mali_l2_cache_core *cache, u32 reg, u32 val)
+{
+ int i = 0;
+ const int loop_count = 100000;
+
+ MALI_DEBUG_ASSERT_POINTER(cache);
+ MALI_DEBUG_ASSERT_LOCK_HELD(cache->lock);
+
+ /*
+ * First, wait for L2 cache command handler to go idle.
+ * (Commands received while processing another command will be ignored)
+ */
+ for (i = 0; i < loop_count; i++) {
+ if (!(mali_hw_core_register_read(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_STATUS) &
+ (u32)MALI400_L2_CACHE_STATUS_COMMAND_BUSY)) {
+ break;
+ }
+ }
+
+ if (i == loop_count) {
+ MALI_DEBUG_PRINT(1, ("Mali L2 cache: aborting wait for command interface to go idle\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* then issue the command */
+ mali_hw_core_register_write(&cache->hw_core, reg, val);
+
+ return _MALI_OSK_ERR_OK;
+}
diff --git a/drivers/gpu/arm/utgard/common/mali_l2_cache.h b/drivers/gpu/arm/utgard/common/mali_l2_cache.h
new file mode 100644
index 000000000000..6dc8ec22d6de
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_l2_cache.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_L2_CACHE_H__
+#define __MALI_KERNEL_L2_CACHE_H__
+
+#include "mali_osk.h"
+#include "mali_hw_core.h"
+
+#define MALI_MAX_NUMBER_OF_L2_CACHE_CORES 3
+/* Maximum 1 GP and 4 PP for an L2 cache core (Mali-400 MP4) */
+#define MALI_MAX_NUMBER_OF_GROUPS_PER_L2_CACHE 5
+
+/**
+ * Definition of the L2 cache core struct
+ * Used to track a L2 cache unit in the system.
+ * Contains information about the mapping of the registers
+ */
+struct mali_l2_cache_core {
+ /* Common HW core functionality */
+ struct mali_hw_core hw_core;
+
+ /* Synchronize L2 cache access */
+ _mali_osk_spinlock_irq_t *lock;
+
+ /* Unique core ID */
+ u32 core_id;
+
+ /* The power domain this L2 cache belongs to */
+ struct mali_pm_domain *pm_domain;
+
+ /* MALI_TRUE if power is on for this L2 cache */
+ mali_bool power_is_on;
+
+ /* A "timestamp" to avoid unnecessary flushes */
+ u32 last_invalidated_id;
+
+ /* Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */
+ u32 counter_src0;
+
+ /* Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
+ u32 counter_src1;
+
+ /*
+ * Performance counter 0 value base/offset
+ * (allows accumulative reporting even after power off)
+ */
+ u32 counter_value0_base;
+
+ /*
+ * Performance counter 1 value base/offset
+ * (allows accumulative reporting even after power off)
+ */
+ u32 counter_value1_base;
+
+ /* Used by PM domains to link L2 caches of same domain */
+ _mali_osk_list_t pm_domain_list;
+};
+
+_mali_osk_errcode_t mali_l2_cache_initialize(void);
+void mali_l2_cache_terminate(void);
+
+struct mali_l2_cache_core *mali_l2_cache_create(
+ _mali_osk_resource_t *resource, u32 domain_index);
+void mali_l2_cache_delete(struct mali_l2_cache_core *cache);
+
+MALI_STATIC_INLINE u32 mali_l2_cache_get_id(struct mali_l2_cache_core *cache)
+{
+ MALI_DEBUG_ASSERT_POINTER(cache);
+ return cache->core_id;
+}
+
+MALI_STATIC_INLINE struct mali_pm_domain *mali_l2_cache_get_pm_domain(
+ struct mali_l2_cache_core *cache)
+{
+ MALI_DEBUG_ASSERT_POINTER(cache);
+ return cache->pm_domain;
+}
+
+void mali_l2_cache_power_up(struct mali_l2_cache_core *cache);
+void mali_l2_cache_power_down(struct mali_l2_cache_core *cache);
+
+void mali_l2_cache_core_set_counter_src(
+ struct mali_l2_cache_core *cache, u32 source_id, u32 counter);
+
+MALI_STATIC_INLINE u32 mali_l2_cache_core_get_counter_src0(
+ struct mali_l2_cache_core *cache)
+{
+ MALI_DEBUG_ASSERT_POINTER(cache);
+ return cache->counter_src0;
+}
+
+MALI_STATIC_INLINE u32 mali_l2_cache_core_get_counter_src1(
+ struct mali_l2_cache_core *cache)
+{
+ MALI_DEBUG_ASSERT_POINTER(cache);
+ return cache->counter_src1;
+}
+
+void mali_l2_cache_core_get_counter_values(
+ struct mali_l2_cache_core *cache,
+ u32 *src0, u32 *value0, u32 *src1, u32 *value1);
+
+struct mali_l2_cache_core *mali_l2_cache_core_get_glob_l2_core(u32 index);
+u32 mali_l2_cache_core_get_glob_num_l2_cores(void);
+
+struct mali_group *mali_l2_cache_get_group(
+ struct mali_l2_cache_core *cache, u32 index);
+
+void mali_l2_cache_invalidate(struct mali_l2_cache_core *cache);
+void mali_l2_cache_invalidate_conditional(
+ struct mali_l2_cache_core *cache, u32 id);
+
+void mali_l2_cache_invalidate_all(void);
+void mali_l2_cache_invalidate_all_pages(u32 *pages, u32 num_pages);
+
+#endif /* __MALI_KERNEL_L2_CACHE_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_mem_validation.c b/drivers/gpu/arm/utgard/common/mali_mem_validation.c
new file mode 100644
index 000000000000..e2b5b2a7f739
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_mem_validation.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2011-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_mem_validation.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+#define MALI_INVALID_MEM_ADDR 0xFFFFFFFF
+
+typedef struct {
+ u32 phys_base; /**< Mali physical base of the memory, page aligned */
+ u32 size; /**< size in bytes of the memory, multiple of page size */
+} _mali_mem_validation_t;
+
+static _mali_mem_validation_t mali_mem_validator = { MALI_INVALID_MEM_ADDR, MALI_INVALID_MEM_ADDR };
+
+_mali_osk_errcode_t mali_mem_validation_add_range(u32 start, u32 size)
+{
+ /* Check that no other MEM_VALIDATION resources exist */
+ if (MALI_INVALID_MEM_ADDR != mali_mem_validator.phys_base) {
+ MALI_PRINT_ERROR(("Failed to add frame buffer memory; another range is already specified\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Check restrictions on page alignment */
+ if ((0 != (start & (~_MALI_OSK_CPU_PAGE_MASK))) ||
+ (0 != (size & (~_MALI_OSK_CPU_PAGE_MASK)))) {
+ MALI_PRINT_ERROR(("Failed to add frame buffer memory; incorrect alignment\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ mali_mem_validator.phys_base = start;
+ mali_mem_validator.size = size;
+ MALI_DEBUG_PRINT(2, ("Memory Validator installed for Mali physical address base=0x%08X, size=0x%08X\n",
+ mali_mem_validator.phys_base, mali_mem_validator.size));
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_mem_validation_check(u32 phys_addr, u32 size)
+{
+ if (phys_addr < (phys_addr + size)) { /* Don't allow overflow (or zero size) */
+ if ((0 == (phys_addr & (~_MALI_OSK_CPU_PAGE_MASK))) &&
+ (0 == (size & (~_MALI_OSK_CPU_PAGE_MASK)))) {
+ if ((phys_addr >= mali_mem_validator.phys_base) &&
+ ((phys_addr + (size - 1)) >= mali_mem_validator.phys_base) &&
+ (phys_addr <= (mali_mem_validator.phys_base + (mali_mem_validator.size - 1))) &&
+ ((phys_addr + (size - 1)) <= (mali_mem_validator.phys_base + (mali_mem_validator.size - 1)))) {
+ MALI_DEBUG_PRINT(3, ("Accepted range 0x%08X + size 0x%08X (= 0x%08X)\n", phys_addr, size, (phys_addr + size - 1)));
+ return _MALI_OSK_ERR_OK;
+ }
+ }
+ }
+
+ MALI_PRINT_ERROR(("MALI PHYSICAL RANGE VALIDATION ERROR: The range supplied was: phys_base=0x%08X, size=0x%08X\n", phys_addr, size));
+
+ return _MALI_OSK_ERR_FAULT;
+}
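+
+/*
+ * A hypothetical usage sketch (the addresses and sizes are assumptions, and
+ * a 4 KiB CPU page size is assumed):
+ *
+ *   mali_mem_validation_add_range(0x80000000, 0x00800000);
+ *
+ *   mali_mem_validation_check(0x80100000, 0x00010000);
+ *       -> _MALI_OSK_ERR_OK (page aligned, fully inside the window)
+ *
+ *   mali_mem_validation_check(0x807FF000, 0x00002000);
+ *       -> _MALI_OSK_ERR_FAULT (crosses the end of the window)
+ */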
diff --git a/drivers/gpu/arm/utgard/common/mali_mem_validation.h b/drivers/gpu/arm/utgard/common/mali_mem_validation.h
new file mode 100644
index 000000000000..267720625d87
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_mem_validation.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2011-2013, 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MEM_VALIDATION_H__
+#define __MALI_MEM_VALIDATION_H__
+
+#include "mali_osk.h"
+
+_mali_osk_errcode_t mali_mem_validation_add_range(u32 start, u32 size);
+_mali_osk_errcode_t mali_mem_validation_check(u32 phys_addr, u32 size);
+
+#endif /* __MALI_MEM_VALIDATION_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_mmu.c b/drivers/gpu/arm/utgard/common/mali_mmu.c
new file mode 100644
index 000000000000..b975c1468d67
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_mmu.c
@@ -0,0 +1,433 @@
+/*
+ * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_ukk.h"
+
+#include "mali_mmu.h"
+#include "mali_hw_core.h"
+#include "mali_group.h"
+#include "mali_mmu_page_directory.h"
+
+/**
+ * Size of the MMU registers in bytes
+ */
+#define MALI_MMU_REGISTERS_SIZE 0x24
+
+/**
+ * MMU commands
+ * These are the commands that can be sent
+ * to the MMU unit.
+ */
+typedef enum mali_mmu_command {
+ MALI_MMU_COMMAND_ENABLE_PAGING = 0x00, /**< Enable paging (memory translation) */
+ MALI_MMU_COMMAND_DISABLE_PAGING = 0x01, /**< Disable paging (memory translation) */
+ MALI_MMU_COMMAND_ENABLE_STALL = 0x02, /**< Enable stall on page fault */
+ MALI_MMU_COMMAND_DISABLE_STALL = 0x03, /**< Disable stall on page fault */
+ MALI_MMU_COMMAND_ZAP_CACHE = 0x04, /**< Zap the entire page table cache */
+ MALI_MMU_COMMAND_PAGE_FAULT_DONE = 0x05, /**< Page fault processed */
+ MALI_MMU_COMMAND_HARD_RESET = 0x06 /**< Reset the MMU back to power-on settings */
+} mali_mmu_command;
+
+static void mali_mmu_probe_trigger(void *data);
+static _mali_osk_errcode_t mali_mmu_probe_ack(void *data);
+
+MALI_STATIC_INLINE _mali_osk_errcode_t mali_mmu_raw_reset(struct mali_mmu_core *mmu);
+
+/* page fault queue flush helper pages
+ * note that the mapping pointers are currently unused outside of the initialization functions */
+static mali_dma_addr mali_page_fault_flush_page_directory = MALI_INVALID_PAGE;
+static mali_io_address mali_page_fault_flush_page_directory_mapping = NULL;
+static mali_dma_addr mali_page_fault_flush_page_table = MALI_INVALID_PAGE;
+static mali_io_address mali_page_fault_flush_page_table_mapping = NULL;
+static mali_dma_addr mali_page_fault_flush_data_page = MALI_INVALID_PAGE;
+static mali_io_address mali_page_fault_flush_data_page_mapping = NULL;
+
+/* an empty page directory (no address valid) which is active on any MMU not currently marked as in use */
+static mali_dma_addr mali_empty_page_directory_phys = MALI_INVALID_PAGE;
+static mali_io_address mali_empty_page_directory_virt = NULL;
+
+
+_mali_osk_errcode_t mali_mmu_initialize(void)
+{
+ /* allocate the helper pages */
+ mali_empty_page_directory_phys = mali_allocate_empty_page(&mali_empty_page_directory_virt);
+ if (0 == mali_empty_page_directory_phys) {
+ MALI_DEBUG_PRINT_ERROR(("Mali MMU: Could not allocate empty page directory.\n"));
+ mali_empty_page_directory_phys = MALI_INVALID_PAGE;
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ if (_MALI_OSK_ERR_OK != mali_create_fault_flush_pages(&mali_page_fault_flush_page_directory,
+ &mali_page_fault_flush_page_directory_mapping,
+ &mali_page_fault_flush_page_table,
+ &mali_page_fault_flush_page_table_mapping,
+ &mali_page_fault_flush_data_page,
+ &mali_page_fault_flush_data_page_mapping)) {
+ MALI_DEBUG_PRINT_ERROR(("Mali MMU: Could not allocate fault flush pages\n"));
+ mali_free_empty_page(mali_empty_page_directory_phys, mali_empty_page_directory_virt);
+ mali_empty_page_directory_phys = MALI_INVALID_PAGE;
+ mali_empty_page_directory_virt = NULL;
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_mmu_terminate(void)
+{
+ MALI_DEBUG_PRINT(3, ("Mali MMU: terminating\n"));
+
+ /* Free global helper pages */
+ mali_free_empty_page(mali_empty_page_directory_phys, mali_empty_page_directory_virt);
+ mali_empty_page_directory_phys = MALI_INVALID_PAGE;
+ mali_empty_page_directory_virt = NULL;
+
+ /* Free the page fault flush pages */
+ mali_destroy_fault_flush_pages(&mali_page_fault_flush_page_directory,
+ &mali_page_fault_flush_page_directory_mapping,
+ &mali_page_fault_flush_page_table,
+ &mali_page_fault_flush_page_table_mapping,
+ &mali_page_fault_flush_data_page,
+ &mali_page_fault_flush_data_page_mapping);
+}
+
+struct mali_mmu_core *mali_mmu_create(_mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual)
+{
+ struct mali_mmu_core *mmu = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(resource);
+
+ MALI_DEBUG_PRINT(2, ("Mali MMU: Creating Mali MMU: %s\n", resource->description));
+
+ mmu = _mali_osk_calloc(1, sizeof(struct mali_mmu_core));
+ if (NULL != mmu) {
+ if (_MALI_OSK_ERR_OK == mali_hw_core_create(&mmu->hw_core, resource, MALI_MMU_REGISTERS_SIZE)) {
+ if (_MALI_OSK_ERR_OK == mali_group_add_mmu_core(group, mmu)) {
+ if (is_virtual) {
+ /* Skip reset and IRQ setup for virtual MMU */
+ return mmu;
+ }
+
+ if (_MALI_OSK_ERR_OK == mali_mmu_reset(mmu)) {
+ /* Setup IRQ handlers (which will do IRQ probing if needed) */
+ mmu->irq = _mali_osk_irq_init(resource->irq,
+ mali_group_upper_half_mmu,
+ group,
+ mali_mmu_probe_trigger,
+ mali_mmu_probe_ack,
+ mmu,
+ resource->description);
+ if (NULL != mmu->irq) {
+ return mmu;
+ } else {
+ MALI_PRINT_ERROR(("Mali MMU: Failed to setup interrupt handlers for MMU %s\n", mmu->hw_core.description));
+ }
+ }
+ mali_group_remove_mmu_core(group);
+ } else {
+ MALI_PRINT_ERROR(("Mali MMU: Failed to add core %s to group\n", mmu->hw_core.description));
+ }
+ mali_hw_core_delete(&mmu->hw_core);
+ }
+
+ _mali_osk_free(mmu);
+ } else {
+ MALI_PRINT_ERROR(("Failed to allocate memory for MMU\n"));
+ }
+
+ return NULL;
+}
+
+void mali_mmu_delete(struct mali_mmu_core *mmu)
+{
+ if (NULL != mmu->irq) {
+ _mali_osk_irq_term(mmu->irq);
+ }
+
+ mali_hw_core_delete(&mmu->hw_core);
+ _mali_osk_free(mmu);
+}
+
+static void mali_mmu_enable_paging(struct mali_mmu_core *mmu)
+{
+ int i;
+
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_PAGING);
+
+ for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i) {
+ if (mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS) & MALI_MMU_STATUS_BIT_PAGING_ENABLED) {
+ break;
+ }
+ }
+ if (MALI_REG_POLL_COUNT_FAST == i) {
+ MALI_PRINT_ERROR(("Enable paging request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+ }
+}
+
+/**
+ * Issues the enable stall command to the MMU and waits for HW to complete the request
+ * @param mmu The MMU to stall
+ * @return MALI_TRUE if HW stall was successfully engaged, otherwise MALI_FALSE (request timed out)
+ */
+static mali_bool mali_mmu_enable_stall(struct mali_mmu_core *mmu)
+{
+ int i;
+ u32 mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+
+ if (0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED)) {
+ MALI_DEBUG_PRINT(4, ("MMU stall is implicit when Paging is not enabled.\n"));
+ return MALI_TRUE;
+ }
+
+ if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
+ MALI_DEBUG_PRINT(3, ("Aborting MMU stall request since it is in pagefault state.\n"));
+ return MALI_FALSE;
+ }
+
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_STALL);
+
+ for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i) {
+ mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+ if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
+ break;
+ }
+ if ((mmu_status & MALI_MMU_STATUS_BIT_STALL_ACTIVE) && (0 == (mmu_status & MALI_MMU_STATUS_BIT_STALL_NOT_ACTIVE))) {
+ break;
+ }
+ if (0 == (mmu_status & (MALI_MMU_STATUS_BIT_PAGING_ENABLED))) {
+ break;
+ }
+ }
+ if (MALI_REG_POLL_COUNT_FAST == i) {
+ MALI_DEBUG_PRINT(2, ("Enable stall request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+ return MALI_FALSE;
+ }
+
+ if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
+ MALI_DEBUG_PRINT(2, ("Aborting MMU stall request since it has a pagefault.\n"));
+ return MALI_FALSE;
+ }
+
+ return MALI_TRUE;
+}
+
+/**
+ * Issues the disable stall command to the MMU and waits for HW to complete the request
+ * @param mmu The MMU to disable stall on
+ */
+static void mali_mmu_disable_stall(struct mali_mmu_core *mmu)
+{
+ int i;
+ u32 mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+
+ if (0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED)) {
+ MALI_DEBUG_PRINT(3, ("MMU disable skipped since it was not enabled.\n"));
+ return;
+ }
+ if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
+ MALI_DEBUG_PRINT(2, ("Aborting MMU disable stall request since it is in pagefault state.\n"));
+ return;
+ }
+
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_DISABLE_STALL);
+
+ for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i) {
+ u32 status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+ if (0 == (status & MALI_MMU_STATUS_BIT_STALL_ACTIVE)) {
+ break;
+ }
+ if (status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
+ break;
+ }
+ if (0 == (status & MALI_MMU_STATUS_BIT_PAGING_ENABLED)) {
+ break;
+ }
+ }
+ if (MALI_REG_POLL_COUNT_FAST == i) {
+ MALI_DEBUG_PRINT(1, ("Disable stall request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+ }
+}
+
+void mali_mmu_page_fault_done(struct mali_mmu_core *mmu)
+{
+ MALI_DEBUG_PRINT(4, ("Mali MMU: %s: Leaving page fault mode\n", mmu->hw_core.description));
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_PAGE_FAULT_DONE);
+}
+
+MALI_STATIC_INLINE _mali_osk_errcode_t mali_mmu_raw_reset(struct mali_mmu_core *mmu)
+{
+ int i;
+
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, 0xCAFEBABE);
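+ /* The low bits of the DTE address are not stored by the HW, so the
+ * dummy value is expected to read back with them masked off. */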
+ MALI_DEBUG_ASSERT(0xCAFEB000 == mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR));
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_HARD_RESET);
+
+ for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i) {
+ if (mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR) == 0) {
+ break;
+ }
+ }
+ if (MALI_REG_POLL_COUNT_FAST == i) {
+ MALI_PRINT_ERROR(("Reset request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_mmu_reset(struct mali_mmu_core *mmu)
+{
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+ mali_bool stall_success;
+ MALI_DEBUG_ASSERT_POINTER(mmu);
+
+ stall_success = mali_mmu_enable_stall(mmu);
+ if (!stall_success) {
+ err = _MALI_OSK_ERR_BUSY;
+ }
+
+ MALI_DEBUG_PRINT(3, ("Mali MMU: mali_kernel_mmu_reset: %s\n", mmu->hw_core.description));
+
+ if (_MALI_OSK_ERR_OK == mali_mmu_raw_reset(mmu)) {
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_MASK, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+ /* no session is active, so just activate the empty page directory */
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, mali_empty_page_directory_phys);
+ mali_mmu_enable_paging(mmu);
+ err = _MALI_OSK_ERR_OK;
+ }
+ mali_mmu_disable_stall(mmu);
+
+ return err;
+}
+
+mali_bool mali_mmu_zap_tlb(struct mali_mmu_core *mmu)
+{
+ mali_bool stall_success = mali_mmu_enable_stall(mmu);
+
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
+
+ if (MALI_FALSE == stall_success) {
+ /* MALI_FALSE means the MMU is in page fault state, in which case the stall cannot be disabled */
+ return MALI_FALSE;
+ }
+
+ mali_mmu_disable_stall(mmu);
+ return MALI_TRUE;
+}
+
+void mali_mmu_zap_tlb_without_stall(struct mali_mmu_core *mmu)
+{
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
+}
+
+void mali_mmu_invalidate_page(struct mali_mmu_core *mmu, u32 mali_address)
+{
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_ZAP_ONE_LINE, MALI_MMU_PDE_ENTRY(mali_address));
+}
+
+static void mali_mmu_activate_address_space(struct mali_mmu_core *mmu, u32 page_directory)
+{
+ /* The MMU must be in stalled or page fault mode, for this writing to work */
+ MALI_DEBUG_ASSERT(0 != (mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)
+ & (MALI_MMU_STATUS_BIT_STALL_ACTIVE | MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE)));
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, page_directory);
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
+}
+
+void mali_mmu_activate_page_directory(struct mali_mmu_core *mmu, struct mali_page_directory *pagedir)
+{
+ mali_bool stall_success;
+ MALI_DEBUG_ASSERT_POINTER(mmu);
+
+ MALI_DEBUG_PRINT(5, ("Asked to activate page directory 0x%x on MMU %s\n", pagedir, mmu->hw_core.description));
+
+ stall_success = mali_mmu_enable_stall(mmu);
+ MALI_DEBUG_ASSERT(stall_success);
+ MALI_IGNORE(stall_success);
+ mali_mmu_activate_address_space(mmu, pagedir->page_directory);
+ mali_mmu_disable_stall(mmu);
+}
+
+void mali_mmu_activate_empty_page_directory(struct mali_mmu_core *mmu)
+{
+ mali_bool stall_success;
+
+ MALI_DEBUG_ASSERT_POINTER(mmu);
+ MALI_DEBUG_PRINT(3, ("Activating the empty page directory on MMU %s\n", mmu->hw_core.description));
+
+ stall_success = mali_mmu_enable_stall(mmu);
+
+ /* This function can only be called when the core is idle, so it cannot fail. */
+ MALI_DEBUG_ASSERT(stall_success);
+ MALI_IGNORE(stall_success);
+
+ mali_mmu_activate_address_space(mmu, mali_empty_page_directory_phys);
+ mali_mmu_disable_stall(mmu);
+}
+
+void mali_mmu_activate_fault_flush_page_directory(struct mali_mmu_core *mmu)
+{
+ mali_bool stall_success;
+ MALI_DEBUG_ASSERT_POINTER(mmu);
+
+ MALI_DEBUG_PRINT(3, ("Activating the page fault flush page directory on MMU %s\n", mmu->hw_core.description));
+ stall_success = mali_mmu_enable_stall(mmu);
+ /* This function is expected to fail to stall, since the MMU may be in page fault mode when it is called */
+ mali_mmu_activate_address_space(mmu, mali_page_fault_flush_page_directory);
+ if (MALI_TRUE == stall_success) mali_mmu_disable_stall(mmu);
+}
+
+/* Is called when we want the mmu to give an interrupt */
+static void mali_mmu_probe_trigger(void *data)
+{
+ struct mali_mmu_core *mmu = (struct mali_mmu_core *)data;
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_RAWSTAT, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+}
+
+/* Is called when the irq probe wants the mmu to acknowledge an interrupt from the hw */
+static _mali_osk_errcode_t mali_mmu_probe_ack(void *data)
+{
+ struct mali_mmu_core *mmu = (struct mali_mmu_core *)data;
+ u32 int_stat;
+
+ int_stat = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_STATUS);
+
+ MALI_DEBUG_PRINT(2, ("mali_mmu_probe_irq_acknowledge: intstat 0x%x\n", int_stat));
+ if (int_stat & MALI_MMU_INTERRUPT_PAGE_FAULT) {
+ MALI_DEBUG_PRINT(2, ("Probe: Page fault detect: PASSED\n"));
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_PAGE_FAULT);
+ } else {
+ MALI_DEBUG_PRINT(1, ("Probe: Page fault detect: FAILED\n"));
+ }
+
+ if (int_stat & MALI_MMU_INTERRUPT_READ_BUS_ERROR) {
+ MALI_DEBUG_PRINT(2, ("Probe: Bus read error detect: PASSED\n"));
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+ } else {
+ MALI_DEBUG_PRINT(1, ("Probe: Bus read error detect: FAILED\n"));
+ }
+
+ if ((int_stat & (MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR)) ==
+ (MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR)) {
+ return _MALI_OSK_ERR_OK;
+ }
+
+ return _MALI_OSK_ERR_FAULT;
+}
+
+#if 0
+void mali_mmu_print_state(struct mali_mmu_core *mmu)
+{
+ MALI_DEBUG_PRINT(2, ("MMU: State of %s is 0x%08x\n", mmu->hw_core.description, mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+}
+#endif
diff --git a/drivers/gpu/arm/utgard/common/mali_mmu.h b/drivers/gpu/arm/utgard/common/mali_mmu.h
new file mode 100644
index 000000000000..101c968bd45d
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_mmu.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MMU_H__
+#define __MALI_MMU_H__
+
+#include "mali_osk.h"
+#include "mali_mmu_page_directory.h"
+#include "mali_hw_core.h"
+
+/* Forward declaration from mali_group.h */
+struct mali_group;
+
+/**
+ * MMU register numbers
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register
+ */
+typedef enum mali_mmu_register {
+ MALI_MMU_REGISTER_DTE_ADDR = 0x0000, /**< Current Page Directory Pointer */
+ MALI_MMU_REGISTER_STATUS = 0x0004, /**< Status of the MMU */
+ MALI_MMU_REGISTER_COMMAND = 0x0008, /**< Command register, used to control the MMU */
+ MALI_MMU_REGISTER_PAGE_FAULT_ADDR = 0x000C, /**< Logical address of the last page fault */
+ MALI_MMU_REGISTER_ZAP_ONE_LINE = 0x0010, /**< Used to invalidate the mapping of a single page from the MMU */
+ MALI_MMU_REGISTER_INT_RAWSTAT = 0x0014, /**< Raw interrupt status, all interrupts visible */
+ MALI_MMU_REGISTER_INT_CLEAR = 0x0018, /**< Indicate to the MMU that the interrupt has been received */
+ MALI_MMU_REGISTER_INT_MASK = 0x001C, /**< Enable/disable types of interrupts */
+ MALI_MMU_REGISTER_INT_STATUS = 0x0020 /**< Interrupt status based on the mask */
+} mali_mmu_register;
+
+/**
+ * MMU interrupt register bits
+ * Each cause of the interrupt is reported
+ * through the (raw) interrupt status registers.
+ * Multiple interrupts can be pending, so multiple bits
+ * can be set at once.
+ */
+typedef enum mali_mmu_interrupt {
+ MALI_MMU_INTERRUPT_PAGE_FAULT = 0x01, /**< A page fault occurred */
+ MALI_MMU_INTERRUPT_READ_BUS_ERROR = 0x02 /**< A bus read error occurred */
+} mali_mmu_interrupt;
+
+typedef enum mali_mmu_status_bits {
+ MALI_MMU_STATUS_BIT_PAGING_ENABLED = 1 << 0,
+ MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE = 1 << 1,
+ MALI_MMU_STATUS_BIT_STALL_ACTIVE = 1 << 2,
+ MALI_MMU_STATUS_BIT_IDLE = 1 << 3,
+ MALI_MMU_STATUS_BIT_REPLAY_BUFFER_EMPTY = 1 << 4,
+ MALI_MMU_STATUS_BIT_PAGE_FAULT_IS_WRITE = 1 << 5,
+ MALI_MMU_STATUS_BIT_STALL_NOT_ACTIVE = 1 << 31,
+} mali_mmu_status_bits;
+
+/**
+ * Definition of the MMU struct
+ * Used to track an MMU unit in the system.
+ * Contains information about the mapping of the registers
+ */
+struct mali_mmu_core {
+ struct mali_hw_core hw_core; /**< Common for all HW cores */
+ _mali_osk_irq_t *irq; /**< IRQ handler */
+};
+
+_mali_osk_errcode_t mali_mmu_initialize(void);
+
+void mali_mmu_terminate(void);
+
+struct mali_mmu_core *mali_mmu_create(_mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual);
+void mali_mmu_delete(struct mali_mmu_core *mmu);
+
+_mali_osk_errcode_t mali_mmu_reset(struct mali_mmu_core *mmu);
+mali_bool mali_mmu_zap_tlb(struct mali_mmu_core *mmu);
+void mali_mmu_zap_tlb_without_stall(struct mali_mmu_core *mmu);
+void mali_mmu_invalidate_page(struct mali_mmu_core *mmu, u32 mali_address);
+
+void mali_mmu_activate_page_directory(struct mali_mmu_core *mmu, struct mali_page_directory *pagedir);
+void mali_mmu_activate_empty_page_directory(struct mali_mmu_core *mmu);
+void mali_mmu_activate_fault_flush_page_directory(struct mali_mmu_core *mmu);
+
+void mali_mmu_page_fault_done(struct mali_mmu_core *mmu);
+
+MALI_STATIC_INLINE enum mali_interrupt_result mali_mmu_get_interrupt_result(struct mali_mmu_core *mmu)
+{
+ u32 rawstat_used = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_RAWSTAT);
+ if (0 == rawstat_used) {
+ return MALI_INTERRUPT_RESULT_NONE;
+ }
+ return MALI_INTERRUPT_RESULT_ERROR;
+}
+
+
+MALI_STATIC_INLINE u32 mali_mmu_get_int_status(struct mali_mmu_core *mmu)
+{
+ return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_STATUS);
+}
+
+MALI_STATIC_INLINE u32 mali_mmu_get_rawstat(struct mali_mmu_core *mmu)
+{
+ return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_RAWSTAT);
+}
+
+MALI_STATIC_INLINE void mali_mmu_mask_all_interrupts(struct mali_mmu_core *mmu)
+{
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_MASK, 0);
+}
+
+MALI_STATIC_INLINE u32 mali_mmu_get_status(struct mali_mmu_core *mmu)
+{
+ return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+}
+
+MALI_STATIC_INLINE u32 mali_mmu_get_page_fault_addr(struct mali_mmu_core *mmu)
+{
+ return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_PAGE_FAULT_ADDR);
+}
+
+#endif /* __MALI_MMU_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_mmu_page_directory.c b/drivers/gpu/arm/utgard/common/mali_mmu_page_directory.c
new file mode 100644
index 000000000000..126fd77ec9c9
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_mmu_page_directory.c
@@ -0,0 +1,495 @@
+/*
+ * Copyright (C) 2011-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_ukk.h"
+#include "mali_uk_types.h"
+#include "mali_mmu_page_directory.h"
+#include "mali_memory.h"
+#include "mali_l2_cache.h"
+
+static _mali_osk_errcode_t fill_page(mali_io_address mapping, u32 data);
+
+u32 mali_allocate_empty_page(mali_io_address *virt_addr)
+{
+ _mali_osk_errcode_t err;
+ mali_io_address mapping;
+ mali_dma_addr address;
+
+ if (_MALI_OSK_ERR_OK != mali_mmu_get_table_page(&address, &mapping)) {
+ /* Allocation failed */
+ MALI_DEBUG_PRINT(2, ("Mali MMU: Failed to get table page for empty pgdir\n"));
+ return 0;
+ }
+
+ MALI_DEBUG_ASSERT_POINTER(mapping);
+
+ err = fill_page(mapping, 0);
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_mmu_release_table_page(address, mapping);
+ MALI_DEBUG_PRINT(2, ("Mali MMU: Failed to zero page\n"));
+ return 0;
+ }
+
+ *virt_addr = mapping;
+ return address;
+}
+
+void mali_free_empty_page(mali_dma_addr address, mali_io_address virt_addr)
+{
+ if (MALI_INVALID_PAGE != address) {
+ mali_mmu_release_table_page(address, virt_addr);
+ }
+}
+
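+/*
+ * The fault flush pages below form a minimal three-level translation
+ * chain: every PDE of the page directory points at the single page
+ * table, and every PTE of that page table points at the single data
+ * page. Editor's note: this gives the MMU a fully populated dummy
+ * address space to activate while recovering from a page fault (see
+ * mali_mmu_activate_fault_flush_page_directory()).
+ */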
+_mali_osk_errcode_t mali_create_fault_flush_pages(mali_dma_addr *page_directory,
+ mali_io_address *page_directory_mapping,
+ mali_dma_addr *page_table, mali_io_address *page_table_mapping,
+ mali_dma_addr *data_page, mali_io_address *data_page_mapping)
+{
+ _mali_osk_errcode_t err;
+
+ err = mali_mmu_get_table_page(data_page, data_page_mapping);
+ if (_MALI_OSK_ERR_OK == err) {
+ err = mali_mmu_get_table_page(page_table, page_table_mapping);
+ if (_MALI_OSK_ERR_OK == err) {
+ err = mali_mmu_get_table_page(page_directory, page_directory_mapping);
+ if (_MALI_OSK_ERR_OK == err) {
+ fill_page(*data_page_mapping, 0);
+ fill_page(*page_table_mapping, *data_page | MALI_MMU_FLAGS_DEFAULT);
+ fill_page(*page_directory_mapping, *page_table | MALI_MMU_FLAGS_PRESENT);
+ MALI_SUCCESS;
+ }
+ mali_mmu_release_table_page(*page_table, *page_table_mapping);
+ *page_table = MALI_INVALID_PAGE;
+ }
+ mali_mmu_release_table_page(*data_page, *data_page_mapping);
+ *data_page = MALI_INVALID_PAGE;
+ }
+ return err;
+}
+
+void mali_destroy_fault_flush_pages(
+ mali_dma_addr *page_directory, mali_io_address *page_directory_mapping,
+ mali_dma_addr *page_table, mali_io_address *page_table_mapping,
+ mali_dma_addr *data_page, mali_io_address *data_page_mapping)
+{
+ if (MALI_INVALID_PAGE != *page_directory) {
+ mali_mmu_release_table_page(*page_directory, *page_directory_mapping);
+ *page_directory = MALI_INVALID_PAGE;
+ *page_directory_mapping = NULL;
+ }
+
+ if (MALI_INVALID_PAGE != *page_table) {
+ mali_mmu_release_table_page(*page_table, *page_table_mapping);
+ *page_table = MALI_INVALID_PAGE;
+ *page_table_mapping = NULL;
+ }
+
+ if (MALI_INVALID_PAGE != *data_page) {
+ mali_mmu_release_table_page(*data_page, *data_page_mapping);
+ *data_page = MALI_INVALID_PAGE;
+ *data_page_mapping = NULL;
+ }
+}
+
+static _mali_osk_errcode_t fill_page(mali_io_address mapping, u32 data)
+{
+ int i;
+ MALI_DEBUG_ASSERT_POINTER(mapping);
+
+ for (i = 0; i < MALI_MMU_PAGE_SIZE / 4; i++) {
+ _mali_osk_mem_iowrite32_relaxed(mapping, i * sizeof(u32), data);
+ }
+ _mali_osk_mem_barrier();
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_mmu_pagedir_map(struct mali_page_directory *pagedir, u32 mali_address, u32 size)
+{
+ const int first_pde = MALI_MMU_PDE_ENTRY(mali_address);
+ const int last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1);
+ _mali_osk_errcode_t err;
+ mali_io_address pde_mapping;
+ mali_dma_addr pde_phys;
+ int i, page_count;
+ u32 start_address;
+ if (last_pde < first_pde)
+ return _MALI_OSK_ERR_INVALID_ARGS;
+
+ for (i = first_pde; i <= last_pde; i++) {
+ if (0 == (_mali_osk_mem_ioread32(pagedir->page_directory_mapped,
+ i * sizeof(u32)) & MALI_MMU_FLAGS_PRESENT)) {
+ /* Page table not present */
+ MALI_DEBUG_ASSERT(0 == pagedir->page_entries_usage_count[i]);
+ MALI_DEBUG_ASSERT(NULL == pagedir->page_entries_mapped[i]);
+
+ err = mali_mmu_get_table_page(&pde_phys, &pde_mapping);
+ if (_MALI_OSK_ERR_OK != err) {
+ MALI_PRINT_ERROR(("Failed to allocate page table page.\n"));
+ return err;
+ }
+ pagedir->page_entries_mapped[i] = pde_mapping;
+
+ /* Update PDE, mark as present */
+ _mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32),
+ pde_phys | MALI_MMU_FLAGS_PRESENT);
+
+ MALI_DEBUG_ASSERT(0 == pagedir->page_entries_usage_count[i]);
+ }
+
+ if (first_pde == last_pde) {
+ pagedir->page_entries_usage_count[i] += size / MALI_MMU_PAGE_SIZE;
+ } else if (i == first_pde) {
+ start_address = i * MALI_MMU_VIRTUAL_PAGE_SIZE;
+ page_count = (start_address + MALI_MMU_VIRTUAL_PAGE_SIZE - mali_address) / MALI_MMU_PAGE_SIZE;
+ pagedir->page_entries_usage_count[i] += page_count;
+ } else if (i == last_pde) {
+ start_address = i * MALI_MMU_VIRTUAL_PAGE_SIZE;
+ page_count = (mali_address + size - start_address) / MALI_MMU_PAGE_SIZE;
+ pagedir->page_entries_usage_count[i] += page_count;
+ } else {
+ pagedir->page_entries_usage_count[i] = 1024;
+ }
+ }
+ _mali_osk_write_mem_barrier();
+
+ return _MALI_OSK_ERR_OK;
+}
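+
+/*
+ * Worked example of the usage-count accounting above (editor's sketch,
+ * not from the original source): each PDE covers 4 MiB
+ * (MALI_MMU_VIRTUAL_PAGE_SIZE), i.e. 1024 4 KiB pages. Mapping
+ * size = 6 MiB at mali_address = 3 MiB touches PDEs 0..2: PDE 0 gains
+ * (4 MiB - 3 MiB) / 4 KiB = 256 pages, PDE 1 is fully covered (count set
+ * to 1024), and PDE 2 gains (9 MiB - 8 MiB) / 4 KiB = 256 pages.
+ */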
+
+MALI_STATIC_INLINE void mali_mmu_zero_pte(mali_io_address page_table, u32 mali_address, u32 size)
+{
+ int i;
+ const int first_pte = MALI_MMU_PTE_ENTRY(mali_address);
+ const int last_pte = MALI_MMU_PTE_ENTRY(mali_address + size - 1);
+
+ for (i = first_pte; i <= last_pte; i++) {
+ _mali_osk_mem_iowrite32_relaxed(page_table, i * sizeof(u32), 0);
+ }
+}
+
+static u32 mali_page_directory_get_phys_address(struct mali_page_directory *pagedir, u32 index)
+{
+ return (_mali_osk_mem_ioread32(pagedir->page_directory_mapped,
+ index * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK);
+}
+
+
+_mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir, u32 mali_address, u32 size)
+{
+ const int first_pde = MALI_MMU_PDE_ENTRY(mali_address);
+ const int last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1);
+ u32 left = size;
+ int i;
+ mali_bool pd_changed = MALI_FALSE;
+ u32 pages_to_invalidate[3]; /* hard-coded to 3: max two pages from the PT level plus max one page from PD level */
+ u32 num_pages_inv = 0;
+ mali_bool invalidate_all = MALI_FALSE; /* safety mechanism in case page_entries_usage_count is unreliable */
+
+ /* For all page directory entries in range. */
+ for (i = first_pde; i <= last_pde; i++) {
+ u32 size_in_pde, offset;
+
+ MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[i]);
+ MALI_DEBUG_ASSERT(0 != pagedir->page_entries_usage_count[i]);
+
+ /* Offset into page table, 0 if mali_address is 4MiB aligned */
+ offset = (mali_address & (MALI_MMU_VIRTUAL_PAGE_SIZE - 1));
+ if (left < MALI_MMU_VIRTUAL_PAGE_SIZE - offset) {
+ size_in_pde = left;
+ } else {
+ size_in_pde = MALI_MMU_VIRTUAL_PAGE_SIZE - offset;
+ }
+
+ pagedir->page_entries_usage_count[i] -= size_in_pde / MALI_MMU_PAGE_SIZE;
+
+ /* If entire page table is unused, free it */
+ if (0 == pagedir->page_entries_usage_count[i]) {
+ u32 page_phys;
+ void *page_virt;
+ MALI_DEBUG_PRINT(4, ("Releasing page table as this is the last reference\n"));
+ /* last reference removed, no need to zero out each PTE */
+
+ page_phys = MALI_MMU_ENTRY_ADDRESS(_mali_osk_mem_ioread32(pagedir->page_directory_mapped, i * sizeof(u32)));
+ page_virt = pagedir->page_entries_mapped[i];
+ pagedir->page_entries_mapped[i] = NULL;
+ _mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32), 0);
+
+ mali_mmu_release_table_page(page_phys, page_virt);
+ pd_changed = MALI_TRUE;
+ } else {
+ MALI_DEBUG_ASSERT(num_pages_inv < 2);
+ if (num_pages_inv < 2) {
+ pages_to_invalidate[num_pages_inv] = mali_page_directory_get_phys_address(pagedir, i);
+ num_pages_inv++;
+ } else {
+ invalidate_all = MALI_TRUE;
+ }
+
+ /* If part of the page table is still in use, zero the relevant PTEs */
+ mali_mmu_zero_pte(pagedir->page_entries_mapped[i], mali_address, size_in_pde);
+ }
+
+ left -= size_in_pde;
+ mali_address += size_in_pde;
+ }
+ _mali_osk_write_mem_barrier();
+
+ /* L2 pages invalidation */
+ if (MALI_TRUE == pd_changed) {
+ MALI_DEBUG_ASSERT(num_pages_inv < 3);
+ if (num_pages_inv < 3) {
+ pages_to_invalidate[num_pages_inv] = pagedir->page_directory;
+ num_pages_inv++;
+ } else {
+ invalidate_all = MALI_TRUE;
+ }
+ }
+
+ if (invalidate_all) {
+ mali_l2_cache_invalidate_all();
+ } else {
+ mali_l2_cache_invalidate_all_pages(pages_to_invalidate, num_pages_inv);
+ }
+
+ MALI_SUCCESS;
+}
+
+struct mali_page_directory *mali_mmu_pagedir_alloc(void)
+{
+ struct mali_page_directory *pagedir;
+ _mali_osk_errcode_t err;
+ mali_dma_addr phys;
+
+ pagedir = _mali_osk_calloc(1, sizeof(struct mali_page_directory));
+ if (NULL == pagedir) {
+ return NULL;
+ }
+
+ err = mali_mmu_get_table_page(&phys, &pagedir->page_directory_mapped);
+ if (_MALI_OSK_ERR_OK != err) {
+ _mali_osk_free(pagedir);
+ return NULL;
+ }
+
+ pagedir->page_directory = (u32)phys;
+
+ /* Zero page directory */
+ fill_page(pagedir->page_directory_mapped, 0);
+
+ return pagedir;
+}
+
+void mali_mmu_pagedir_free(struct mali_page_directory *pagedir)
+{
+ const int num_page_table_entries = sizeof(pagedir->page_entries_mapped) / sizeof(pagedir->page_entries_mapped[0]);
+ int i;
+
+ /* Free referenced page tables and zero PDEs. */
+ for (i = 0; i < num_page_table_entries; i++) {
+ if (pagedir->page_directory_mapped && (_mali_osk_mem_ioread32(
+ pagedir->page_directory_mapped,
+ sizeof(u32)*i) & MALI_MMU_FLAGS_PRESENT)) {
+ mali_dma_addr phys = _mali_osk_mem_ioread32(pagedir->page_directory_mapped,
+ i * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK;
+ _mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32), 0);
+ mali_mmu_release_table_page(phys, pagedir->page_entries_mapped[i]);
+ }
+ }
+ _mali_osk_write_mem_barrier();
+
+ /* Free the page directory page. */
+ mali_mmu_release_table_page(pagedir->page_directory, pagedir->page_directory_mapped);
+
+ _mali_osk_free(pagedir);
+}
+
+
+void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address,
+ mali_dma_addr phys_address, u32 size, u32 permission_bits)
+{
+ u32 end_address = mali_address + size;
+ u32 mali_phys = (u32)phys_address;
+
+ /* Map physical pages into MMU page tables */
+ for (; mali_address < end_address; mali_address += MALI_MMU_PAGE_SIZE, mali_phys += MALI_MMU_PAGE_SIZE) {
+ MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)]);
+ _mali_osk_mem_iowrite32_relaxed(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)],
+ MALI_MMU_PTE_ENTRY(mali_address) * sizeof(u32),
+ mali_phys | permission_bits);
+ }
+}
+
+void mali_mmu_pagedir_diag(struct mali_page_directory *pagedir, u32 fault_addr)
+{
+#if defined(DEBUG)
+ u32 pde_index, pte_index;
+ u32 pde, pte;
+
+ pde_index = MALI_MMU_PDE_ENTRY(fault_addr);
+ pte_index = MALI_MMU_PTE_ENTRY(fault_addr);
+
+
+ pde = _mali_osk_mem_ioread32(pagedir->page_directory_mapped,
+ pde_index * sizeof(u32));
+
+
+ if (pde & MALI_MMU_FLAGS_PRESENT) {
+ u32 pte_addr = MALI_MMU_ENTRY_ADDRESS(pde);
+
+ pte = _mali_osk_mem_ioread32(pagedir->page_entries_mapped[pde_index],
+ pte_index * sizeof(u32));
+
+ MALI_DEBUG_PRINT(2, ("\tMMU: %08x: Page table present: %08x\n"
+ "\t\tPTE: %08x, page %08x is %s\n",
+ fault_addr, pte_addr, pte,
+ MALI_MMU_ENTRY_ADDRESS(pte),
+ pte & MALI_MMU_FLAGS_DEFAULT ? "rw" : "not present"));
+ } else {
+ MALI_DEBUG_PRINT(2, ("\tMMU: %08x: Page table not present: %08x\n",
+ fault_addr, pde));
+ }
+#else
+ MALI_IGNORE(pagedir);
+ MALI_IGNORE(fault_addr);
+#endif
+}
+
+/* For instrumented */
+struct dump_info {
+ u32 buffer_left;
+ u32 register_writes_size;
+ u32 page_table_dump_size;
+ u32 *buffer;
+};
+
+static _mali_osk_errcode_t writereg(u32 where, u32 what, const char *comment, struct dump_info *info)
+{
+ if (NULL != info) {
+ info->register_writes_size += sizeof(u32) * 2; /* two 32-bit words */
+
+ if (NULL != info->buffer) {
+ /* check that we have enough space */
+ if (info->buffer_left < sizeof(u32) * 2) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+ *info->buffer = where;
+ info->buffer++;
+
+ *info->buffer = what;
+ info->buffer++;
+
+ info->buffer_left -= sizeof(u32) * 2;
+ }
+ }
+
+ MALI_SUCCESS;
+}
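+
+/*
+ * Editor's note: as writereg() shows, the register-writes part of a dump
+ * is a flat array of u32 pairs { register offset, value }. For the
+ * sequence emitted by dump_mmu_registers() below this is:
+ *
+ *   0x00000000, <page directory address>   (DTE_ADDR)
+ *   0x00000008, 4                          (COMMAND)
+ *   0x00000008, 0                          (COMMAND)
+ */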
+
+static _mali_osk_errcode_t mali_mmu_dump_page(mali_io_address page, u32 phys_addr, struct dump_info *info)
+{
+ if (NULL != info) {
+ /* 4096 for the page and 4 bytes for the address */
+ const u32 page_size_in_elements = MALI_MMU_PAGE_SIZE / 4;
+ const u32 page_size_in_bytes = MALI_MMU_PAGE_SIZE;
+ const u32 dump_size_in_bytes = MALI_MMU_PAGE_SIZE + 4;
+
+ info->page_table_dump_size += dump_size_in_bytes;
+
+ if (NULL != info->buffer) {
+ if (info->buffer_left < dump_size_in_bytes) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+ *info->buffer = phys_addr;
+ info->buffer++;
+
+ _mali_osk_memcpy(info->buffer, page, page_size_in_bytes);
+ info->buffer += page_size_in_elements;
+
+ info->buffer_left -= dump_size_in_bytes;
+ }
+ }
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t dump_mmu_page_table(struct mali_page_directory *pagedir, struct dump_info *info)
+{
+ MALI_DEBUG_ASSERT_POINTER(pagedir);
+ MALI_DEBUG_ASSERT_POINTER(info);
+
+ if (NULL != pagedir->page_directory_mapped) {
+ int i;
+
+ MALI_CHECK_NO_ERROR(
+ mali_mmu_dump_page(pagedir->page_directory_mapped, pagedir->page_directory, info)
+ );
+
+ for (i = 0; i < 1024; i++) {
+ if (NULL != pagedir->page_entries_mapped[i]) {
+ MALI_CHECK_NO_ERROR(
+ mali_mmu_dump_page(pagedir->page_entries_mapped[i],
+ _mali_osk_mem_ioread32(pagedir->page_directory_mapped,
+ i * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK, info)
+ );
+ }
+ }
+ }
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t dump_mmu_registers(struct mali_page_directory *pagedir, struct dump_info *info)
+{
+ MALI_CHECK_NO_ERROR(writereg(0x00000000, pagedir->page_directory,
+ "set the page directory address", info));
+ MALI_CHECK_NO_ERROR(writereg(0x00000008, 4, "zap cache", info));
+ MALI_CHECK_NO_ERROR(writereg(0x00000008, 0, "enable paging", info));
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size(_mali_uk_query_mmu_page_table_dump_size_s *args)
+{
+ struct dump_info info = { 0, 0, 0, NULL };
+ struct mali_session_data *session_data;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+
+ session_data = (struct mali_session_data *)(uintptr_t)(args->ctx);
+ MALI_DEBUG_ASSERT_POINTER(session_data);
+
+ MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data->page_directory, &info));
+ MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data->page_directory, &info));
+ args->size = info.register_writes_size + info.page_table_dump_size;
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t _mali_ukk_dump_mmu_page_table(_mali_uk_dump_mmu_page_table_s *args)
+{
+ struct dump_info info = { 0, 0, 0, NULL };
+ struct mali_session_data *session_data;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+
+ session_data = (struct mali_session_data *)(uintptr_t)(args->ctx);
+ MALI_DEBUG_ASSERT_POINTER(session_data);
+
+ info.buffer_left = args->size;
+ info.buffer = (u32 *)(uintptr_t)args->buffer;
+
+ args->register_writes = (uintptr_t)info.buffer;
+ MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data->page_directory, &info));
+
+ args->page_table_dump = (uintptr_t)info.buffer;
+ MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data->page_directory, &info));
+
+ args->register_writes_size = info.register_writes_size;
+ args->page_table_dump_size = info.page_table_dump_size;
+
+ MALI_SUCCESS;
+}
diff --git a/drivers/gpu/arm/utgard/common/mali_mmu_page_directory.h b/drivers/gpu/arm/utgard/common/mali_mmu_page_directory.h
new file mode 100644
index 000000000000..561fb60b9803
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_mmu_page_directory.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2011-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MMU_PAGE_DIRECTORY_H__
+#define __MALI_MMU_PAGE_DIRECTORY_H__
+
+#include "mali_osk.h"
+
+/**
+ * Size of an MMU page in bytes
+ */
+#define MALI_MMU_PAGE_SIZE 0x1000
+
+/**
+ * Size of the address space referenced by a page table page
+ */
+#define MALI_MMU_VIRTUAL_PAGE_SIZE 0x400000 /* 4 MiB */
+
+/**
+ * Page directory index from address
+ * Calculates the page directory index from the given address
+ */
+#define MALI_MMU_PDE_ENTRY(address) (((address)>>22) & 0x03FF)
+
+/**
+ * Page table index from address
+ * Calculates the page table index from the given address
+ */
+#define MALI_MMU_PTE_ENTRY(address) (((address)>>12) & 0x03FF)
+
+/**
+ * Extract the memory address from a PDE/PTE entry
+ */
+#define MALI_MMU_ENTRY_ADDRESS(value) ((value) & 0xFFFFFC00)
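+
+/*
+ * Worked example (editor's sketch): for Mali virtual address 0x12345678,
+ * MALI_MMU_PDE_ENTRY() gives (0x12345678 >> 22) & 0x3FF = 0x048 (PDE 72)
+ * and MALI_MMU_PTE_ENTRY() gives (0x12345678 >> 12) & 0x3FF = 0x345
+ * (PTE 837); MALI_MMU_ENTRY_ADDRESS() then strips the low flag bits from
+ * the entry found at that index.
+ */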
+
+#define MALI_INVALID_PAGE ((u32)(~0))
+
+/**
+ * MMU page table entry (PDE/PTE) flag bits
+ */
+typedef enum mali_mmu_entry_flags {
+ MALI_MMU_FLAGS_PRESENT = 0x01,
+ MALI_MMU_FLAGS_READ_PERMISSION = 0x02,
+ MALI_MMU_FLAGS_WRITE_PERMISSION = 0x04,
+ MALI_MMU_FLAGS_OVERRIDE_CACHE = 0x8,
+ MALI_MMU_FLAGS_WRITE_CACHEABLE = 0x10,
+ MALI_MMU_FLAGS_WRITE_ALLOCATE = 0x20,
+ MALI_MMU_FLAGS_WRITE_BUFFERABLE = 0x40,
+ MALI_MMU_FLAGS_READ_CACHEABLE = 0x80,
+ MALI_MMU_FLAGS_READ_ALLOCATE = 0x100,
+ MALI_MMU_FLAGS_MASK = 0x1FF,
+} mali_mmu_entry_flags;
+
+
+#define MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE ( \
+ MALI_MMU_FLAGS_PRESENT | \
+ MALI_MMU_FLAGS_READ_PERMISSION | \
+ MALI_MMU_FLAGS_WRITE_PERMISSION | \
+ MALI_MMU_FLAGS_OVERRIDE_CACHE | \
+ MALI_MMU_FLAGS_WRITE_CACHEABLE | \
+ MALI_MMU_FLAGS_WRITE_BUFFERABLE | \
+ MALI_MMU_FLAGS_READ_CACHEABLE | \
+ MALI_MMU_FLAGS_READ_ALLOCATE )
+
+#define MALI_MMU_FLAGS_DEFAULT ( \
+ MALI_MMU_FLAGS_PRESENT | \
+ MALI_MMU_FLAGS_READ_PERMISSION | \
+ MALI_MMU_FLAGS_WRITE_PERMISSION )
+
+
+struct mali_page_directory {
+ u32 page_directory; /**< Physical address of the memory session's page directory */
+ mali_io_address page_directory_mapped; /**< Pointer to the mapped version of the page directory into the kernel's address space */
+
+ mali_io_address page_entries_mapped[1024]; /**< Pointers to the page tables which exists in the page directory mapped into the kernel's address space */
+ u32 page_entries_usage_count[1024]; /**< Tracks usage count of the page table pages, so they can be released when the last reference is removed */
+};
+
+/* Map Mali virtual address space (i.e. ensure page tables exist for the virtual range) */
+_mali_osk_errcode_t mali_mmu_pagedir_map(struct mali_page_directory *pagedir, u32 mali_address, u32 size);
+_mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir, u32 mali_address, u32 size);
+
+/* Back virtual address space with actual pages. Assumes input is contiguous and 4k aligned. */
+void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address,
+ mali_dma_addr phys_address, u32 size, u32 permission_bits);
+
+u32 mali_allocate_empty_page(mali_io_address *virt_addr);
+void mali_free_empty_page(mali_dma_addr address, mali_io_address virt_addr);
+_mali_osk_errcode_t mali_create_fault_flush_pages(mali_dma_addr *page_directory,
+ mali_io_address *page_directory_mapping,
+ mali_dma_addr *page_table, mali_io_address *page_table_mapping,
+ mali_dma_addr *data_page, mali_io_address *data_page_mapping);
+void mali_destroy_fault_flush_pages(
+ mali_dma_addr *page_directory, mali_io_address *page_directory_mapping,
+ mali_dma_addr *page_table, mali_io_address *page_table_mapping,
+ mali_dma_addr *data_page, mali_io_address *data_page_mapping);
+
+struct mali_page_directory *mali_mmu_pagedir_alloc(void);
+void mali_mmu_pagedir_free(struct mali_page_directory *pagedir);
+
+void mali_mmu_pagedir_diag(struct mali_page_directory *pagedir, u32 fault_addr);
+
+#endif /* __MALI_MMU_PAGE_DIRECTORY_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_osk.h b/drivers/gpu/arm/utgard/common/mali_osk.h
new file mode 100644
index 000000000000..4c9e57cba18f
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_osk.h
@@ -0,0 +1,1397 @@
+/*
+ * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk.h
+ * Defines the OS abstraction layer for the kernel device driver (OSK)
+ */
+
+#ifndef __MALI_OSK_H__
+#define __MALI_OSK_H__
+
+#include "mali_osk_types.h"
+#include "mali_osk_specific.h" /* include any per-os specifics */
+#include "mali_osk_locks.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup oskapi UDD OS Abstraction for Kernel-side (OSK) APIs
+ *
+ * @{
+ */
+
+/** @addtogroup _mali_osk_lock OSK Mutual Exclusion Locks
+ * @{ */
+
+#ifdef DEBUG
+/** @brief Macro for asserting that the current thread holds a given lock
+ */
+#define MALI_DEBUG_ASSERT_LOCK_HELD(l) MALI_DEBUG_ASSERT(_mali_osk_lock_get_owner((_mali_osk_lock_debug_t *)l) == _mali_osk_get_tid())
+
+/** @brief returns a lock's owner (thread id) if debugging is enabled
+ */
+#else
+#define MALI_DEBUG_ASSERT_LOCK_HELD(l) do {} while(0)
+#endif
+
+/** @} */ /* end group _mali_osk_lock */
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief Find the containing structure of another structure
+ *
+ * This is the reverse of the operation 'offsetof'. This means that the
+ * following condition is satisfied:
+ *
+ * ptr == _MALI_OSK_CONTAINER_OF( &ptr->member, type, member )
+ *
+ * when \a ptr is of type 'type *'.
+ *
+ * Its purpose is to recover a larger structure that has wrapped a smaller one.
+ *
+ * @note no type or memory checking occurs to ensure that a wrapper structure
+ * does in fact exist, and that it is being recovered with respect to the
+ * correct member.
+ *
+ * @param ptr the pointer to the member that is contained within the larger
+ * structure
+ * @param type the type of the structure that contains the member
+ * @param member the name of the member in the structure that ptr points to.
+ * @return a pointer to a \a type object which contains \a member, as pointed
+ * to by \a ptr.
+ */
+#define _MALI_OSK_CONTAINER_OF(ptr, type, member) \
+ ((type *)( ((char *)ptr) - offsetof(type,member) ))
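+
+/*
+ * Usage sketch (editor's example; struct my_core, its hw_core member and
+ * on_event() are hypothetical):
+ *
+ *   struct my_core {
+ *       struct mali_hw_core hw_core;
+ *       int state;
+ *   };
+ *
+ *   void on_event(struct mali_hw_core *hw)
+ *   {
+ *       struct my_core *core =
+ *               _MALI_OSK_CONTAINER_OF(hw, struct my_core, hw_core);
+ *       core->state = 1;
+ *   }
+ */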
+
+/** @addtogroup _mali_osk_wq
+ * @{ */
+
+/** @brief Initialize work queues (for deferred work)
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_wq_init(void);
+
+/** @brief Terminate work queues (for deferred work)
+ */
+void _mali_osk_wq_term(void);
+
+/** @brief Create work in the work queue
+ *
+ * Creates a work object which can be scheduled in the work queue. When
+ * scheduled, \a handler will be called with \a data as the argument.
+ *
+ * Refer to \ref _mali_osk_wq_schedule_work() for details on how work
+ * is scheduled in the queue.
+ *
+ * The returned pointer must be freed with \ref _mali_osk_wq_delete_work()
+ * when no longer needed.
+ */
+_mali_osk_wq_work_t *_mali_osk_wq_create_work(_mali_osk_wq_work_handler_t handler, void *data);
+
+/** @brief A high priority version of \a _mali_osk_wq_create_work()
+ *
+ * Creates a work object which can be scheduled in the high priority work queue.
+ *
+ * This is unfortunately needed to get low latency scheduling of the Mali cores. Normally we would
+ * schedule the next job in hw_irq or tasklet, but often we can't since we need to synchronously map
+ * and unmap shared memory when a job is connected to external fences (timelines). And this requires
+ * taking a mutex.
+ *
+ * We do signal a lot of other (low priority) work also as part of the job being finished, and if we
+ * don't set this Mali scheduling thread as high priority, we see that the CPU scheduler often runs
+ * random things instead of starting the next GPU job when the GPU is idle. So setting the gpu
+ * scheduler to high priority does give a visually more responsive system.
+ *
+ * Start the high priority work with: \a _mali_osk_wq_schedule_work_high_pri()
+ */
+_mali_osk_wq_work_t *_mali_osk_wq_create_work_high_pri(_mali_osk_wq_work_handler_t handler, void *data);
+
+/** @brief Delete a work object
+ *
+ * This will flush the work queue to ensure that the work handler will not
+ * be called after deletion.
+ */
+void _mali_osk_wq_delete_work(_mali_osk_wq_work_t *work);
+
+/** @brief Delete a work object
+ *
+ * This will NOT flush the work queue, so only call this if you are sure that the work handler will
+ * not be called after deletion.
+ */
+void _mali_osk_wq_delete_work_nonflush(_mali_osk_wq_work_t *work);
+
+/** @brief Cause a queued, deferred call of the work handler
+ *
+ * _mali_osk_wq_schedule_work provides a mechanism for enqueuing deferred calls
+ * to the work handler. After calling \ref _mali_osk_wq_schedule_work(), the
+ * work handler will be scheduled to run at some point in the future.
+ *
+ * Typically this is called by the IRQ upper-half to defer further processing of
+ * IRQ-related work to the IRQ bottom-half handler. This is necessary for work
+ * that cannot be done in an IRQ context by the IRQ upper-half handler. Timer
+ * callbacks also use this mechanism, because they are treated as though they
+ * operate in an IRQ context. Refer to \ref _mali_osk_timer_t for more
+ * information.
+ *
+ * Code that operates in a kernel-process context (with no IRQ context
+ * restrictions) may also enqueue deferred calls to the IRQ bottom-half. The
+ * advantage over direct calling is that deferred calling allows the caller and
+ * IRQ bottom half to hold the same mutex, with a guarantee that they will not
+ * deadlock just by using this mechanism.
+ *
+ * _mali_osk_wq_schedule_work() places deferred call requests on a queue, to
+ * allow for more than one thread to make a deferred call. Therefore, if it is
+ * called 'K' times, then the IRQ bottom-half will be scheduled 'K' times too.
+ * 'K' is a number that is implementation-specific.
+ *
+ * _mali_osk_wq_schedule_work() is guaranteed to not block on:
+ * - enqueuing a deferred call request.
+ * - the completion of the work handler.
+ *
+ * This is to prevent deadlock. For example, if _mali_osk_wq_schedule_work()
+ * blocked, then it would cause a deadlock when the following two conditions
+ * hold:
+ * - The work handler callback (of type _mali_osk_wq_work_handler_t) locks
+ * a mutex
+ * - And, at the same time, the caller of _mali_osk_wq_schedule_work() also
+ * holds the same mutex
+ *
+ * @note care must be taken to not overflow the queue that
+ * _mali_osk_wq_schedule_work() operates on. Code must be structured to
+ * ensure that the number of requests made to the queue is bounded. Otherwise,
+ * work will be lost.
+ *
+ * The queue that _mali_osk_wq_schedule_work implements is a FIFO of N-writer,
+ * 1-reader type. The writers are the callers of _mali_osk_wq_schedule_work
+ * (all OSK-registered IRQ upper-half handlers in the system, watchdog timers,
+ * callers from a Kernel-process context). The reader is a single thread that
+ * handles all OSK-registered work.
+ *
+ * @param work a pointer to the _mali_osk_wq_work_t object corresponding to the
+ * work to begin processing.
+ */
+void _mali_osk_wq_schedule_work(_mali_osk_wq_work_t *work);
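+
+/*
+ * Minimal usage sketch (editor's example; struct my_device, bottom_half()
+ * and do_deferred_work() are hypothetical):
+ *
+ *   static void bottom_half(void *data)
+ *   {
+ *       struct my_device *dev = data;
+ *       do_deferred_work(dev);    (may take mutexes, unlike the upper-half)
+ *   }
+ *
+ *   work = _mali_osk_wq_create_work(bottom_half, dev);
+ *   _mali_osk_wq_schedule_work(work);    (typically from the IRQ upper-half)
+ *   ...
+ *   _mali_osk_wq_delete_work(work);      (flushes the queue before freeing)
+ */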
+
+/** @brief Cause a queued, deferred call of the high priority work handler
+ *
+ * Function is the same as \a _mali_osk_wq_schedule_work() with the only
+ * difference that it runs in a high (real time) priority on the system.
+ *
+ * Should only be used as a substitute for doing the same work in interrupts.
+ *
+ * This is allowed to sleep, but the work should be small since it will block
+ * all other applications.
+*/
+void _mali_osk_wq_schedule_work_high_pri(_mali_osk_wq_work_t *work);
+
+/** @brief Flush the work queue
+ *
+ * This will flush the OSK work queue, ensuring all work in the queue has
+ * completed before returning.
+ *
+ * Since this blocks on the completion of work in the work-queue, the
+ * caller of this function \b must \b not hold any mutexes that are taken by
+ * any registered work handler. To do so may cause a deadlock.
+ *
+ */
+void _mali_osk_wq_flush(void);
+
+/** @brief Create work in the delayed work queue
+ *
+ * Creates a work object which can be scheduled in the work queue. When
+ * scheduled, a timer will be started, and \a handler will be called with
+ * \a data as the argument when the timer expires.
+ *
+ * Refer to \ref _mali_osk_wq_delayed_schedule_work() for details on how work
+ * is scheduled in the queue.
+ *
+ * The returned pointer must be freed with \ref _mali_osk_wq_delayed_delete_work_nonflush()
+ * when no longer needed.
+ */
+_mali_osk_wq_delayed_work_t *_mali_osk_wq_delayed_create_work(_mali_osk_wq_work_handler_t handler, void *data);
+
+/** @brief Delete a work object
+ *
+ * This will NOT flush the work queue, so only call this if you are sure that the work handler will
+ * not be called after deletion.
+ */
+void _mali_osk_wq_delayed_delete_work_nonflush(_mali_osk_wq_delayed_work_t *work);
+
+/** @brief Cancel a delayed work without waiting for it to finish
+ *
+ * Note that the \a work callback function may still be running on return from
+ * _mali_osk_wq_delayed_cancel_work_async().
+ *
+ * @param work The delayed work to be cancelled
+ */
+void _mali_osk_wq_delayed_cancel_work_async(_mali_osk_wq_delayed_work_t *work);
+
+/** @brief Cancel a delayed work and wait for it to finish
+ *
+ * When this function returns, the \a work was either cancelled or it finished running.
+ *
+ * @param work The delayed work to be cancelled
+ */
+void _mali_osk_wq_delayed_cancel_work_sync(_mali_osk_wq_delayed_work_t *work);
+
+/** @brief Put \a work task in global workqueue after delay
+ *
+ * After waiting for a given time this puts a job in the kernel-global
+ * workqueue.
+ *
+ * If \a work was already on a queue, this function will return without doing anything
+ *
+ * @param work job to be done
+ * @param delay number of jiffies to wait or 0 for immediate execution
+ */
+void _mali_osk_wq_delayed_schedule_work(_mali_osk_wq_delayed_work_t *work, u32 delay);
+
+/** @} */ /* end group _mali_osk_wq */
+
+
+/** @addtogroup _mali_osk_irq
+ * @{ */
+
+/** @brief Initialize IRQ handling for a resource
+ *
+ * Registers an interrupt handler \a uhandler for the given IRQ number \a irqnum.
+ * \a data will be passed as argument to the handler when an interrupt occurs.
+ *
+ * If \a irqnum is -1, _mali_osk_irq_init will probe for the IRQ number using
+ * the supplied \a trigger_func and \a ack_func. These functions will also
+ * receive \a data as their argument.
+ *
+ * @param irqnum The IRQ number that the resource uses, as seen by the CPU.
+ * The value -1 has a special meaning which indicates the use of probing, and
+ * trigger_func and ack_func must be non-NULL.
+ * @param uhandler The interrupt handler, corresponding to an ISR handler for
+ * the resource
+ * @param int_data resource specific data, which will be passed to uhandler
+ * @param trigger_func Optional: a function to trigger the resource's irq, to
+ * probe for the interrupt. Use NULL if irqnum != -1.
+ * @param ack_func Optional: a function to acknowledge the resource's irq, to
+ * probe for the interrupt. Use NULL if irqnum != -1.
+ * @param probe_data resource-specific data, which will be passed to
+ * (if present) trigger_func and ack_func
+ * @param description textual description of the IRQ resource.
+ * @return on success, a pointer to a _mali_osk_irq_t object, which represents
+ * the IRQ handling on this resource. NULL on failure.
+ */
+_mali_osk_irq_t *_mali_osk_irq_init(u32 irqnum, _mali_osk_irq_uhandler_t uhandler, void *int_data, _mali_osk_irq_trigger_t trigger_func, _mali_osk_irq_ack_t ack_func, void *probe_data, const char *description);
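+
+/*
+ * Editor's sketch of the two registration modes (my_uhandler and dev are
+ * hypothetical). With a known IRQ number:
+ *
+ *   irq = _mali_osk_irq_init(irqnum, my_uhandler, dev,
+ *                            NULL, NULL, NULL, "my resource");
+ *
+ * With probing (irqnum == -1), as mali_mmu.c does via
+ * mali_mmu_probe_trigger() and mali_mmu_probe_ack():
+ *
+ *   irq = _mali_osk_irq_init(-1, my_uhandler, dev,
+ *                            trigger_func, ack_func, probe_data,
+ *                            "my resource");
+ */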
+
+/** @brief Terminate IRQ handling on a resource.
+ *
+ * This will disable the interrupt from the device, and then waits for any
+ * currently executing IRQ handlers to complete.
+ *
+ * @note If work is deferred to an IRQ bottom-half handler through
+ * \ref _mali_osk_wq_schedule_work(), be sure to flush any remaining work
+ * with \ref _mali_osk_wq_flush() or (implicitly) with \ref _mali_osk_wq_delete_work()
+ *
+ * @param irq a pointer to the _mali_osk_irq_t object corresponding to the
+ * resource whose IRQ handling is to be terminated.
+ */
+void _mali_osk_irq_term(_mali_osk_irq_t *irq);
+
+/** @} */ /* end group _mali_osk_irq */
+
+
+/** @addtogroup _mali_osk_atomic
+ * @{ */
+
+/** @brief Decrement an atomic counter
+ *
+ * @note It is an error to decrement the counter beyond -(1<<23)
+ *
+ * @param atom pointer to an atomic counter */
+void _mali_osk_atomic_dec(_mali_osk_atomic_t *atom);
+
+/** @brief Decrement an atomic counter, return new value
+ *
+ * @param atom pointer to an atomic counter
+ * @return The new value, after decrement */
+u32 _mali_osk_atomic_dec_return(_mali_osk_atomic_t *atom);
+
+/** @brief Increment an atomic counter
+ *
+ * @note It is an error to increment the counter beyond (1<<23)-1
+ *
+ * @param atom pointer to an atomic counter */
+void _mali_osk_atomic_inc(_mali_osk_atomic_t *atom);
+
+/** @brief Increment an atomic counter, return new value
+ *
+ * @param atom pointer to an atomic counter
+ * @return The new value, after increment */
+u32 _mali_osk_atomic_inc_return(_mali_osk_atomic_t *atom);
+
+/** @brief Initialize an atomic counter
+ *
+ * @note the parameter required is a u32, and so signed integers should be
+ * cast to u32.
+ *
+ * @param atom pointer to an atomic counter
+ * @param val the value to initialize the atomic counter.
+ */
+void _mali_osk_atomic_init(_mali_osk_atomic_t *atom, u32 val);
+
+/** @brief Read a value from an atomic counter
+ *
+ * This can only be safely used to determine the value of the counter when it
+ * is guaranteed that other threads will not be modifying the counter. This
+ * makes its usefulness limited.
+ *
+ * @param atom pointer to an atomic counter
+ */
+u32 _mali_osk_atomic_read(_mali_osk_atomic_t *atom);
+
+/** @brief Terminate an atomic counter
+ *
+ * @param atom pointer to an atomic counter
+ */
+void _mali_osk_atomic_term(_mali_osk_atomic_t *atom);
+
+/** @brief Assign a new value to an atomic counter, and return the old value
+ *
+ * @param atom pointer to an atomic counter
+ * @param val the new value assign to the atomic counter
+ * @return the old value of the atomic counter
+ */
+u32 _mali_osk_atomic_xchg(_mali_osk_atomic_t *atom, u32 val);
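+
+/*
+ * Reference-count sketch (editor's example; release_object() and obj are
+ * hypothetical):
+ *
+ *   _mali_osk_atomic_t refs;
+ *   _mali_osk_atomic_init(&refs, 1);
+ *   _mali_osk_atomic_inc(&refs);                   (take a reference)
+ *   if (0 == _mali_osk_atomic_dec_return(&refs)) {
+ *           _mali_osk_atomic_term(&refs);
+ *           release_object(obj);
+ *   }
+ */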
+/** @} */ /* end group _mali_osk_atomic */
+
+
+/** @defgroup _mali_osk_memory OSK Memory Allocation
+ * @{ */
+
+/** @brief Allocate zero-initialized memory.
+ *
+ * Returns a buffer capable of containing at least \a n elements of \a size
+ * bytes each. The buffer is initialized to zero.
+ *
+ * If there is a need for a bigger block of memory (16KB or bigger), then
+ * consider using _mali_osk_valloc() instead, as this function might
+ * map down to an OS function with size limitations.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * @param n Number of elements to allocate
+ * @param size Size of each element
+ * @return On success, the zero-initialized buffer allocated. NULL on failure
+ */
+void *_mali_osk_calloc(u32 n, u32 size);
+
+/** @brief Allocate memory.
+ *
+ * Returns a buffer capable of containing at least \a size bytes. The
+ * contents of the buffer are undefined.
+ *
+ * If there is a need for a bigger block of memory (16KB or bigger), then
+ * consider using _mali_osk_valloc() instead, as this function might
+ * map down to an OS function with size limitations.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * Remember to free memory using _mali_osk_free().
+ * @param size Number of bytes to allocate
+ * @return On success, the buffer allocated. NULL on failure.
+ */
+void *_mali_osk_malloc(u32 size);
+
+/** @brief Free memory.
+ *
+ * Reclaims the buffer pointed to by the parameter \a ptr for the system.
+ * All memory returned from _mali_osk_malloc() and _mali_osk_calloc()
+ * must be freed before the application exits. Otherwise,
+ * a memory leak will occur.
+ *
+ * Memory must be freed once. It is an error to free the same non-NULL pointer
+ * more than once.
+ *
+ * It is legal to free the NULL pointer.
+ *
+ * @param ptr Pointer to buffer to free
+ */
+void _mali_osk_free(void *ptr);
+
+/** @brief Allocate memory.
+ *
+ * Returns a buffer capable of containing at least \a size bytes. The
+ * contents of the buffer are undefined.
+ *
+ * This function is potentially slower than _mali_osk_malloc() and _mali_osk_calloc(),
+ * but does support bigger sizes.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * Remember to free memory using _mali_osk_free().
+ * @param size Number of bytes to allocate
+ * @return On success, the buffer allocated. NULL on failure.
+ */
+void *_mali_osk_valloc(u32 size);
+
+/** @brief Free memory.
+ *
+ * Reclaims the buffer pointed to by the parameter \a ptr for the system.
+ * All memory returned from _mali_osk_valloc() must be freed before the
+ * application exits. Otherwise a memory leak will occur.
+ *
+ * Memory must be freed once. It is an error to free the same non-NULL pointer
+ * more than once.
+ *
+ * It is legal to free the NULL pointer.
+ *
+ * @param ptr Pointer to buffer to free
+ */
+void _mali_osk_vfree(void *ptr);
+
+/** @brief Copies memory.
+ *
+ * Copies \a len bytes from the buffer pointed to by the parameter \a src
+ * directly to the buffer pointed to by \a dst.
+ *
+ * It is an error for \a src to overlap \a dst anywhere in \a len bytes.
+ *
+ * @param dst Pointer to the destination array where the content is to be
+ * copied.
+ * @param src Pointer to the source of data to be copied.
+ * @param len Number of bytes to copy.
+ * @return \a dst is always passed through unmodified.
+ */
+void *_mali_osk_memcpy(void *dst, const void *src, u32 len);
+
+/** @brief Fills memory.
+ *
+ * Sets the first \a n bytes of the block of memory pointed to by \a s to
+ * the specified value.
+ * @param s Pointer to the block of memory to fill.
+ * @param c Value to be set, passed as u32. Only the 8 Least Significant Bits (LSB)
+ * are used.
+ * @param n Number of bytes to be set to the value.
+ * @return \a s is always passed through unmodified
+ */
+void *_mali_osk_memset(void *s, u32 c, u32 n);
+/** @} */ /* end group _mali_osk_memory */
+
+
+/** @brief Checks the amount of memory allocated
+ *
+ * Checks that not more than \a max_allocated bytes are allocated.
+ *
+ * Some OSes bring up an interactive out-of-memory dialogue when the
+ * system runs out of memory. This can stall non-interactive
+ * apps (e.g. automated test runs). This function can be used to
+ * not trigger the OOM dialogue by keeping allocations
+ * within a certain limit.
+ *
+ * @return MALI_TRUE when \a max_allocated bytes are not in use yet. MALI_FALSE
+ * when at least \a max_allocated bytes are in use.
+ */
+mali_bool _mali_osk_mem_check_allocated(u32 max_allocated);
+
+
+/** @addtogroup _mali_osk_low_level_memory
+ * @{ */
+
+/** @brief Issue a memory barrier
+ *
+ * This defines an arbitrary memory barrier operation, which forces an ordering constraint
+ * on memory read and write operations.
+ */
+void _mali_osk_mem_barrier(void);
+
+/** @brief Issue a write memory barrier
+ *
+ * This defines a write memory barrier operation which forces an ordering constraint
+ * on memory write operations.
+ */
+void _mali_osk_write_mem_barrier(void);
+
+/** @brief Map a physically contiguous region into kernel space
+ *
+ * This is primarily used for mapping in registers from resources, and Mali-MMU
+ * page tables. The mapping is only visible from kernel-space.
+ *
+ * Access has to go through _mali_osk_mem_ioread32 and _mali_osk_mem_iowrite32
+ *
+ * @param phys CPU-physical base address of the memory to map in. This must
+ * be aligned to the system's page size, which is assumed to be 4K.
+ * @param size the number of bytes of physically contiguous address space to
+ * map in
+ * @param description A textual description of the memory being mapped in.
+ * @return On success, a Mali IO address through which the mapped-in
+ * memory/registers can be accessed. NULL on failure.
+ */
+mali_io_address _mali_osk_mem_mapioregion(uintptr_t phys, u32 size, const char *description);
+
+/** @brief Unmap a physically contiguous address range from kernel space.
+ *
+ * The address range should be one previously mapped in through
+ * _mali_osk_mem_mapioregion.
+ *
+ * It is a programming error to do (but not limited to) the following:
+ * - attempt an unmap twice
+ * - unmap only part of a range obtained through _mali_osk_mem_mapioregion
+ * - unmap more than the range obtained through _mali_osk_mem_mapioregion
+ * - unmap an address range that was not successfully mapped using
+ * _mali_osk_mem_mapioregion
+ * - provide a mapping that does not map to phys.
+ *
+ * @param phys CPU-physical base address of the memory that was originally
+ * mapped in. This must be aligned to the system's page size, which is assumed
+ * to be 4K
+ * @param size The number of bytes that were originally mapped in.
+ * @param mapping The Mali IO address through which the mapping is
+ * accessed.
+ */
+void _mali_osk_mem_unmapioregion(uintptr_t phys, u32 size, mali_io_address mapping);
+
+/** @brief Allocate and Map a physically contiguous region into kernel space
+ *
+ * This is used for allocating physically contiguous regions (such as Mali-MMU
+ * page tables) and mapping them into kernel space. The mapping is only
+ * visible from kernel-space.
+ *
+ * The alignment of the returned memory is guaranteed to be at least
+ * _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * Access must go through _mali_osk_mem_ioread32 and _mali_osk_mem_iowrite32
+ *
+ * @note This function is primarily to provide support for OSs that are
+ * incapable of separating the tasks 'allocate physically contiguous memory'
+ * and 'map it into kernel space'
+ *
+ * @param[out] phys CPU-physical base address of memory that was allocated.
+ * (*phys) will be guaranteed to be aligned to at least
+ * _MALI_OSK_CPU_PAGE_SIZE on success.
+ *
+ * @param[in] size the number of bytes of physically contiguous memory to
+ * allocate. This must be a multiple of _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @return On success, a Mali IO address through which the mapped-in
+ * memory/registers can be accessed. NULL on failure, and (*phys) is unmodified.
+ */
+mali_io_address _mali_osk_mem_allocioregion(u32 *phys, u32 size);
+
+/** @brief Free a physically contiguous address range from kernel space.
+ *
+ * The address range should be one previously mapped in through
+ * _mali_osk_mem_allocioregion.
+ *
+ * It is a programming error to do (but not limited to) the following:
+ * - attempt a free twice on the same ioregion
+ * - free only part of a range obtained through _mali_osk_mem_allocioregion
+ * - free more than the range obtained through _mali_osk_mem_allocioregion
+ * - free an address range that was not successfully mapped using
+ * _mali_osk_mem_allocioregion
+ * - provide a mapping that does not map to phys.
+ *
+ * @param phys CPU-physical base address of the memory that was originally
+ * mapped in, which was aligned to _MALI_OSK_CPU_PAGE_SIZE.
+ * @param size The number of bytes that were originally mapped in, which was
+ * a multiple of _MALI_OSK_CPU_PAGE_SIZE.
+ * @param mapping The Mali IO address through which the mapping is
+ * accessed.
+ */
+void _mali_osk_mem_freeioregion(u32 phys, u32 size, mali_io_address mapping);
+
+/** @brief Request a region of physically contiguous memory
+ *
+ * This is used to ensure exclusive access to a region of physically contiguous
+ * memory.
+ *
+ * It is acceptable to implement this as a stub. However, it is then the job
+ * of the System Integrator to ensure that no other device driver will be using
+ * the physical address ranges used by Mali, while the Mali device driver is
+ * loaded.
+ *
+ * @param phys CPU-physical base address of the memory to request. This must
+ * be aligned to the system's page size, which is assumed to be 4K.
+ * @param size the number of bytes of physically contiguous address space to
+ * request.
+ * @param description A textual description of the memory being requested.
+ * @return _MALI_OSK_ERR_OK on success. Otherwise, a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_osk_mem_reqregion(uintptr_t phys, u32 size, const char *description);
+
+/** @brief Un-request a region of physically contiguous memory
+ *
+ * This is used to release a region of physically contiguous memory previously
+ * requested through _mali_osk_mem_reqregion, so that other device drivers may
+ * use it. This will be called at time of Mali device driver termination.
+ *
+ * It is a programming error to attempt to:
+ * - unrequest a region twice
+ * - unrequest only part of a range obtained through _mali_osk_mem_reqregion
+ * - unrequest more than the range obtained through _mali_osk_mem_reqregion
+ * - unrequest an address range that was not successfully requested using
+ * _mali_osk_mem_reqregion
+ *
+ * @param phys CPU-physical base address of the memory to un-request. This must
+ * be aligned to the system's page size, which is assumed to be 4K
+ * @param size the number of bytes of physically contiguous address space to
+ * un-request.
+ */
+void _mali_osk_mem_unreqregion(uintptr_t phys, u32 size);
+
+/** @brief Read from a location currently mapped in through
+ * _mali_osk_mem_mapioregion
+ *
+ * This reads a 32-bit word from a 32-bit aligned location. It is a programming
+ * error to provide unaligned locations, or to read from memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param mapping Mali IO address to read from
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @return the 32-bit word from the specified location.
+ */
+u32 _mali_osk_mem_ioread32(volatile mali_io_address mapping, u32 offset);
+
+/** @brief Write to a location currently mapped in through
+ * _mali_osk_mem_mapioregion without memory barriers
+ *
+ * This writes a 32-bit word to a 32-bit aligned location without using a memory barrier.
+ * It is a programming error to provide unaligned locations, or to write to memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param mapping Mali IO address to write to
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @param val the 32-bit word to write.
+ */
+void _mali_osk_mem_iowrite32_relaxed(volatile mali_io_address mapping, u32 offset, u32 val);
+
+/** @brief Write to a location currently mapped in through
+ * _mali_osk_mem_mapioregion with write memory barrier
+ *
+ * This writes a 32-bit word to a 32-bit aligned location. It is a programming
+ * error to provide unaligned locations, or to write to memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param mapping Mali IO address to write to
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @param val the 32-bit word to write.
+ */
+void _mali_osk_mem_iowrite32(volatile mali_io_address mapping, u32 offset, u32 val);
+
+/** @brief Flush all CPU caches
+ *
+ * This should only be implemented if flushing of the cache is required for
+ * memory mapped in through _mali_osk_mem_mapregion.
+ */
+void _mali_osk_cache_flushall(void);
+
+/** @brief Flush any caches necessary for the CPU and MALI to have the same view of a range of uncached mapped memory
+ *
+ * This should only be implemented if your OS doesn't do a full cache flush (inner & outer)
+ * after allocating uncached mapped memory.
+ *
+ * Some OSes do not perform a full cache flush (including all outer caches) for uncached mapped memory.
+ * They zero the memory through a cached mapping, then flush the inner caches but not the outer caches.
+ * This is required for MALI to have the correct view of the memory.
+ */
+void _mali_osk_cache_ensure_uncached_range_flushed(void *uncached_mapping, u32 offset, u32 size);
+
+/** @brief Safely copy as much data as possible from src to dest
+ *
+ * Do not crash if src or dest isn't available.
+ *
+ * @param dest Destination buffer (limited to user space mapped Mali memory)
+ * @param src Source buffer
+ * @param size Number of bytes to copy
+ * @return Number of bytes actually copied
+ */
+u32 _mali_osk_mem_write_safe(void *dest, const void *src, u32 size);
+
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+
+/** @addtogroup _mali_osk_notification
+ *
+ * User space notification framework
+ *
+ * Communication with user space of asynchronous events is performed through a
+ * synchronous call to the \ref u_k_api.
+ *
+ * Since the events are asynchronous, the events have to be queued until a
+ * synchronous U/K API call can be made by user-space. A U/K API call might also
+ * be received before any event has happened. Therefore the notifications the
+ * different subsystems want to send to user space have to be queued for later
+ * reception, or a U/K API call has to be blocked until an event has occurred.
+ *
+ * Typical uses of notifications are after running of jobs on the hardware or
+ * when changes to the system are detected that need to be relayed to user
+ * space.
+ *
+ * After an event has occurred, user space has to be notified using some kind of
+ * message. The notification framework supports sending messages to waiting
+ * threads or queueing of messages until a U/K API call is made.
+ *
+ * The notification queue is a FIFO. There are no restrictions on the number
+ * of readers or writers in the queue.
+ *
+ * A message contains what user space needs to identify how to handle an
+ * event. This includes a type field and a possible type specific payload.
+ *
+ * A notification to user space is represented by a
+ * \ref _mali_osk_notification_t object. A sender gets hold of such an object
+ * using _mali_osk_notification_create(). The buffer given by the
+ * _mali_osk_notification_t::result_buffer field in the object is used to store
+ * any type specific data. The other fields are internal to the queue system
+ * and should not be touched.
+ *
+ * @{ */
+
+/** @brief Create a notification object
+ *
+ * Returns a notification object which can be added to the queue of
+ * notifications pending for user space transfer.
+ *
+ * The implementation will initialize all members of the
+ * \ref _mali_osk_notification_t object. In particular, the
+ * _mali_osk_notification_t::result_buffer member will be initialized to point
+ * to \a size bytes of storage, and that storage will be suitably aligned for
+ * storage of any structure. That is, the created buffer meets the same
+ * requirements as _mali_osk_malloc().
+ *
+ * The notification object must be deleted when not in use. Use
+ * _mali_osk_notification_delete() for deleting it.
+ *
+ * @note You \b must \b not call _mali_osk_free() on a \ref _mali_osk_notification_t
+ * object, or on a _mali_osk_notification_t::result_buffer. You must only use
+ * _mali_osk_notification_delete() to free the resources associated with a
+ * \ref _mali_osk_notification_t object.
+ *
+ * @param type The notification type
+ * @param size The size of the type specific buffer to send
+ * @return Pointer to a notification object with a suitable buffer, or NULL on error.
+ */
+_mali_osk_notification_t *_mali_osk_notification_create(u32 type, u32 size);
+
+/** @brief Delete a notification object
+ *
+ * This must be called to reclaim the resources of a notification object. This
+ * includes:
+ * - The _mali_osk_notification_t::result_buffer
+ * - The \ref _mali_osk_notification_t itself.
+ *
+ * A notification object \b must \b not be used after it has been deleted by
+ * _mali_osk_notification_delete().
+ *
+ * In addition, the notification object may not be deleted while it is in a
+ * queue. That is, if it has been placed on a queue with
+ * _mali_osk_notification_queue_send(), then it must not be deleted until
+ * it has been received by a call to _mali_osk_notification_queue_receive().
+ * Otherwise, the queue may be corrupted.
+ *
+ * @param object the notification object to delete.
+ */
+void _mali_osk_notification_delete(_mali_osk_notification_t *object);
+
+/** @brief Create a notification queue
+ *
+ * Creates a notification queue, which can be used to queue messages for
+ * delivery to user space, and from which queued messages can be retrieved.
+ *
+ * The queue is a FIFO, and has no restrictions on the number of readers or
+ * writers.
+ *
+ * When the queue is no longer in use, it must be terminated with
+ * \ref _mali_osk_notification_queue_term(). Failure to do so will result in a
+ * memory leak.
+ *
+ * @return Pointer to a new notification queue or NULL on error.
+ */
+_mali_osk_notification_queue_t *_mali_osk_notification_queue_init(void);
+
+/** @brief Destroy a notification queue
+ *
+ * Destroys a notification queue and frees associated resources from the queue.
+ *
+ * A notification queue \b must \b not be destroyed in the following cases:
+ * - while there are \ref _mali_osk_notification_t objects in the queue.
+ * - while there are writers currently acting upon the queue. That is, while
+ * a thread is currently calling \ref _mali_osk_notification_queue_send() on
+ * the queue, or while a thread may call
+ * \ref _mali_osk_notification_queue_send() on the queue in the future.
+ * - while there are readers currently waiting upon the queue. That is, while
+ * a thread is currently calling \ref _mali_osk_notification_queue_receive() on
+ * the queue, or while a thread may call
+ * \ref _mali_osk_notification_queue_receive() on the queue in the future.
+ *
+ * Therefore, all \ref _mali_osk_notification_t objects must be flushed and
+ * deleted by the code that makes use of the notification queues, since only
+ * they know the structure of the _mali_osk_notification_t::result_buffer
+ * (even if it may only be a flat structure).
+ *
+ * @note Since the queue is a FIFO, the code using notification queues may
+ * create its own 'flush' type of notification, to assist in flushing the
+ * queue.
+ *
+ * Once the queue has been destroyed, it must not be used again.
+ *
+ * @param queue The queue to destroy
+ */
+void _mali_osk_notification_queue_term(_mali_osk_notification_queue_t *queue);
+
+/** @brief Schedule notification for delivery
+ *
+ * When a \ref _mali_osk_notification_t object has been created successfully
+ * and set up, it may be added to the queue of objects waiting for user space
+ * transfer.
+ *
+ * The sending will not block if the queue is full.
+ *
+ * A \ref _mali_osk_notification_t object \b must \b not be put on two different
+ * queues at the same time, or enqueued twice onto a single queue before
+ * reception. However, it is acceptable for it to be requeued \em after reception
+ * from a call to _mali_osk_notification_queue_receive(), even onto the same queue.
+ *
+ * Again, requeuing must also not enqueue onto two different queues at the same
+ * time, or enqueue onto the same queue twice before reception.
+ *
+ * @param queue The notification queue to add this notification to
+ * @param object The entry to add
+ */
+void _mali_osk_notification_queue_send(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t *object);
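+
+/* Illustrative lifecycle sketch (MY_EVENT_TYPE and struct my_payload are
+ * hypothetical): create a notification, fill in its payload through
+ * result_buffer, and queue it for user space transfer. The receiving side
+ * frees it later with _mali_osk_notification_delete().
+ *
+ *   _mali_osk_notification_t *n;
+ *   n = _mali_osk_notification_create(MY_EVENT_TYPE, sizeof(struct my_payload));
+ *   if (NULL != n) {
+ *           struct my_payload *p = n->result_buffer;
+ *           p->status = 0;
+ *           _mali_osk_notification_queue_send(queue, n);
+ *   }
+ */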
+
+/** @brief Receive a notification from a queue
+ *
+ * Receives a single notification from the given queue.
+ *
+ * If no notifications are ready, the thread will sleep until one becomes ready.
+ * Therefore, notifications may not be received into an
+ * IRQ or 'atomic' context (that is, a context where sleeping is disallowed).
+ *
+ * @param queue The queue to receive from
+ * @param result Pointer to storage of a pointer of type
+ * \ref _mali_osk_notification_t*. \a result will be written to such that the
+ * expression \a (*result) will evaluate to a pointer to a valid
+ * \ref _mali_osk_notification_t object, or NULL if none were received.
+ * @return _MALI_OSK_ERR_OK on success. _MALI_OSK_ERR_RESTARTSYSCALL if the sleep was interrupted.
+ */
+_mali_osk_errcode_t _mali_osk_notification_queue_receive(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result);
+
+/** @brief Dequeues a notification from a queue
+ *
+ * Receives a single notification from the given queue.
+ *
+ * If no notifications are ready, the function call will return an error code.
+ *
+ * @param queue The queue to receive from
+ * @param result Pointer to storage of a pointer of type
+ * \ref _mali_osk_notification_t*. \a result will be written to such that the
+ * expression \a (*result) will evaluate to a pointer to a valid
+ * \ref _mali_osk_notification_t object, or NULL if none were received.
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_ITEM_NOT_FOUND if queue was empty.
+ */
+_mali_osk_errcode_t _mali_osk_notification_queue_dequeue(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result);
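+
+/* Illustrative sketch: drain a queue without blocking, deleting each
+ * notification once it has been handled.
+ *
+ *   _mali_osk_notification_t *n;
+ *   while (_MALI_OSK_ERR_OK == _mali_osk_notification_queue_dequeue(q, &n)) {
+ *           (handle n->result_buffer here)
+ *           _mali_osk_notification_delete(n);
+ *   }
+ */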
+
+/** @} */ /* end group _mali_osk_notification */
+
+
+/** @addtogroup _mali_osk_timer
+ *
+ * Timers use the OS's representation of time, which is 'ticks'. This is to
+ * prevent aliasing problems between the internal timer time, and the time
+ * asked for.
+ *
+ * @{ */
+
+/** @brief Initialize a timer
+ *
+ * Allocates resources for a new timer, and initializes them. This does not
+ * start the timer.
+ *
+ * @return a pointer to the allocated timer object, or NULL on failure.
+ */
+_mali_osk_timer_t *_mali_osk_timer_init(void);
+
+/** @brief Start a timer
+ *
+ * It is an error to start a timer without setting the callback via
+ * _mali_osk_timer_setcallback().
+ *
+ * It is an error to use this to start an already started timer.
+ *
+ * The timer will expire in \a ticks_to_expire ticks, at which point, the
+ * callback function will be invoked with the callback-specific data,
+ * as registered by _mali_osk_timer_setcallback().
+ *
+ * @param tim the timer to start
+ * @param ticks_to_expire the amount of time in ticks for the timer to run
+ * before triggering.
+ */
+void _mali_osk_timer_add(_mali_osk_timer_t *tim, unsigned long ticks_to_expire);
+
+/** @brief Modify a timer
+ *
+ * Set the relative time at which a timer will expire, and start it if it is
+ * stopped. If \a ticks_to_expire is 0, the timer fires immediately.
+ *
+ * It is an error to modify a timer without setting the callback via
+ * _mali_osk_timer_setcallback().
+ *
+ * The timer will expire at \a ticks_to_expire from the time of the call, at
+ * which point, the callback function will be invoked with the
+ * callback-specific data, as set by _mali_osk_timer_setcallback().
+ *
+ * @param tim the timer to modify, and start if necessary
+ * @param ticks_to_expire the \em relative time in ticks at which this timer
+ * should trigger.
+ *
+ */
+void _mali_osk_timer_mod(_mali_osk_timer_t *tim, unsigned long ticks_to_expire);
+
+/** @brief Stop a timer, and block on its completion.
+ *
+ * Stop the timer. When the function returns, it is guaranteed that the timer's
+ * callback will not be running on any CPU core.
+ *
+ * Since stopping the timer blocks on completion of the callback, the callback
+ * may not obtain any mutexes that the caller holds. Otherwise, a deadlock will
+ * occur.
+ *
+ * @note While the callback itself is guaranteed to not be running, work
+ * enqueued on the work-queue by the timer (with
+ * \ref _mali_osk_wq_schedule_work()) may still run. The timer callback and
+ * work handler must take this into account.
+ *
+ * It is legal to stop an already stopped timer.
+ *
+ * @param tim the timer to stop.
+ *
+ */
+void _mali_osk_timer_del(_mali_osk_timer_t *tim);
+
+/** @brief Stop a timer.
+ *
+ * Stop the timer. When the function returns, the timer's callback may still be
+ * running on any CPU core.
+ *
+ * It is legal to stop an already stopped timer.
+ *
+ * @param tim the timer to stop.
+ */
+void _mali_osk_timer_del_async(_mali_osk_timer_t *tim);
+
+/** @brief Check if timer is pending.
+ *
+ * Check if timer is active.
+ *
+ * @param tim the timer to check
+ * @return MALI_TRUE if the timer is active, MALI_FALSE if it is not active
+ */
+mali_bool _mali_osk_timer_pending(_mali_osk_timer_t *tim);
+
+/** @brief Set a timer's callback parameters.
+ *
+ * This must be called at least once before a timer is started/modified.
+ *
+ * After a timer has been stopped or expires, the callback remains set. This
+ * means that restarting the timer will call the same function with the same
+ * parameters on expiry.
+ *
+ * @param tim the timer to set callback on.
+ * @param callback Function to call when timer expires
+ * @param data Function-specific data to supply to the function on expiry.
+ */
+void _mali_osk_timer_setcallback(_mali_osk_timer_t *tim, _mali_osk_timer_callback_t callback, void *data);
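+
+/* Illustrative timer lifecycle sketch (my_timeout_handler and my_data are
+ * hypothetical):
+ *
+ *   _mali_osk_timer_t *t = _mali_osk_timer_init();
+ *   if (NULL != t) {
+ *           _mali_osk_timer_setcallback(t, my_timeout_handler, my_data);
+ *           _mali_osk_timer_add(t, _mali_osk_time_mstoticks(100));
+ *           ...
+ *           _mali_osk_timer_del(t);    (stop; blocks on the callback)
+ *           _mali_osk_timer_term(t);
+ *   }
+ */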
+
+/** @brief Terminate a timer, and deallocate resources.
+ *
+ * The timer must first be stopped by calling _mali_osk_timer_del().
+ *
+ * It is a programming error for _mali_osk_timer_term() to be called on:
+ * - a timer that is currently running
+ * - a timer that is currently executing its callback.
+ *
+ * @param tim the timer to deallocate.
+ */
+void _mali_osk_timer_term(_mali_osk_timer_t *tim);
+/** @} */ /* end group _mali_osk_timer */
+
+
+/** @defgroup _mali_osk_time OSK Time functions
+ *
+ * The \ref _mali_osk_time functions use the OS's representation of time, which is
+ * 'ticks'. This is to prevent aliasing problems between the internal timer
+ * time, and the time asked for.
+ *
+ * OS tick time is measured as a u32. The time stored in a u32 may either be
+ * an absolute time, or a time delta between two events. Whilst it is valid to
+ * use math operators to \em change the tick value represented as a u32, it
+ * is often only meaningful to do such operations on time deltas, rather than
+ * on absolute time. However, it is meaningful to add/subtract time deltas to
+ * absolute times.
+ *
+ * Conversion between tick time and milliseconds (ms) may not be loss-less,
+ * and is \em implementation \em dependent.
+ *
+ * Code using OS time must take this into account, since:
+ * - a small OS time may (or may not) be rounded
+ * - a large time may (or may not) overflow
+ *
+ * @{ */
+
+/** @brief Return whether ticka occurs after or at the same time as tickb
+ *
+ * Implementations on systems where the tick count can wrap must handle this.
+ *
+ * @param ticka ticka
+ * @param tickb tickb
+ * @return MALI_TRUE if ticka represents a time that occurs at or after tickb.
+ */
+mali_bool _mali_osk_time_after_eq(unsigned long ticka, unsigned long tickb);
+
+/** @brief Convert milliseconds to OS 'ticks'
+ *
+ * @param ms time interval in milliseconds
+ * @return the corresponding time interval in OS ticks.
+ */
+unsigned long _mali_osk_time_mstoticks(u32 ms);
+
+/** @brief Convert OS 'ticks' to milliseconds
+ *
+ * @param ticks time interval in OS ticks.
+ * @return the corresponding time interval in milliseconds
+ */
+u32 _mali_osk_time_tickstoms(unsigned long ticks);
+
+
+/** @brief Get the current time in OS 'ticks'.
+ * @return the current time in OS 'ticks'.
+ */
+unsigned long _mali_osk_time_tickcount(void);
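+
+/* Illustrative sketch: a 200 ms deadline check built from the tick helpers.
+ * Using _mali_osk_time_after_eq() keeps the comparison correct even when
+ * the tick count wraps.
+ *
+ *   unsigned long deadline = _mali_osk_time_tickcount()
+ *                            + _mali_osk_time_mstoticks(200);
+ *   ...
+ *   if (_mali_osk_time_after_eq(_mali_osk_time_tickcount(), deadline)) {
+ *           (the deadline has passed)
+ *   }
+ */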
+
+/** @brief Cause a microsecond delay
+ *
+ * The delay will have microsecond resolution, and is necessary for correct
+ * operation of the driver. The delay will be \b at least \a usecs
+ * microseconds, and so may be (significantly) longer.
+ *
+ * This function may be implemented as a busy-wait, which is the most sensible
+ * implementation. On OSs where there are situations in which a thread must not
+ * sleep, this is definitely implemented as a busy-wait.
+ *
+ * @param usecs the number of microseconds to wait for.
+ */
+void _mali_osk_time_ubusydelay(u32 usecs);
+
+/** @brief Return time in nanoseconds, since any given reference.
+ *
+ * @return Time in nanoseconds
+ */
+u64 _mali_osk_time_get_ns(void);
+
+/** @brief Return time in nanoseconds, since boot time.
+ *
+ * @return Time in nanoseconds
+ */
+u64 _mali_osk_boot_time_get_ns(void);
+
+/** @} */ /* end group _mali_osk_time */
+
+/** @defgroup _mali_osk_math OSK Math
+ * @{ */
+
+/** @brief Count Leading Zeros (Little-endian)
+ *
+ * @note This function must be implemented to support the reference
+ * implementation of _mali_osk_find_first_zero_bit, as defined in
+ * mali_osk_bitops.h.
+ *
+ * @param val 32-bit word to count leading zeros in
+ * @return the number of leading zeros.
+ */
+u32 _mali_osk_clz(u32 val);
+
+/** @brief Find the last (most-significant) bit set
+ *
+ * @param val 32-bit word to find the last set bit in
+ * @return the last bit set.
+ */
+u32 _mali_osk_fls(u32 val);
+
+/** @} */ /* end group _mali_osk_math */
+
+/** @addtogroup _mali_osk_wait_queue OSK Wait Queue functionality
+ * @{ */
+
+/** @brief Initialize an empty Wait Queue */
+_mali_osk_wait_queue_t *_mali_osk_wait_queue_init(void);
+
+/** @brief Sleep if condition is false
+ *
+ * @param queue the queue to use
+ * @param condition function pointer to a boolean function
+ * @param data data parameter for condition function
+ *
+ * Put thread to sleep if the given \a condition function returns false. When
+ * being asked to wake up again, the condition will be re-checked and the
+ * thread only woken up if the condition is now true.
+ */
+void _mali_osk_wait_queue_wait_event(_mali_osk_wait_queue_t *queue, mali_bool(*condition)(void *), void *data);
+
+/** @brief Sleep if condition is false
+ *
+ * @param queue the queue to use
+ * @param condition function pointer to a boolean function
+ * @param data data parameter for condition function
+ * @param timeout timeout in ms
+ *
+ * Put thread to sleep if the given \a condition function returns false. When
+ * being asked to wake up again, the condition will be re-checked and the
+ * thread only woken up if the condition is now true. The call also returns
+ * once the timeout (in ms) has expired, even if the condition is still false.
+ */
+void _mali_osk_wait_queue_wait_event_timeout(_mali_osk_wait_queue_t *queue, mali_bool(*condition)(void *), void *data, u32 timeout);
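+
+/* Illustrative sketch (struct my_job and job_is_done are hypothetical): the
+ * condition function is re-evaluated on every wake-up, so it should only
+ * read state that is safe to access concurrently.
+ *
+ *   static mali_bool job_is_done(void *data)
+ *   {
+ *           struct my_job *job = data;
+ *           return job->done ? MALI_TRUE : MALI_FALSE;
+ *   }
+ *
+ *   _mali_osk_wait_queue_wait_event(queue, job_is_done, job);
+ */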
+
+/** @brief Wake up all threads in wait queue if their respective conditions are
+ * true
+ *
+ * @param queue the queue whose threads should be woken up
+ *
+ * Wake up all threads in wait queue \a queue whose condition is now true.
+ */
+void _mali_osk_wait_queue_wake_up(_mali_osk_wait_queue_t *queue);
+
+/** @brief terminate a wait queue
+ *
+ * @param queue the queue to terminate.
+ */
+void _mali_osk_wait_queue_term(_mali_osk_wait_queue_t *queue);
+/** @} */ /* end group _mali_osk_wait_queue */
+
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief Output a device driver debug message.
+ *
+ * The interpretation of \a fmt is the same as the \c format parameter in
+ * _mali_osu_vsnprintf().
+ *
+ * @param fmt a _mali_osu_vsnprintf() style format string
+ * @param ... a variable-number of parameters suitable for \a fmt
+ */
+void _mali_osk_dbgmsg(const char *fmt, ...);
+
+/** @brief Print fmt into buf.
+ *
+ * The interpretation of \a fmt is the same as the \c format parameter in
+ * _mali_osu_vsnprintf().
+ *
+ * @param buf a pointer to the result buffer
+ * @param size the total number of bytes allowed to write to \a buf
+ * @param fmt a _mali_osu_vsnprintf() style format string
+ * @param ... a variable-number of parameters suitable for \a fmt
+ * @return The number of bytes written to \a buf
+ */
+u32 _mali_osk_snprintf(char *buf, u32 size, const char *fmt, ...);
+
+/** @brief Print fmt into print_ctx.
+ *
+ * The interpretation of \a fmt is the same as the \c format parameter in
+ * _mali_osu_vsnprintf().
+ *
+ * @param print_ctx a pointer to the result file buffer
+ * @param fmt a _mali_osu_vsnprintf() style format string
+ * @param ... a variable-number of parameters suitable for \a fmt
+ */
+void _mali_osk_ctxprintf(_mali_osk_print_ctx *print_ctx, const char *fmt, ...);
+
+/** @brief Abnormal process abort.
+ *
+ * Terminates the calling process if this function is called.
+ *
+ * This function will be called from Debug assert-macros in mali_kernel_common.h.
+ *
+ * This function will never return - because to continue from a Debug assert
+ * could cause even more problems, and hinder debugging of the initial problem.
+ *
+ * This function is only used in Debug builds, and is not used in Release builds.
+ */
+void _mali_osk_abort(void);
+
+/** @brief Sets breakpoint at point where function is called.
+ *
+ * This function will be called from Debug assert-macros in mali_kernel_common.h,
+ * to assist in debugging. If debugging at this level is not required, then this
+ * function may be implemented as a stub.
+ *
+ * This function is only used in Debug builds, and is not used in Release builds.
+ */
+void _mali_osk_break(void);
+
+/** @brief Return an identifier for the calling process.
+ *
+ * @return Identifier for the calling process.
+ */
+u32 _mali_osk_get_pid(void);
+
+/** @brief Return a name for the calling process.
+ *
+ * @return Name of the calling process.
+ */
+char *_mali_osk_get_comm(void);
+
+/** @brief Return an identifier for the calling thread.
+ *
+ * @return Identifier for the calling thread.
+ */
+u32 _mali_osk_get_tid(void);
+
+
+/** @brief Take a reference to the power manager system for the Mali device (synchronously).
+ *
+ * When this function returns successfully, Mali is powered on.
+ *
+ * @note Call \a _mali_osk_pm_dev_ref_put() to release this reference.
+ */
+_mali_osk_errcode_t _mali_osk_pm_dev_ref_get_sync(void);
+
+/** @brief Take a reference to the external power manager system for the Mali device (asynchronously).
+ *
+ * Mali might not yet be powered on when this function has returned.
+ * Please use \a _mali_osk_pm_dev_barrier() or \a _mali_osk_pm_dev_ref_get_sync()
+ * to wait for Mali to be powered on.
+ *
+ * @note Call \a _mali_osk_pm_dev_ref_put() to release this reference.
+ */
+_mali_osk_errcode_t _mali_osk_pm_dev_ref_get_async(void);
+
+/** @brief Release the reference to the external power manager system for the Mali device.
+ *
+ * When the reference count reaches zero, the cores may be powered off.
+ *
+ * @note This must be used to release references taken with
+ * \a _mali_osk_pm_dev_ref_get_sync() or \a _mali_osk_pm_dev_ref_get_async().
+ */
+void _mali_osk_pm_dev_ref_put(void);
+
+/** @brief Block until pending PM operations are done
+ */
+void _mali_osk_pm_dev_barrier(void);
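+
+/* Illustrative sketch: hold a PM reference across a register access so the
+ * GPU cannot be powered down in the middle of it.
+ *
+ *   if (_MALI_OSK_ERR_OK == _mali_osk_pm_dev_ref_get_sync()) {
+ *           (Mali is powered on; safe to access its registers here)
+ *           _mali_osk_pm_dev_ref_put();
+ *   }
+ */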
+
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+/** @defgroup _mali_osk_bitmap OSK Bitmap
+ * @{ */
+
+/** @brief Allocate a unique number from the bitmap object.
+ *
+ * @param bitmap Initialized bitmap object.
+ * @return A unique number allocated from the bitmap object.
+ */
+u32 _mali_osk_bitmap_alloc(struct _mali_osk_bitmap *bitmap);
+
+/** @brief Free a number back to the bitmap object.
+ *
+ * @param bitmap Initialized bitmap object.
+ * @param obj A number allocated from the bitmap object.
+ */
+void _mali_osk_bitmap_free(struct _mali_osk_bitmap *bitmap, u32 obj);
+
+/** @brief Allocate a contiguous block of numbers from the bitmap object.
+ *
+ * @param bitmap Initialized bitmap object.
+ * @param cnt The size of the contiguous block to allocate.
+ * @return The start number of the contiguous block.
+ */
+u32 _mali_osk_bitmap_alloc_range(struct _mali_osk_bitmap *bitmap, int cnt);
+
+/** @brief Free a contiguous block of numbers back to the bitmap object.
+ *
+ * @param bitmap Initialized bitmap object.
+ * @param obj The start number of the block.
+ * @param cnt The size of the contiguous block.
+ */
+void _mali_osk_bitmap_free_range(struct _mali_osk_bitmap *bitmap, u32 obj, int cnt);
+
+/** @brief Return the count of numbers still available for allocation in the
+ * given bitmap object.
+ *
+ * @param bitmap Initialized bitmap object.
+ * @return The number of values available for allocation.
+ */
+u32 _mali_osk_bitmap_avail(struct _mali_osk_bitmap *bitmap);
+
+/** @brief Initialize a bitmap object.
+ *
+ * @param bitmap A pointer to an uninitialized bitmap object.
+ * @param num Size of the bitmap object; determines the amount of memory allocated.
+ * @param reserve The start number for allocations; values below this are reserved.
+ * @return 0 on success, otherwise failure.
+ */
+int _mali_osk_bitmap_init(struct _mali_osk_bitmap *bitmap, u32 num, u32 reserve);
+
+/** @brief Free the given bitmap object.
+ *
+ * @param bitmap Initialized bitmap object.
+ */
+void _mali_osk_bitmap_term(struct _mali_osk_bitmap *bitmap);
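+
+/* Illustrative usage sketch (the sizes are hypothetical, and a zero return
+ * from _mali_osk_bitmap_init() is assumed to mean success): allocate an ID,
+ * use it, then return it and tear the bitmap down.
+ *
+ *   struct _mali_osk_bitmap ids;
+ *   if (0 == _mali_osk_bitmap_init(&ids, 1024, 64)) {
+ *           u32 id = _mali_osk_bitmap_alloc(&ids);
+ *           ...
+ *           _mali_osk_bitmap_free(&ids, id);
+ *           _mali_osk_bitmap_term(&ids);
+ *   }
+ */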
+/** @} */ /* end group _mali_osk_bitmap */
+
+/** @} */ /* end group osuapi */
+
+/** @} */ /* end group uddapi */
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+/* Check standard inlines */
+#ifndef MALI_STATIC_INLINE
+#error MALI_STATIC_INLINE not defined on your OS
+#endif
+
+#ifndef MALI_NON_STATIC_INLINE
+#error MALI_NON_STATIC_INLINE not defined on your OS
+#endif
+
+#endif /* __MALI_OSK_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_osk_bitops.h b/drivers/gpu/arm/utgard/common/mali_osk_bitops.h
new file mode 100644
index 000000000000..c1709f94c883
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_osk_bitops.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2010, 2013-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_bitops.h
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#ifndef __MALI_OSK_BITOPS_H__
+#define __MALI_OSK_BITOPS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+MALI_STATIC_INLINE void _mali_internal_clear_bit(u32 bit, u32 *addr)
+{
+ MALI_DEBUG_ASSERT(bit < 32);
+ MALI_DEBUG_ASSERT(NULL != addr);
+
+ (*addr) &= ~(1 << bit);
+}
+
+MALI_STATIC_INLINE void _mali_internal_set_bit(u32 bit, u32 *addr)
+{
+ MALI_DEBUG_ASSERT(bit < 32);
+ MALI_DEBUG_ASSERT(NULL != addr);
+
+ (*addr) |= (1 << bit);
+}
+
+MALI_STATIC_INLINE u32 _mali_internal_test_bit(u32 bit, u32 value)
+{
+ MALI_DEBUG_ASSERT(bit < 32);
+ return value & (1 << bit);
+}
+
+MALI_STATIC_INLINE int _mali_internal_find_first_zero_bit(u32 value)
+{
+ u32 inverted;
+ u32 negated;
+ u32 isolated;
+ u32 leading_zeros;
+
+ /* Begin with xxx...x0yyy...y, where ys are 1, number of ys is in range 0..31 */
+ inverted = ~value; /* zzz...z1000...0 */
+ /* Using count_trailing_zeros on inverted value -
+ * See ARM System Developers Guide for details of count_trailing_zeros */
+
+ /* Isolate the zero: it is preceded by a run of 1s, so add 1 to it */
+ negated = (u32) - inverted ; /* -a == ~a + 1 (mod 2^n) for n-bit numbers */
+ /* negated = xxx...x1000...0 */
+
+ isolated = negated & inverted ; /* xxx...x1000...0 & zzz...z1000...0, zs are ~xs */
+ /* And so the first zero bit is in the same position as the 1 == number of 1s that preceded it
+ * Note that the output is zero if value was all 1s */
+
+ leading_zeros = _mali_osk_clz(isolated);
+
+ return 31 - leading_zeros;
+}
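+
+/* Worked example of the bit-twiddle above, for value = 0x00000007:
+ *   inverted = ~0x00000007            = 0xFFFFFFF8
+ *   negated  = -0xFFFFFFF8 (mod 2^32) = 0x00000008
+ *   isolated = 0x00000008 & 0xFFFFFFF8 = 0x00000008
+ *   _mali_osk_clz(0x00000008) = 28, so the result is 31 - 28 = 3:
+ *   bit 3 is the first (least-significant) zero bit. */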
+
+
+/** @defgroup _mali_osk_bitops OSK Non-atomic Bit-operations
+ * @{ */
+
+/**
+ * These bit-operations do not work atomically, and so locks must be used if
+ * atomicity is required.
+ *
+ * Reference implementations for Little Endian are provided, and so it should
+ * not normally be necessary to re-implement these. Efficient bit-twiddling
+ * techniques are used where possible, implemented in portable C.
+ *
+ * Note that these reference implementations rely on _mali_osk_clz() being
+ * implemented.
+ */
+
+/** @brief Clear a bit in a sequence of 32-bit words
+ * @param nr bit number to clear, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ */
+MALI_STATIC_INLINE void _mali_osk_clear_nonatomic_bit(u32 nr, u32 *addr)
+{
+ addr += nr >> 5; /* find the correct word */
+ nr = nr & ((1 << 5) - 1); /* The bit number within the word */
+
+ _mali_internal_clear_bit(nr, addr);
+}
+
+/** @brief Set a bit in a sequence of 32-bit words
+ * @param nr bit number to set, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ */
+MALI_STATIC_INLINE void _mali_osk_set_nonatomic_bit(u32 nr, u32 *addr)
+{
+ addr += nr >> 5; /* find the correct word */
+ nr = nr & ((1 << 5) - 1); /* The bit number within the word */
+
+ _mali_internal_set_bit(nr, addr);
+}
+
+/** @brief Test a bit in a sequence of 32-bit words
+ * @param nr bit number to test, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ * @return zero if bit was clear, non-zero if set. Do not rely on the return
+ * value being related to the actual word under test.
+ */
+MALI_STATIC_INLINE u32 _mali_osk_test_bit(u32 nr, u32 *addr)
+{
+ addr += nr >> 5; /* find the correct word */
+ nr = nr & ((1 << 5) - 1); /* The bit number within the word */
+
+ return _mali_internal_test_bit(nr, *addr);
+}
+
+/* Return maxbit if not found */
+/** @brief Find the first zero bit in a sequence of 32-bit words
+ * @param addr starting point for search.
+ * @param maxbit the maximum number of bits to search
+ * @return the number of the first zero bit found, or maxbit if none were found
+ * in the specified range.
+ */
+MALI_STATIC_INLINE u32 _mali_osk_find_first_zero_bit(const u32 *addr, u32 maxbit)
+{
+ u32 total;
+
+ for (total = 0; total < maxbit; total += 32, ++addr) {
+ int result;
+ result = _mali_internal_find_first_zero_bit(*addr);
+
+ /* non-negative signifies the bit was found */
+ if (result >= 0) {
+ total += (u32)result;
+ break;
+ }
+ }
+
+ /* Now check if we reached maxbit or above */
+ if (total >= maxbit) {
+ total = maxbit;
+ }
+
+ return total; /* either the found bit nr, or maxbit if not found */
+}
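+
+/* Illustrative sketch: a 64-bit bitmap held in two 32-bit words. Bit 33
+ * lands in the second word (bits[1] == 0x2), and the first zero bit is
+ * still bit 0.
+ *
+ *   u32 bits[2] = { 0, 0 };
+ *   u32 first_zero;
+ *   _mali_osk_set_nonatomic_bit(33, bits);
+ *   if (_mali_osk_test_bit(33, bits)) { ... }
+ *   first_zero = _mali_osk_find_first_zero_bit(bits, 64);    (== 0)
+ */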
+/** @} */ /* end group _mali_osk_bitops */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_BITOPS_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_osk_list.h b/drivers/gpu/arm/utgard/common/mali_osk_list.h
new file mode 100644
index 000000000000..22d22446d7a0
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_osk_list.h
@@ -0,0 +1,273 @@
+/*
+ * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_list.h
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#ifndef __MALI_OSK_LIST_H__
+#define __MALI_OSK_LIST_H__
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+MALI_STATIC_INLINE void __mali_osk_list_add(_mali_osk_list_t *new_entry, _mali_osk_list_t *prev, _mali_osk_list_t *next)
+{
+ next->prev = new_entry;
+ new_entry->next = next;
+ new_entry->prev = prev;
+ prev->next = new_entry;
+}
+
+MALI_STATIC_INLINE void __mali_osk_list_del(_mali_osk_list_t *prev, _mali_osk_list_t *next)
+{
+ next->prev = prev;
+ prev->next = next;
+}
+
+/** @addtogroup _mali_osk_list OSK Doubly-Linked Circular Lists
+ * @{ */
+
+/** Reference implementations of Doubly-linked Circular Lists are provided.
+ * There is often no need to re-implement these.
+ *
+ * @note The implementation may differ subtly from any lists the OS provides.
+ * For this reason, these lists should not be mixed with OS-specific lists
+ * inside the OSK/UKK implementation. */
+
+/** @brief Initialize a list to be a head of an empty list
+ * @param exp the list to initialize. */
+#define _MALI_OSK_INIT_LIST_HEAD(exp) _mali_osk_list_init(exp)
+
+/** @brief Define a list variable, which is uninitialized.
+ * @param exp the name of the variable that the list will be defined as. */
+#define _MALI_OSK_LIST_HEAD(exp) _mali_osk_list_t exp
+
+/** @brief Define a list variable, which is initialized.
+ * @param exp the name of the variable that the list will be defined as. */
+#define _MALI_OSK_LIST_HEAD_STATIC_INIT(exp) _mali_osk_list_t exp = { &exp, &exp }
+
+/** @brief Initialize a list element.
+ *
+ * All list elements must be initialized before use.
+ *
+ * Do not use on any list element that is present in a list without using
+ * _mali_osk_list_del first, otherwise this will break the list.
+ *
+ * @param list the list element to initialize
+ */
+MALI_STATIC_INLINE void _mali_osk_list_init(_mali_osk_list_t *list)
+{
+ list->next = list;
+ list->prev = list;
+}
+
+/** @brief Insert a single list element after an entry in a list
+ *
+ * As an example, if this is inserted to the head of a list, then this becomes
+ * the first element of the list.
+ *
+ * Do not use to move list elements from one list to another, as it will break
+ * the originating list.
+ *
+ *
+ * @param new_entry the list element to insert
+ * @param list the list in which to insert. The new element will be the next
+ * entry in this list
+ */
+MALI_STATIC_INLINE void _mali_osk_list_add(_mali_osk_list_t *new_entry, _mali_osk_list_t *list)
+{
+ __mali_osk_list_add(new_entry, list, list->next);
+}
+
+/** @brief Insert a single list element before an entry in a list
+ *
+ * As an example, if this is inserted to the head of a list, then this becomes
+ * the last element of the list.
+ *
+ * Do not use to move list elements from one list to another, as it will break
+ * the originating list.
+ *
+ * @param new_entry the list element to insert
+ * @param list the list in which to insert. The new element will be the previous
+ * entry in this list
+ */
+MALI_STATIC_INLINE void _mali_osk_list_addtail(_mali_osk_list_t *new_entry, _mali_osk_list_t *list)
+{
+ __mali_osk_list_add(new_entry, list->prev, list);
+}
+
+/** @brief Remove a single element from a list
+ *
+ * The element will no longer be present in the list. The removed list element
+ * will be uninitialized, and so should not be traversed. It must be
+ * initialized before further use.
+ *
+ * @param list the list element to remove.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_del(_mali_osk_list_t *list)
+{
+ __mali_osk_list_del(list->prev, list->next);
+}
+
+/** @brief Remove a single element from a list, and re-initialize it
+ *
+ * The element will no longer be present in the list. The removed list element
+ * will be re-initialized, and so can be used as normal.
+ *
+ * @param list the list element to remove and initialize.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_delinit(_mali_osk_list_t *list)
+{
+ __mali_osk_list_del(list->prev, list->next);
+ _mali_osk_list_init(list);
+}
+
+/** @brief Determine whether a list is empty.
+ *
+ * An empty list is one that contains a single element that points to itself.
+ *
+ * @param list the list to check.
+ * @return non-zero if the list is empty, and zero otherwise.
+ */
+MALI_STATIC_INLINE mali_bool _mali_osk_list_empty(_mali_osk_list_t *list)
+{
+ return list->next == list;
+}
+
+/** @brief Move a list element from one list to another.
+ *
+ * The list element must be initialized.
+ *
+ * As an example, moving a list item to the head of a new list causes this item
+ * to be the first element in the new list.
+ *
+ * @param move_entry the list element to move
+ * @param list the new list into which the element will be inserted, as the next
+ * element in the list.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_move(_mali_osk_list_t *move_entry, _mali_osk_list_t *list)
+{
+ __mali_osk_list_del(move_entry->prev, move_entry->next);
+ _mali_osk_list_add(move_entry, list);
+}
+
+/** @brief Move an entire list
+ *
+ * The list element must be initialized.
+ *
+ * Allows you to move a list from one list head to another list head
+ *
+ * @param old_list The existing list head
+ * @param new_list The new list head (must be an empty list)
+ */
+MALI_STATIC_INLINE void _mali_osk_list_move_list(_mali_osk_list_t *old_list, _mali_osk_list_t *new_list)
+{
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(new_list));
+ if (!_mali_osk_list_empty(old_list)) {
+ new_list->next = old_list->next;
+ new_list->prev = old_list->prev;
+ new_list->next->prev = new_list;
+ new_list->prev->next = new_list;
+ old_list->next = old_list;
+ old_list->prev = old_list;
+ }
+}
+
+/** @brief Find the containing structure of a list
+ *
+ * When traversing a list, this is used to recover the containing structure,
+ * given that it contains a _mali_osk_list_t member.
+ *
+ * Each list must be of structures of one type, and must link the same members
+ * together, otherwise it will not be possible to correctly recover the
+ * structures that the lists link.
+ *
+ * @note no type or memory checking occurs to ensure that a structure does in
+ * fact exist for the list entry, and that it is being recovered with respect
+ * to the correct list member.
+ *
+ * @param ptr the pointer to the _mali_osk_list_t member in this structure
+ * @param type the type of the structure that contains the member
+ * @param member the member of the structure that ptr points to.
+ * @return a pointer to a \a type object which contains the _mali_osk_list_t
+ * \a member, as pointed to by the _mali_osk_list_t \a *ptr.
+ */
+#define _MALI_OSK_LIST_ENTRY(ptr, type, member) \
+ _MALI_OSK_CONTAINER_OF(ptr, type, member)
+
+/** @brief Enumerate a list safely
+ *
+ * With this macro, lists can be enumerated in a 'safe' manner. That is,
+ * entries can be deleted from the list without causing an error during
+ * enumeration. To achieve this, a 'temporary' pointer is required, which must
+ * be provided to the macro.
+ *
+ * Use it like a 'for()', 'while()' or 'do()' construct, and so it must be
+ * followed by a statement or compound-statement which will be executed for
+ * each list entry.
+ *
+ * Upon loop completion, providing that an early out was not taken in the
+ * loop body, then it is guaranteed that ptr->member == list, even if the loop
+ * body never executed.
+ *
+ * @param ptr a pointer to an object of type 'type', which points to the
+ * structure that contains the currently enumerated list entry.
+ * @param tmp a pointer to an object of type 'type', which must not be used
+ * inside the list-execution statement.
+ * @param list a pointer to a _mali_osk_list_t, from which enumeration will
+ * begin
+ * @param type the type of the structure that contains the _mali_osk_list_t
+ * member that is part of the list to be enumerated.
+ * @param member the _mali_osk_list_t member of the structure that is part of
+ * the list to be enumerated.
+ */
+#define _MALI_OSK_LIST_FOREACHENTRY(ptr, tmp, list, type, member) \
+ for (ptr = _MALI_OSK_LIST_ENTRY((list)->next, type, member), \
+ tmp = _MALI_OSK_LIST_ENTRY(ptr->member.next, type, member); \
+ &ptr->member != (list); \
+ ptr = tmp, \
+ tmp = _MALI_OSK_LIST_ENTRY(tmp->member.next, type, member))
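+
+/* Illustrative sketch (struct my_job and pending_jobs are hypothetical):
+ * safe enumeration that removes entries while walking; the 'tmp' pointer
+ * is what makes deleting 'cur' safe.
+ *
+ *   struct my_job {
+ *           _mali_osk_list_t list;
+ *           int id;
+ *   };
+ *
+ *   struct my_job *cur, *tmp;
+ *   _MALI_OSK_LIST_FOREACHENTRY(cur, tmp, &pending_jobs, struct my_job, list) {
+ *           _mali_osk_list_delinit(&cur->list);
+ *   }
+ */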
+
+/** @brief Enumerate a list in reverse order safely
+ *
+ * This macro is identical to @ref _MALI_OSK_LIST_FOREACHENTRY, except that
+ * entries are enumerated in reverse order.
+ *
+ * @param ptr a pointer to an object of type 'type', which points to the
+ * structure that contains the currently enumerated list entry.
+ * @param tmp a pointer to an object of type 'type', which must not be used
+ * inside the list-execution statement.
+ * @param list a pointer to a _mali_osk_list_t, from which enumeration will
+ * begin
+ * @param type the type of the structure that contains the _mali_osk_list_t
+ * member that is part of the list to be enumerated.
+ * @param member the _mali_osk_list_t member of the structure that is part of
+ * the list to be enumerated.
+ */
+#define _MALI_OSK_LIST_FOREACHENTRY_REVERSE(ptr, tmp, list, type, member) \
+ for (ptr = _MALI_OSK_LIST_ENTRY((list)->prev, type, member), \
+ tmp = _MALI_OSK_LIST_ENTRY(ptr->member.prev, type, member); \
+ &ptr->member != (list); \
+ ptr = tmp, \
+ tmp = _MALI_OSK_LIST_ENTRY(tmp->member.prev, type, member))
+
+/** @} */ /* end group _mali_osk_list */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_LIST_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_osk_mali.h b/drivers/gpu/arm/utgard/common/mali_osk_mali.h
new file mode 100644
index 000000000000..b27fb7dd36ed
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_osk_mali.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_mali.h
+ * Defines the OS abstraction layer which is specific for the Mali kernel device driver (OSK)
+ */
+
+#ifndef __MALI_OSK_MALI_H__
+#define __MALI_OSK_MALI_H__
+
+#include <linux/mali/mali_utgard.h>
+#include <mali_osk.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief Struct with device specific configuration data
+ */
+typedef struct mali_gpu_device_data _mali_osk_device_data;
+
+#if defined(CONFIG_MALI_DT) && !defined(CONFIG_MALI_PLAT_SPECIFIC_DT)
+/** @brief Initialize the device resources when device tree is used
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_resource_initialize(void);
+#endif
+
+/** @brief Find Mali GPU HW resource
+ *
+ * @param addr Address of Mali GPU resource to find
+ * @param res Storage for resource information if resource is found.
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_ITEM_NOT_FOUND if resource is not found
+ */
+_mali_osk_errcode_t _mali_osk_resource_find(u32 addr, _mali_osk_resource_t *res);
+
+
+/** @brief Find Mali GPU HW base address
+ *
+ * @return The base address of the Mali GPU component with the lowest address, or 0 if no resources are found.
+ */
+uintptr_t _mali_osk_resource_base_address(void);
+
+/** @brief Find the specific GPU resource.
+ *
+ * @return one of the following values:
+ * 0x400 if a Mali 400 specific GPU resource is identified
+ * 0x450 if a Mali 450 specific GPU resource is identified
+ * 0x470 if a Mali 470 specific GPU resource is identified
+ *
+ */
+u32 _mali_osk_identify_gpu_resource(void);
+
+/** @brief Retrieve the Mali GPU specific data
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_device_data_get(_mali_osk_device_data *data);
+
+/** @brief Find the pmu domain config from device data.
+ *
+ * @param domain_config_array Used to store the PMU domain config found in device data.
+ * @param array_size The size of the domain_config_array array.
+ */
+void _mali_osk_device_data_pmu_config_get(u16 *domain_config_array, int array_size);
+
+/** @brief Get Mali PMU switch delay
+ *
+ * @return PMU switch delay if it is configured
+ */
+u32 _mali_osk_get_pmu_switch_delay(void);
+
+/** @brief Determines if Mali GPU has been configured with shared interrupts.
+ *
+ * @return MALI_TRUE if shared interrupts, MALI_FALSE if not.
+ */
+mali_bool _mali_osk_shared_interrupts(void);
+
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_MALI_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_osk_profiling.h b/drivers/gpu/arm/utgard/common/mali_osk_profiling.h
new file mode 100644
index 000000000000..10f4dc552b03
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_osk_profiling.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_OSK_PROFILING_H__
+#define __MALI_OSK_PROFILING_H__
+
+#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
+
+#include "mali_linux_trace.h"
+#include "mali_profiling_events.h"
+#include "mali_profiling_gator_api.h"
+
+#define MALI_PROFILING_MAX_BUFFER_ENTRIES 1048576
+
+#define MALI_PROFILING_NO_HW_COUNTER ((u32)-1)
+
+/** @defgroup _mali_osk_profiling External profiling connectivity
+ * @{ */
+
+/**
+ * Initialize the profiling module.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_init(mali_bool auto_start);
+
+/**
+ * Terminate the profiling module.
+ */
+void _mali_osk_profiling_term(void);
+
+/**
+ * Stop the profile sampling operation.
+ */
+void _mali_osk_profiling_stop_sampling(u32 pid);
+
+/**
+ * Start recording profiling data
+ *
+ * The specified limit will determine how large the capture buffer is.
+ * MALI_PROFILING_MAX_BUFFER_ENTRIES determines the maximum size allowed by the device driver.
+ *
+ * @param limit The desired maximum number of events to record on input, the actual maximum on output.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_start(u32 *limit);
+
+/**
+ * Add a profiling event
+ *
+ * @param event_id The event identifier.
+ * @param data0 First data parameter, depending on event_id specified.
+ * @param data1 Second data parameter, depending on event_id specified.
+ * @param data2 Third data parameter, depending on event_id specified.
+ * @param data3 Fourth data parameter, depending on event_id specified.
+ * @param data4 Fifth data parameter, depending on event_id specified.
+ */
+void _mali_osk_profiling_add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4);
+
+/**
+ * Report a hardware counter event.
+ *
+ * @param counter_id The ID of the counter.
+ * @param value The value of the counter.
+ */
+
+/* Call Linux tracepoint directly */
+#define _mali_osk_profiling_report_hw_counter(counter_id, value) trace_mali_hw_counter(counter_id, value)
+
+/**
+ * Report SW counters
+ *
+ * @param counters array of counter values
+ */
+void _mali_osk_profiling_report_sw_counters(u32 *counters);
+
+void _mali_osk_profiling_record_global_counters(int counter_id, u32 value);
+
+/**
+ * Stop recording profiling data
+ *
+ * @param count Returns the number of recorded events.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_stop(u32 *count);
+
+/**
+ * Retrieves the number of events that can be retrieved
+ *
+ * @return The number of recorded events that can be retrieved.
+ */
+u32 _mali_osk_profiling_get_count(void);
+
+/**
+ * Retrieve an event
+ *
+ * @param index Event index (start with 0 and continue until this function fails to retrieve all events)
+ * @param timestamp The timestamp for the retrieved event will be stored here.
+ * @param event_id The event ID for the retrieved event will be stored here.
+ * @param data The 5 data values for the retrieved event will be stored here.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_get_event(u32 index, u64 *timestamp, u32 *event_id, u32 data[5]);
+
+/**
+ * Clear the recorded buffer.
+ *
+ * This is needed in order to start another recording.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_clear(void);
+
+/**
+ * Checks if a recording of profiling data is in progress
+ *
+ * @return MALI_TRUE if recording of profiling data is in progress, MALI_FALSE if not
+ */
+mali_bool _mali_osk_profiling_is_recording(void);
+
+/**
+ * Checks if profiling data is available for retrieval
+ *
+ * @return MALI_TRUE if profiling data is available, MALI_FALSE if not
+ */
+mali_bool _mali_osk_profiling_have_recording(void);
+
+/** @} */ /* end group _mali_osk_profiling */
+
+#else /* defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_TRACEPOINTS) */
+
+/* Dummy add_event, for when profiling is disabled. */
+
+#define _mali_osk_profiling_add_event(event_id, data0, data1, data2, data3, data4)
+
+#endif /* defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_TRACEPOINTS) */
+
+#endif /* __MALI_OSK_PROFILING_H__ */
+
+
diff --git a/drivers/gpu/arm/utgard/common/mali_osk_types.h b/drivers/gpu/arm/utgard/common/mali_osk_types.h
new file mode 100644
index 000000000000..b65ad29e16c0
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_osk_types.h
@@ -0,0 +1,471 @@
+/*
+ * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_types.h
+ * Defines types of the OS abstraction layer for the kernel device driver (OSK)
+ */
+
+#ifndef __MALI_OSK_TYPES_H__
+#define __MALI_OSK_TYPES_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup oskapi UDD OS Abstraction for Kernel-side (OSK) APIs
+ *
+ * @{
+ */
+
+/** @defgroup _mali_osk_miscellaneous OSK Miscellaneous functions, constants and types
+ * @{ */
+
+/* Define integer types used by OSK. Note: these currently clash with Linux so we only define them if not defined already */
+#ifndef __KERNEL__
+typedef unsigned char u8;
+typedef signed char s8;
+typedef unsigned short u16;
+typedef signed short s16;
+typedef unsigned int u32;
+typedef signed int s32;
+typedef unsigned long long u64;
+#define BITS_PER_LONG (sizeof(long)*8)
+#else
+/* Ensure Linux types u32, etc. are defined */
+#include <linux/types.h>
+#endif
+
+/** @brief Mali Boolean type which uses MALI_TRUE and MALI_FALSE
+ */
+typedef unsigned long mali_bool;
+
+#ifndef MALI_TRUE
+#define MALI_TRUE ((mali_bool)1)
+#endif
+
+#ifndef MALI_FALSE
+#define MALI_FALSE ((mali_bool)0)
+#endif
+
+#define MALI_HW_CORE_NO_COUNTER ((u32)-1)
+
+
+#define MALI_S32_MAX 0x7fffffff
+
+/**
+ * @brief OSK Error codes
+ *
+ * Each OS may use its own set of error codes, and may require that the
+ * User/Kernel interface take certain error codes. This means that the common
+ * error codes need to be sufficiently rich to pass the correct error code
+ * through from the OSK to the U/K layer, across all OSs.
+ *
+ * The result is that some error codes will appear redundant on some OSs.
+ * Under all OSs, the OSK layer must translate native OS error codes to
+ * _mali_osk_errcode_t codes. Similarly, the U/K layer must translate from
+ * _mali_osk_errcode_t codes to native OS error codes.
+ */
+typedef enum {
+ _MALI_OSK_ERR_OK = 0, /**< Success. */
+ _MALI_OSK_ERR_FAULT = -1, /**< General non-success */
+ _MALI_OSK_ERR_INVALID_FUNC = -2, /**< Invalid function requested through User/Kernel interface (e.g. bad IOCTL number) */
+ _MALI_OSK_ERR_INVALID_ARGS = -3, /**< Invalid arguments passed through User/Kernel interface */
+ _MALI_OSK_ERR_NOMEM = -4, /**< Insufficient memory */
+ _MALI_OSK_ERR_TIMEOUT = -5, /**< Timeout occurred */
+ _MALI_OSK_ERR_RESTARTSYSCALL = -6, /**< Special: On certain OSs, must report when an interruptible mutex is interrupted. Ignore otherwise. */
+ _MALI_OSK_ERR_ITEM_NOT_FOUND = -7, /**< Table Lookup failed */
+ _MALI_OSK_ERR_BUSY = -8, /**< Device/operation is busy. Try again later */
+ _MALI_OSK_ERR_UNSUPPORTED = -9, /**< An optional part of the interface was used, and is unsupported */
+} _mali_osk_errcode_t;
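+
+/* Illustrative sketch (not part of this file): a Linux implementation of the
+ * OSK might translate native errno values along these lines.
+ *
+ *   static _mali_osk_errcode_t errno_to_osk(int err)
+ *   {
+ *           switch (err) {
+ *           case 0:            return _MALI_OSK_ERR_OK;
+ *           case -ENOMEM:      return _MALI_OSK_ERR_NOMEM;
+ *           case -ETIMEDOUT:   return _MALI_OSK_ERR_TIMEOUT;
+ *           case -ERESTARTSYS: return _MALI_OSK_ERR_RESTARTSYSCALL;
+ *           default:           return _MALI_OSK_ERR_FAULT;
+ *           }
+ *   }
+ */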
+
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+/** @defgroup _mali_osk_wq OSK work queues
+ * @{ */
+
+/** @brief Private type for work objects */
+typedef struct _mali_osk_wq_work_s _mali_osk_wq_work_t;
+typedef struct _mali_osk_wq_delayed_work_s _mali_osk_wq_delayed_work_t;
+
+/** @brief Work queue handler function
+ *
+ * This function type is called when the work is scheduled by the work queue,
+ * e.g. as an IRQ bottom-half handler.
+ *
+ * Refer to \ref _mali_osk_wq_schedule_work() for more information on the
+ * work-queue and work handlers.
+ *
+ * @param arg resource-specific data
+ */
+typedef void (*_mali_osk_wq_work_handler_t)(void *arg);
+
+/* @} */ /* end group _mali_osk_wq */
+
+/** @defgroup _mali_osk_irq OSK IRQ handling
+ * @{ */
+
+/** @brief Private type for IRQ handling objects */
+typedef struct _mali_osk_irq_t_struct _mali_osk_irq_t;
+
+/** @brief Optional function to trigger an irq from a resource
+ *
+ * This function is implemented by the common layer to allow probing of a resource's IRQ.
+ * @param arg resource-specific data */
+typedef void (*_mali_osk_irq_trigger_t)(void *arg);
+
+/** @brief Optional function to acknowledge an irq from a resource
+ *
+ * This function is implemented by the common layer to allow probing of a resource's IRQ.
+ * @param arg resource-specific data
+ * @return _MALI_OSK_ERR_OK if the IRQ was successful, or a suitable _mali_osk_errcode_t on failure. */
+typedef _mali_osk_errcode_t (*_mali_osk_irq_ack_t)(void *arg);
+
+/** @brief IRQ 'upper-half' handler callback.
+ *
+ * This function is implemented by the common layer to do the initial handling of a
+ * resource's IRQ. This maps on to the concept of an ISR that does the minimum
+ * work necessary before handing off to an IST.
+ *
+ * The communication of the resource-specific data from the ISR to the IST is
+ * handled by the OSK implementation.
+ *
+ * On most systems, the IRQ upper-half handler executes in IRQ context.
+ * Therefore, the system may have restrictions about what can be done in this
+ * context
+ *
+ * If an IRQ upper-half handler requires more work to be done than can be
+ * acheived in an IRQ context, then it may defer the work with
+ * _mali_osk_wq_schedule_work(). Refer to \ref _mali_osk_wq_create_work() for
+ * more information.
+ *
+ * @param arg resource-specific data
+ * @return _MALI_OSK_ERR_OK if the IRQ was correctly handled, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+typedef _mali_osk_errcode_t (*_mali_osk_irq_uhandler_t)(void *arg);
+
+
+/** @} */ /* end group _mali_osk_irq */
+
+
+/** @defgroup _mali_osk_atomic OSK Atomic counters
+ * @{ */
+
+/** @brief Public type of atomic counters
+ *
+ * This is public for allocation on stack. On systems that support it, this is just a single 32-bit value.
+ * On others, it could be encapsulating an object stored elsewhere.
+ *
+ * Regardless of implementation, the \ref _mali_osk_atomic functions \b must be used
+ * for all accesses to the variable's value, even if atomicity is not required.
+ * Do not access u.val or u.obj directly.
+ */
+typedef struct {
+ union {
+ u32 val;
+ void *obj;
+ } u;
+} _mali_osk_atomic_t;
+/** @} */ /* end group _mali_osk_atomic */
+
+
+/** @defgroup _mali_osk_lock OSK Mutual Exclusion Locks
+ * @{ */
+
+
+/** @brief OSK Mutual Exclusion Lock ordered list
+ *
+ * This lists the various types of locks in the system and is used to check
+ * that locks are taken in the correct order.
+ *
+ * - Holding more than one lock of the same order at the same time is not
+ * allowed.
+ * - Taking a lock of a lower order than the highest-order lock currently held
+ * is not allowed.
+ *
+ */
+typedef enum {
+ /* || Locks || */
+ /* || must be || */
+ /* _||_ taken in _||_ */
+ /* \ / this \ / */
+ /* \/ order! \/ */
+
+ _MALI_OSK_LOCK_ORDER_FIRST = 0,
+
+ _MALI_OSK_LOCK_ORDER_SESSIONS,
+ _MALI_OSK_LOCK_ORDER_MEM_SESSION,
+ _MALI_OSK_LOCK_ORDER_MEM_INFO,
+ _MALI_OSK_LOCK_ORDER_MEM_PT_CACHE,
+ _MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP,
+ _MALI_OSK_LOCK_ORDER_PM_EXECUTION,
+ _MALI_OSK_LOCK_ORDER_EXECUTOR,
+ _MALI_OSK_LOCK_ORDER_TIMELINE_SYSTEM,
+ _MALI_OSK_LOCK_ORDER_SCHEDULER,
+ _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED,
+ _MALI_OSK_LOCK_ORDER_PROFILING,
+ _MALI_OSK_LOCK_ORDER_L2,
+ _MALI_OSK_LOCK_ORDER_L2_COMMAND,
+ _MALI_OSK_LOCK_ORDER_UTILIZATION,
+ _MALI_OSK_LOCK_ORDER_SESSION_PENDING_JOBS,
+ _MALI_OSK_LOCK_ORDER_PM_STATE,
+
+ _MALI_OSK_LOCK_ORDER_LAST,
+} _mali_osk_lock_order_t;
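+
+/* Illustrative example of the ordering rule: a thread holding a lock of
+ * order _MALI_OSK_LOCK_ORDER_SCHEDULER may then take a lock of order
+ * _MALI_OSK_LOCK_ORDER_PROFILING (higher), but must not take one of order
+ * _MALI_OSK_LOCK_ORDER_MEM_SESSION (lower), nor a second lock of the
+ * SCHEDULER order. */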
+
+
+/** @brief OSK Mutual Exclusion Lock flags type
+ *
+ * - Any lock can use the order parameter.
+ */
+typedef enum {
+ _MALI_OSK_LOCKFLAG_UNORDERED = 0x1, /**< Indicate that the order of this lock should not be checked */
+ _MALI_OSK_LOCKFLAG_ORDERED = 0x2,
+ /** @enum _mali_osk_lock_flags_t
+ *
+ * Flags from 0x10000--0x80000000 are RESERVED for User-mode */
+
+} _mali_osk_lock_flags_t;
+
+/** @brief Mutual Exclusion Lock Mode Optimization hint
+ *
+ * The lock mode is used to implement the read/write locking of locks when we call
+ * the functions _mali_osk_mutex_rw_init/wait/signal/term(). In this case, the RO mode can
+ * be used to allow multiple concurrent readers, but no writers. The RW mode is used for
+ * writers, and so will wait for all readers to release the lock (if any present).
+ * Further readers and writers will wait until the writer releases the lock.
+ *
+ * The mode is purely an optimization hint: for example, it is permissible for
+ * all locks to behave in RW mode, regardless of that supplied.
+ *
+ * It is an error to attempt to use locks in anything other than RW mode when
+ * calling the functions _mali_osk_mutex_rw_wait/signal().
+ *
+ */
+typedef enum {
+ _MALI_OSK_LOCKMODE_UNDEF = -1, /**< Undefined lock mode. For internal use only */
+ _MALI_OSK_LOCKMODE_RW = 0x0, /**< Read-write mode, default. All readers and writers are mutually-exclusive */
+ _MALI_OSK_LOCKMODE_RO, /**< Read-only mode, to support multiple concurrent readers, but mutual exclusion in the presence of writers. */
+ /** @enum _mali_osk_lock_mode_t
+ *
+ * Lock modes 0x40--0x7F are RESERVED for User-mode */
+} _mali_osk_lock_mode_t;
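+
+/*
+ * Sketch of intended use (illustrative; assumes the _mali_osk_mutex_rw_*
+ * functions named above, as declared in the OSK API):
+ *
+ *     _mali_osk_mutex_rw_t *lock =
+ *             _mali_osk_mutex_rw_init(_MALI_OSK_LOCKFLAG_UNORDERED,
+ *                                     _MALI_OSK_LOCK_ORDER_FIRST);
+ *     Reader side:
+ *         _mali_osk_mutex_rw_wait(lock, _MALI_OSK_LOCKMODE_RO);
+ *         ... read shared state ...
+ *         _mali_osk_mutex_rw_signal(lock, _MALI_OSK_LOCKMODE_RO);
+ *     Writer side: the same calls with _MALI_OSK_LOCKMODE_RW.
+ */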
+
+/** @brief Private types for Mutual Exclusion lock objects */
+typedef struct _mali_osk_lock_debug_s _mali_osk_lock_debug_t;
+typedef struct _mali_osk_spinlock_s _mali_osk_spinlock_t;
+typedef struct _mali_osk_spinlock_irq_s _mali_osk_spinlock_irq_t;
+typedef struct _mali_osk_mutex_s _mali_osk_mutex_t;
+typedef struct _mali_osk_mutex_rw_s _mali_osk_mutex_rw_t;
+
+/** @} */ /* end group _mali_osk_lock */
+
+/** @defgroup _mali_osk_low_level_memory OSK Low-level Memory Operations
+ * @{ */
+
+/**
+ * @brief Private data type for use in IO accesses to/from devices.
+ *
+ * This represents some range that is accessible from the device. Examples
+ * include:
+ * - Device Registers, which could be readable and/or writeable.
+ * - Memory that the device has access to, for storing configuration structures.
+ *
+ * Access to this range must be made through the _mali_osk_mem_ioread32() and
+ * _mali_osk_mem_iowrite32() functions.
+ */
+typedef struct _mali_io_address *mali_io_address;
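+
+/*
+ * Access sketch (offsets are illustrative; regs is an assumed
+ * mali_io_address for a mapped register range):
+ *
+ *     u32 status = _mali_osk_mem_ioread32(regs, 0x08);
+ *     _mali_osk_mem_iowrite32(regs, 0x0c, status | 0x1);
+ */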
+
+/** @defgroup _MALI_OSK_CPU_PAGE CPU Physical page size macros.
+ *
+ * The order of the page size is supplied for
+ * ease of use by algorithms that might require it, since it is easier to know
+ * it ahead of time rather than calculating it.
+ *
+ * The Mali Page Mask macro masks off the lower bits of a physical address to
+ * give the start address of the page for that physical address.
+ *
+ * @note The Mali device driver code is designed for systems with 4KB page size.
+ * Changing these macros will not make the entire Mali device driver work with
+ * page sizes other than 4KB.
+ *
+ * @note The CPU Physical Page Size has been assumed to be the same as the Mali
+ * Physical Page Size.
+ *
+ * @{
+ */
+
+/** CPU Page Order, as log to base 2 of the Page size. @see _MALI_OSK_CPU_PAGE_SIZE */
+#define _MALI_OSK_CPU_PAGE_ORDER ((u32)12)
+/** CPU Page Size, in bytes. */
+#define _MALI_OSK_CPU_PAGE_SIZE (((u32)1) << (_MALI_OSK_CPU_PAGE_ORDER))
+/** CPU Page Mask, which masks off the offset within a page */
+#define _MALI_OSK_CPU_PAGE_MASK (~((((u32)1) << (_MALI_OSK_CPU_PAGE_ORDER)) - ((u32)1)))
+/** @} */ /* end of group _MALI_OSK_CPU_PAGE */
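+
+/*
+ * Example: for some physical address phys_addr (illustrative variable),
+ * the page start and in-page offset follow from the macros above:
+ *
+ *     u32 page_start = phys_addr & _MALI_OSK_CPU_PAGE_MASK;
+ *     u32 offset     = phys_addr & (_MALI_OSK_CPU_PAGE_SIZE - 1);
+ */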
+
+/** @defgroup _MALI_OSK_MALI_PAGE Mali Physical Page size macros
+ *
+ * Mali Physical page size macros. The order of the page size is supplied for
+ * ease of use by algorithms that might require it, since it is easier to know
+ * it ahead of time rather than calculating it.
+ *
+ * The Mali Page Mask macro masks off the lower bits of a physical address to
+ * give the start address of the page for that physical address.
+ *
+ * @note The Mali device driver code is designed for systems with 4KB page size.
+ * Changing these macros will not make the entire Mali device driver work with
+ * page sizes other than 4KB.
+ *
+ * @note The Mali Physical Page Size has been assumed to be the same as the CPU
+ * Physical Page Size.
+ *
+ * @{
+ */
+
+/** Mali Page Order, as log to base 2 of the Page size. @see _MALI_OSK_MALI_PAGE_SIZE */
+#define _MALI_OSK_MALI_PAGE_ORDER PAGE_SHIFT
+/** Mali Page Size, in bytes. */
+#define _MALI_OSK_MALI_PAGE_SIZE PAGE_SIZE
+/** Mali Page Mask, which masks off the offset within a page */
+#define _MALI_OSK_MALI_PAGE_MASK PAGE_MASK
+/** @} */ /* end of group _MALI_OSK_MALI_PAGE*/
+
+/** @brief flags for mapping a user-accessible memory range
+ *
+ * Where a function with prefix '_mali_osk_mem_mapregion' accepts flags as one
+ * of the function parameters, it will use one of these. These allow per-page
+ * control over mappings. Compare with the mali_memory_allocation_flag type,
+ * which acts over an entire range.
+ *
+ * These may be OR'd together with bitwise OR (|), but must be cast back into
+ * the type after OR'ing.
+ */
+typedef enum {
+ _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR = 0x1, /**< Physical address is OS Allocated */
+} _mali_osk_mem_mapregion_flags_t;
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+/** @defgroup _mali_osk_notification OSK Notification Queues
+ * @{ */
+
+/** @brief Private type for notification queue objects */
+typedef struct _mali_osk_notification_queue_t_struct _mali_osk_notification_queue_t;
+
+/** @brief Public notification data object type */
+typedef struct _mali_osk_notification_t_struct {
+ u32 notification_type; /**< The notification type */
+ u32 result_buffer_size; /**< Size of the result buffer to copy to user space */
+ void *result_buffer; /**< Buffer containing any type specific data */
+} _mali_osk_notification_t;
+
+/** @} */ /* end group _mali_osk_notification */
+
+
+/** @defgroup _mali_osk_timer OSK Timer Callbacks
+ * @{ */
+
+/** @brief Function to call when a timer expires
+ *
+ * When a timer expires, this function is called. Note that on many systems,
+ * a timer callback will be executed in IRQ context. Therefore, restrictions
+ * may apply on what can be done inside the timer callback.
+ *
+ * If a timer requires more work to be done than can be achieved in an IRQ
+ * context, then it may defer the work with a work-queue. For example, it may
+ * use \ref _mali_osk_wq_schedule_work() to make use of a bottom-half handler
+ * to carry out the remaining work.
+ *
+ * Stopping the timer with \ref _mali_osk_timer_del() blocks on completion of
+ * the callback. Therefore, the callback may not obtain any mutexes also held
+ * by any callers of _mali_osk_timer_del(). Otherwise, a deadlock may occur.
+ *
+ * @param arg Function-specific data */
+typedef void (*_mali_osk_timer_callback_t)(void *arg);
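+
+/*
+ * Callback sketch deferring heavy work out of IRQ context (illustrative;
+ * assumes a _mali_osk_wq_work_t created elsewhere with
+ * _mali_osk_wq_create_work()):
+ *
+ *     static void my_timer_callback(void *arg)
+ *     {
+ *             _mali_osk_wq_work_t *work = (_mali_osk_wq_work_t *)arg;
+ *
+ *             _mali_osk_wq_schedule_work(work);
+ *     }
+ */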
+
+/** @brief Private type for Timer Callback Objects */
+typedef struct _mali_osk_timer_t_struct _mali_osk_timer_t;
+/** @} */ /* end group _mali_osk_timer */
+
+
+/** @addtogroup _mali_osk_list OSK Doubly-Linked Circular Lists
+ * @{ */
+
+/** @brief Public List objects.
+ *
+ * To use, add a _mali_osk_list_t member to the structure that may become part
+ * of a list. When traversing the _mali_osk_list_t objects, use the
+ * _MALI_OSK_CONTAINER_OF() macro to recover the structure from its
+ * _mali_osk_list_t member.
+ *
+ * Each structure may have multiple _mali_osk_list_t members, so that the
+ * structure is part of multiple lists. When traversing lists, ensure that the
+ * correct _mali_osk_list_t member is used, because type-checking will be
+ * lost by the compiler.
+ */
+typedef struct _mali_osk_list_s {
+ struct _mali_osk_list_s *next;
+ struct _mali_osk_list_s *prev;
+} _mali_osk_list_t;
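+
+/*
+ * Recovery sketch using _MALI_OSK_CONTAINER_OF() (my_object_t and its
+ * 'link' member are illustrative names):
+ *
+ *     typedef struct my_object {
+ *             int data;
+ *             _mali_osk_list_t link;
+ *     } my_object_t;
+ *
+ *     my_object_t *obj =
+ *             _MALI_OSK_CONTAINER_OF(list_node, my_object_t, link);
+ */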
+/** @} */ /* end group _mali_osk_list */
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief resource description struct
+ *
+ * Platform independent representation of a Mali HW resource
+ */
+typedef struct _mali_osk_resource {
+ const char *description; /**< short description of the resource */
+ uintptr_t base; /**< Physical base address of the resource, as seen by Mali resources. */
+	const char *irq_name; /**< Name of the IRQ belonging to this resource */
+ u32 irq; /**< IRQ number delivered to the CPU, or -1 to tell the driver to probe for it (if possible) */
+} _mali_osk_resource_t;
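+
+/*
+ * Example (illustrative values only, not from any real platform):
+ *
+ *     _mali_osk_resource_t res = {
+ *             .description = "Mali GP",
+ *             .base        = 0xfc040000,
+ *             .irq_name    = "IRQGP",
+ *             .irq         = 18,
+ *     };
+ */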
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+/** @defgroup _mali_osk_wait_queue OSK Wait Queue functionality
+ * @{ */
+/** @brief Private type for wait queue objects */
+typedef struct _mali_osk_wait_queue_t_struct _mali_osk_wait_queue_t;
+/** @} */ /* end group _mali_osk_wait_queue */
+
+/** @} */ /* end group osuapi */
+
+/** @} */ /* end group uddapi */
+
+/** @brief Mali print ctx type which uses seq_file
+ */
+typedef struct seq_file _mali_osk_print_ctx;
+
+#define _MALI_OSK_BITMAP_INVALIDATE_INDEX -1
+
+typedef struct _mali_osk_bitmap {
+ u32 reserve;
+ u32 last;
+ u32 max;
+ u32 avail;
+ _mali_osk_spinlock_t *lock;
+ unsigned long *table;
+} _mali_osk_bitmap_t;
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_TYPES_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_pm.c b/drivers/gpu/arm/utgard/common/mali_pm.c
new file mode 100644
index 000000000000..dbd94d310741
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_pm.c
@@ -0,0 +1,1362 @@
+/*
+ * Copyright (C) 2011-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_pm.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_scheduler.h"
+#include "mali_group.h"
+#include "mali_pm_domain.h"
+#include "mali_pmu.h"
+
+#include "mali_executor.h"
+#include "mali_control_timer.h"
+
+#if defined(DEBUG)
+u32 num_pm_runtime_resume = 0;
+u32 num_pm_updates = 0;
+u32 num_pm_updates_up = 0;
+u32 num_pm_updates_down = 0;
+#endif
+
+#define MALI_PM_DOMAIN_DUMMY_MASK (1 << MALI_DOMAIN_INDEX_DUMMY)
+
+/* lock protecting power state (including pm_domains) */
+static _mali_osk_spinlock_irq_t *pm_lock_state = NULL;
+
+/* the wanted domain mask (protected by pm_lock_state) */
+static u32 pd_mask_wanted = 0;
+
+/* used to defer the actual power changes */
+static _mali_osk_wq_work_t *pm_work = NULL;
+
+/* lock protecting power change execution */
+static _mali_osk_mutex_t *pm_lock_exec = NULL;
+
+/* PMU domains which are actually powered on (protected by pm_lock_exec) */
+static u32 pmu_mask_current = 0;
+
+/*
+ * Domains which are marked as powered on (protected by pm_lock_exec).
+ * This can be different from pmu_mask_current right after GPU power on
+ * if the PMU domains default to powered up.
+ */
+static u32 pd_mask_current = 0;
+
+static u16 domain_config[MALI_MAX_NUMBER_OF_DOMAINS] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1 << MALI_DOMAIN_INDEX_DUMMY
+};
+
+/* The relative core power cost */
+#define MALI_GP_COST 3
+#define MALI_PP_COST 6
+#define MALI_L2_COST 1
+
+/*
+ * We have MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS + 1 rows in this matrix
+ * because we must store the mask for each possible PP core count: 0, 1, 2, ..., 8.
+ */
+static int mali_pm_domain_power_cost_result[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS + 1][MALI_MAX_NUMBER_OF_DOMAINS];
+/*
+ * Keep track of runtime PM state, so that we know
+ * how to resume during OS resume.
+ */
+#ifdef CONFIG_PM_RUNTIME
+static mali_bool mali_pm_runtime_active = MALI_FALSE;
+#else
+/* When the kernel does not enable PM_RUNTIME, keep the flag always true,
+ * since the GPU will then never be powered off by runtime PM */
+static mali_bool mali_pm_runtime_active = MALI_TRUE;
+#endif
+
+static void mali_pm_state_lock(void);
+static void mali_pm_state_unlock(void);
+static _mali_osk_errcode_t mali_pm_create_pm_domains(void);
+static void mali_pm_set_pmu_domain_config(void);
+static u32 mali_pm_get_registered_cores_mask(void);
+static void mali_pm_update_sync_internal(void);
+static mali_bool mali_pm_common_suspend(void);
+static void mali_pm_update_work(void *data);
+#if defined(DEBUG)
+const char *mali_pm_mask_to_string(u32 mask);
+const char *mali_pm_group_stats_to_string(void);
+#endif
+
+_mali_osk_errcode_t mali_pm_initialize(void)
+{
+ _mali_osk_errcode_t err;
+ struct mali_pmu_core *pmu;
+
+ pm_lock_state = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED,
+ _MALI_OSK_LOCK_ORDER_PM_STATE);
+ if (NULL == pm_lock_state) {
+ mali_pm_terminate();
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ pm_lock_exec = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_ORDERED,
+ _MALI_OSK_LOCK_ORDER_PM_STATE);
+ if (NULL == pm_lock_exec) {
+ mali_pm_terminate();
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ pm_work = _mali_osk_wq_create_work(mali_pm_update_work, NULL);
+ if (NULL == pm_work) {
+ mali_pm_terminate();
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ pmu = mali_pmu_get_global_pmu_core();
+ if (NULL != pmu) {
+ /*
+ * We have a Mali PMU, set the correct domain
+ * configuration (default or custom)
+ */
+
+ u32 registered_cores_mask;
+
+ mali_pm_set_pmu_domain_config();
+
+ registered_cores_mask = mali_pm_get_registered_cores_mask();
+ mali_pmu_set_registered_cores_mask(pmu, registered_cores_mask);
+
+ MALI_DEBUG_ASSERT(0 == pd_mask_wanted);
+ }
+
+ /* Create all power domains needed (at least one dummy domain) */
+ err = mali_pm_create_pm_domains();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_pm_terminate();
+ return err;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_pm_terminate(void)
+{
+ if (NULL != pm_work) {
+ _mali_osk_wq_delete_work(pm_work);
+ pm_work = NULL;
+ }
+
+ mali_pm_domain_terminate();
+
+ if (NULL != pm_lock_exec) {
+ _mali_osk_mutex_term(pm_lock_exec);
+ pm_lock_exec = NULL;
+ }
+
+ if (NULL != pm_lock_state) {
+ _mali_osk_spinlock_irq_term(pm_lock_state);
+ pm_lock_state = NULL;
+ }
+}
+
+struct mali_pm_domain *mali_pm_register_l2_cache(u32 domain_index,
+ struct mali_l2_cache_core *l2_cache)
+{
+ struct mali_pm_domain *domain;
+
+ domain = mali_pm_domain_get_from_mask(domain_config[domain_index]);
+ if (NULL == domain) {
+ MALI_DEBUG_ASSERT(0 == domain_config[domain_index]);
+ domain = mali_pm_domain_get_from_index(
+ MALI_DOMAIN_INDEX_DUMMY);
+ domain_config[domain_index] = MALI_PM_DOMAIN_DUMMY_MASK;
+ } else {
+ MALI_DEBUG_ASSERT(0 != domain_config[domain_index]);
+ }
+
+ MALI_DEBUG_ASSERT(NULL != domain);
+
+ mali_pm_domain_add_l2_cache(domain, l2_cache);
+
+ return domain; /* return the actual domain this was registered in */
+}
+
+struct mali_pm_domain *mali_pm_register_group(u32 domain_index,
+ struct mali_group *group)
+{
+ struct mali_pm_domain *domain;
+
+ domain = mali_pm_domain_get_from_mask(domain_config[domain_index]);
+ if (NULL == domain) {
+ MALI_DEBUG_ASSERT(0 == domain_config[domain_index]);
+ domain = mali_pm_domain_get_from_index(
+ MALI_DOMAIN_INDEX_DUMMY);
+ domain_config[domain_index] = MALI_PM_DOMAIN_DUMMY_MASK;
+ } else {
+ MALI_DEBUG_ASSERT(0 != domain_config[domain_index]);
+ }
+
+ MALI_DEBUG_ASSERT(NULL != domain);
+
+ mali_pm_domain_add_group(domain, group);
+
+ return domain; /* return the actual domain this was registered in */
+}
+
+mali_bool mali_pm_get_domain_refs(struct mali_pm_domain **domains,
+ struct mali_group **groups,
+ u32 num_domains)
+{
+ mali_bool ret = MALI_TRUE; /* Assume all is powered on instantly */
+ u32 i;
+
+ mali_pm_state_lock();
+
+ for (i = 0; i < num_domains; i++) {
+ MALI_DEBUG_ASSERT_POINTER(domains[i]);
+ pd_mask_wanted |= mali_pm_domain_ref_get(domains[i]);
+ if (MALI_FALSE == mali_pm_domain_power_is_on(domains[i])) {
+ /*
+ * Tell caller that the corresponding group
+ * was not already powered on.
+ */
+ ret = MALI_FALSE;
+ } else {
+ /*
+			 * There is a time gap between powering on the domain and
+			 * setting the power state of the corresponding groups to on.
+ */
+ if (NULL != groups[i] &&
+ MALI_FALSE == mali_group_power_is_on(groups[i])) {
+ ret = MALI_FALSE;
+ }
+ }
+ }
+
+ MALI_DEBUG_PRINT(3, ("PM: wanted domain mask = 0x%08X (get refs)\n", pd_mask_wanted));
+
+ mali_pm_state_unlock();
+
+ return ret;
+}
+
+mali_bool mali_pm_put_domain_refs(struct mali_pm_domain **domains,
+ u32 num_domains)
+{
+ u32 mask = 0;
+ mali_bool ret;
+ u32 i;
+
+ mali_pm_state_lock();
+
+ for (i = 0; i < num_domains; i++) {
+ MALI_DEBUG_ASSERT_POINTER(domains[i]);
+ mask |= mali_pm_domain_ref_put(domains[i]);
+ }
+
+ if (0 == mask) {
+ /* return false, all domains should still stay on */
+ ret = MALI_FALSE;
+ } else {
+ /* Assert that we are dealing with a change */
+ MALI_DEBUG_ASSERT((pd_mask_wanted & mask) == mask);
+
+ /* Update our desired domain mask */
+ pd_mask_wanted &= ~mask;
+
+ /* return true; one or more domains can now be powered down */
+ ret = MALI_TRUE;
+ }
+
+ MALI_DEBUG_PRINT(3, ("PM: wanted domain mask = 0x%08X (put refs)\n", pd_mask_wanted));
+
+ mali_pm_state_unlock();
+
+ return ret;
+}
+
+void mali_pm_init_begin(void)
+{
+ struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+ _mali_osk_pm_dev_ref_get_sync();
+
+ /* Ensure all PMU domains are on */
+ if (NULL != pmu) {
+ mali_pmu_power_up_all(pmu);
+ }
+}
+
+void mali_pm_init_end(void)
+{
+ struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+ /* Ensure all PMU domains are off */
+ if (NULL != pmu) {
+ mali_pmu_power_down_all(pmu);
+ }
+
+ _mali_osk_pm_dev_ref_put();
+}
+
+void mali_pm_update_sync(void)
+{
+ mali_pm_exec_lock();
+
+ if (MALI_TRUE == mali_pm_runtime_active) {
+ /*
+ * Only update if GPU is powered on.
+ * Deactivation of the last group will result in both a
+ * deferred runtime PM suspend operation and
+ * deferred execution of this function.
+ * mali_pm_runtime_active will be false if runtime PM
+ * executed first and thus the GPU is now fully powered off.
+ */
+ mali_pm_update_sync_internal();
+ }
+
+ mali_pm_exec_unlock();
+}
+
+void mali_pm_update_async(void)
+{
+ _mali_osk_wq_schedule_work(pm_work);
+}
+
+void mali_pm_os_suspend(mali_bool os_suspend)
+{
+ int ret;
+
+ MALI_DEBUG_PRINT(3, ("Mali PM: OS suspend\n"));
+
+ /* Suspend execution of all jobs, and go to inactive state */
+ mali_executor_suspend();
+
+ if (os_suspend) {
+ mali_control_timer_suspend(MALI_TRUE);
+ }
+
+ mali_pm_exec_lock();
+
+ ret = mali_pm_common_suspend();
+
+ MALI_DEBUG_ASSERT(MALI_TRUE == ret);
+ MALI_IGNORE(ret);
+
+ mali_pm_exec_unlock();
+}
+
+void mali_pm_os_resume(void)
+{
+ struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+ MALI_DEBUG_PRINT(3, ("Mali PM: OS resume\n"));
+
+ mali_pm_exec_lock();
+
+#if defined(DEBUG)
+ mali_pm_state_lock();
+
+ /* Assert that things are as we left them in os_suspend(). */
+ MALI_DEBUG_ASSERT(0 == pd_mask_wanted);
+ MALI_DEBUG_ASSERT(0 == pd_mask_current);
+ MALI_DEBUG_ASSERT(0 == pmu_mask_current);
+
+ MALI_DEBUG_ASSERT(MALI_TRUE == mali_pm_domain_all_unused());
+
+ mali_pm_state_unlock();
+#endif
+
+ if (MALI_TRUE == mali_pm_runtime_active) {
+ /* Runtime PM was active, so reset PMU */
+ if (NULL != pmu) {
+ mali_pmu_reset(pmu);
+ pmu_mask_current = mali_pmu_get_mask(pmu);
+
+ MALI_DEBUG_PRINT(3, ("Mali PM: OS resume 0x%x \n", pmu_mask_current));
+ }
+
+ mali_pm_update_sync_internal();
+ }
+
+ mali_pm_exec_unlock();
+
+ /* Start executing jobs again */
+ mali_executor_resume();
+}
+
+mali_bool mali_pm_runtime_suspend(void)
+{
+ mali_bool ret;
+
+ MALI_DEBUG_PRINT(3, ("Mali PM: Runtime suspend\n"));
+
+ mali_pm_exec_lock();
+
+ /*
+ * Put SW state directly into "off" state, and do not bother to power
+ * down each power domain, because entire GPU will be powered off
+ * when we return.
+ * For runtime PM suspend, in contrast to OS suspend, there is a race
+ * between this function and the mali_pm_update_sync_internal(), which
+ * is fine...
+ */
+ ret = mali_pm_common_suspend();
+ if (MALI_TRUE == ret) {
+ mali_pm_runtime_active = MALI_FALSE;
+ } else {
+ /*
+ * Process the "power up" instead,
+ * which could have been "lost"
+ */
+ mali_pm_update_sync_internal();
+ }
+
+ mali_pm_exec_unlock();
+
+ return ret;
+}
+
+void mali_pm_runtime_resume(void)
+{
+ struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+ mali_pm_exec_lock();
+
+ mali_pm_runtime_active = MALI_TRUE;
+
+#if defined(DEBUG)
+ ++num_pm_runtime_resume;
+
+ mali_pm_state_lock();
+
+ /*
+ * Assert that things are as we left them in runtime_suspend(),
+ * except for pd_mask_wanted which normally will be the reason we
+ * got here (job queued => domains wanted)
+ */
+ MALI_DEBUG_ASSERT(0 == pd_mask_current);
+ MALI_DEBUG_ASSERT(0 == pmu_mask_current);
+
+ mali_pm_state_unlock();
+#endif
+
+ if (NULL != pmu) {
+ mali_pmu_reset(pmu);
+ pmu_mask_current = mali_pmu_get_mask(pmu);
+ MALI_DEBUG_PRINT(3, ("Mali PM: Runtime resume 0x%x \n", pmu_mask_current));
+ }
+
+ /*
+ * Normally we are resumed because a job has just been queued.
+ * pd_mask_wanted should thus be != 0.
+ * It is however possible for others to take a Mali Runtime PM ref
+ * without having a job queued.
+ * We should however always call mali_pm_update_sync_internal(),
+ * because this will take care of any potential mismatch between
+ * pmu_mask_current and pd_mask_current.
+ */
+ mali_pm_update_sync_internal();
+
+ mali_pm_exec_unlock();
+}
+
+#if MALI_STATE_TRACKING
+u32 mali_pm_dump_state_domain(struct mali_pm_domain *domain,
+ char *buf, u32 size)
+{
+ int n = 0;
+
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tPower domain: id %u\n",
+ mali_pm_domain_get_id(domain));
+
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\t\tMask: 0x%04x\n",
+ mali_pm_domain_get_mask(domain));
+
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\t\tUse count: %u\n",
+ mali_pm_domain_get_use_count(domain));
+
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\t\tCurrent power state: %s\n",
+ (mali_pm_domain_get_mask(domain) & pd_mask_current) ?
+ "On" : "Off");
+
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\t\tWanted power state: %s\n",
+ (mali_pm_domain_get_mask(domain) & pd_mask_wanted) ?
+ "On" : "Off");
+
+ return n;
+}
+#endif
+
+static void mali_pm_state_lock(void)
+{
+ _mali_osk_spinlock_irq_lock(pm_lock_state);
+}
+
+static void mali_pm_state_unlock(void)
+{
+ _mali_osk_spinlock_irq_unlock(pm_lock_state);
+}
+
+void mali_pm_exec_lock(void)
+{
+ _mali_osk_mutex_wait(pm_lock_exec);
+}
+
+void mali_pm_exec_unlock(void)
+{
+ _mali_osk_mutex_signal(pm_lock_exec);
+}
+
+static void mali_pm_domain_power_up(u32 power_up_mask,
+ struct mali_group *groups_up[MALI_MAX_NUMBER_OF_GROUPS],
+ u32 *num_groups_up,
+ struct mali_l2_cache_core *l2_up[MALI_MAX_NUMBER_OF_L2_CACHE_CORES],
+ u32 *num_l2_up)
+{
+ u32 domain_bit;
+ u32 notify_mask = power_up_mask;
+
+ MALI_DEBUG_ASSERT(0 != power_up_mask);
+ MALI_DEBUG_ASSERT_POINTER(groups_up);
+ MALI_DEBUG_ASSERT_POINTER(num_groups_up);
+ MALI_DEBUG_ASSERT(0 == *num_groups_up);
+ MALI_DEBUG_ASSERT_POINTER(l2_up);
+ MALI_DEBUG_ASSERT_POINTER(num_l2_up);
+ MALI_DEBUG_ASSERT(0 == *num_l2_up);
+
+ MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec);
+ MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_state);
+
+ MALI_DEBUG_PRINT(5,
+ ("PM update: Powering up domains: . [%s]\n",
+ mali_pm_mask_to_string(power_up_mask)));
+
+ pd_mask_current |= power_up_mask;
+
+ domain_bit = _mali_osk_fls(notify_mask);
+ while (0 != domain_bit) {
+ u32 domain_id = domain_bit - 1;
+ struct mali_pm_domain *domain =
+ mali_pm_domain_get_from_index(
+ domain_id);
+ struct mali_l2_cache_core *l2_cache;
+ struct mali_l2_cache_core *l2_cache_tmp;
+ struct mali_group *group;
+ struct mali_group *group_tmp;
+
+ /* Mark domain as powered up */
+ mali_pm_domain_set_power_on(domain, MALI_TRUE);
+
+ /*
+ * Make a note of the L2 and/or group(s) to notify
+ * (need to release the PM state lock before doing so)
+ */
+
+ _MALI_OSK_LIST_FOREACHENTRY(l2_cache,
+ l2_cache_tmp,
+ mali_pm_domain_get_l2_cache_list(
+ domain),
+ struct mali_l2_cache_core,
+ pm_domain_list) {
+ MALI_DEBUG_ASSERT(*num_l2_up <
+ MALI_MAX_NUMBER_OF_L2_CACHE_CORES);
+ l2_up[*num_l2_up] = l2_cache;
+ (*num_l2_up)++;
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group,
+ group_tmp,
+ mali_pm_domain_get_group_list(domain),
+ struct mali_group,
+ pm_domain_list) {
+ MALI_DEBUG_ASSERT(*num_groups_up <
+ MALI_MAX_NUMBER_OF_GROUPS);
+ groups_up[*num_groups_up] = group;
+
+ (*num_groups_up)++;
+ }
+
+ /* Remove current bit and find next */
+ notify_mask &= ~(1 << (domain_id));
+ domain_bit = _mali_osk_fls(notify_mask);
+ }
+}
+
+static void mali_pm_domain_power_down(u32 power_down_mask,
+ struct mali_group *groups_down[MALI_MAX_NUMBER_OF_GROUPS],
+ u32 *num_groups_down,
+ struct mali_l2_cache_core *l2_down[MALI_MAX_NUMBER_OF_L2_CACHE_CORES],
+ u32 *num_l2_down)
+{
+ u32 domain_bit;
+ u32 notify_mask = power_down_mask;
+
+ MALI_DEBUG_ASSERT(0 != power_down_mask);
+ MALI_DEBUG_ASSERT_POINTER(groups_down);
+ MALI_DEBUG_ASSERT_POINTER(num_groups_down);
+ MALI_DEBUG_ASSERT(0 == *num_groups_down);
+ MALI_DEBUG_ASSERT_POINTER(l2_down);
+ MALI_DEBUG_ASSERT_POINTER(num_l2_down);
+ MALI_DEBUG_ASSERT(0 == *num_l2_down);
+
+ MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec);
+ MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_state);
+
+ MALI_DEBUG_PRINT(5,
+ ("PM update: Powering down domains: [%s]\n",
+ mali_pm_mask_to_string(power_down_mask)));
+
+ pd_mask_current &= ~power_down_mask;
+
+ domain_bit = _mali_osk_fls(notify_mask);
+ while (0 != domain_bit) {
+ u32 domain_id = domain_bit - 1;
+ struct mali_pm_domain *domain =
+ mali_pm_domain_get_from_index(domain_id);
+ struct mali_l2_cache_core *l2_cache;
+ struct mali_l2_cache_core *l2_cache_tmp;
+ struct mali_group *group;
+ struct mali_group *group_tmp;
+
+ /* Mark domain as powered down */
+ mali_pm_domain_set_power_on(domain, MALI_FALSE);
+
+ /*
+ * Make a note of the L2s and/or groups to notify
+ * (need to release the PM state lock before doing so)
+ */
+
+ _MALI_OSK_LIST_FOREACHENTRY(l2_cache,
+ l2_cache_tmp,
+ mali_pm_domain_get_l2_cache_list(domain),
+ struct mali_l2_cache_core,
+ pm_domain_list) {
+ MALI_DEBUG_ASSERT(*num_l2_down <
+ MALI_MAX_NUMBER_OF_L2_CACHE_CORES);
+ l2_down[*num_l2_down] = l2_cache;
+ (*num_l2_down)++;
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group,
+ group_tmp,
+ mali_pm_domain_get_group_list(domain),
+ struct mali_group,
+ pm_domain_list) {
+ MALI_DEBUG_ASSERT(*num_groups_down <
+ MALI_MAX_NUMBER_OF_GROUPS);
+ groups_down[*num_groups_down] = group;
+ (*num_groups_down)++;
+ }
+
+ /* Remove current bit and find next */
+ notify_mask &= ~(1 << (domain_id));
+ domain_bit = _mali_osk_fls(notify_mask);
+ }
+}
+
+/*
+ * Execute pending power domain changes
+ * pm_lock_exec lock must be taken by caller.
+ */
+static void mali_pm_update_sync_internal(void)
+{
+ /*
+ * This should only be called in non-atomic context
+ * (normally as deferred work)
+ *
+ * Look at the pending power domain changes, and execute these.
+ * Make sure group and schedulers are notified about changes.
+ */
+
+ struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+ u32 power_down_mask;
+ u32 power_up_mask;
+
+ MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec);
+
+#if defined(DEBUG)
+ ++num_pm_updates;
+#endif
+
+ /* Hold PM state lock while we look at (and obey) the wanted state */
+ mali_pm_state_lock();
+
+ MALI_DEBUG_PRINT(5, ("PM update pre: Wanted domain mask: .. [%s]\n",
+ mali_pm_mask_to_string(pd_mask_wanted)));
+ MALI_DEBUG_PRINT(5, ("PM update pre: Current domain mask: . [%s]\n",
+ mali_pm_mask_to_string(pd_mask_current)));
+ MALI_DEBUG_PRINT(5, ("PM update pre: Current PMU mask: .... [%s]\n",
+ mali_pm_mask_to_string(pmu_mask_current)));
+ MALI_DEBUG_PRINT(5, ("PM update pre: Group power stats: ... <%s>\n",
+ mali_pm_group_stats_to_string()));
+
+ /* Figure out which cores we need to power on */
+ power_up_mask = pd_mask_wanted &
+ (pd_mask_wanted ^ pd_mask_current);
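+	/*
+	 * Worked example (illustrative): wanted=0b0110, current=0b0011 gives
+	 * wanted^current=0b0101, so power_up_mask=0b0100 (wanted but not on);
+	 * the symmetric power_down_mask computed below would be 0b0001.
+	 */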
+
+ if (0 != power_up_mask) {
+ u32 power_up_mask_pmu;
+ struct mali_group *groups_up[MALI_MAX_NUMBER_OF_GROUPS];
+ u32 num_groups_up = 0;
+ struct mali_l2_cache_core *
+ l2_up[MALI_MAX_NUMBER_OF_L2_CACHE_CORES];
+ u32 num_l2_up = 0;
+ u32 i;
+
+#if defined(DEBUG)
+ ++num_pm_updates_up;
+#endif
+
+ /*
+ * Make sure dummy/global domain is always included when
+ * powering up, since this is controlled by runtime PM,
+ * and device power is on at this stage.
+ */
+ power_up_mask |= MALI_PM_DOMAIN_DUMMY_MASK;
+
+ /* Power up only real PMU domains */
+ power_up_mask_pmu = power_up_mask & ~MALI_PM_DOMAIN_DUMMY_MASK;
+
+ /* But not those that happen to be powered on already */
+ power_up_mask_pmu &= (power_up_mask ^ pmu_mask_current) &
+ power_up_mask;
+
+ if (0 != power_up_mask_pmu) {
+ MALI_DEBUG_ASSERT(NULL != pmu);
+ pmu_mask_current |= power_up_mask_pmu;
+ mali_pmu_power_up(pmu, power_up_mask_pmu);
+ }
+
+ /*
+ * Put the domains themselves in power up state.
+ * We get the groups and L2s to notify in return.
+ */
+ mali_pm_domain_power_up(power_up_mask,
+ groups_up, &num_groups_up,
+ l2_up, &num_l2_up);
+
+ /* Need to unlock PM state lock before notifying L2 + groups */
+ mali_pm_state_unlock();
+
+		/* Notify each L2 cache that it has been powered up */
+ for (i = 0; i < num_l2_up; i++) {
+ mali_l2_cache_power_up(l2_up[i]);
+ }
+
+ /*
+ * Tell execution module about all the groups we have
+ * powered up. Groups will be notified as a result of this.
+ */
+ mali_executor_group_power_up(groups_up, num_groups_up);
+
+ /* Lock state again before checking for power down */
+ mali_pm_state_lock();
+ }
+
+ /* Figure out which cores we need to power off */
+ power_down_mask = pd_mask_current &
+ (pd_mask_wanted ^ pd_mask_current);
+
+ /*
+ * Never power down the dummy/global domain here. This is to be done
+	 * from a suspend request (since this domain is only physically powered
+ * down at that point)
+ */
+ power_down_mask &= ~MALI_PM_DOMAIN_DUMMY_MASK;
+
+ if (0 != power_down_mask) {
+ u32 power_down_mask_pmu;
+ struct mali_group *groups_down[MALI_MAX_NUMBER_OF_GROUPS];
+ u32 num_groups_down = 0;
+ struct mali_l2_cache_core *
+ l2_down[MALI_MAX_NUMBER_OF_L2_CACHE_CORES];
+ u32 num_l2_down = 0;
+ u32 i;
+
+#if defined(DEBUG)
+ ++num_pm_updates_down;
+#endif
+
+ /*
+ * Put the domains themselves in power down state.
+ * We get the groups and L2s to notify in return.
+ */
+ mali_pm_domain_power_down(power_down_mask,
+ groups_down, &num_groups_down,
+ l2_down, &num_l2_down);
+
+ /* Need to unlock PM state lock before notifying L2 + groups */
+ mali_pm_state_unlock();
+
+ /*
+ * Tell execution module about all the groups we will be
+ * powering down. Groups will be notified as a result of this.
+ */
+ if (0 < num_groups_down) {
+ mali_executor_group_power_down(groups_down, num_groups_down);
+ }
+
+ /* Notify each L2 cache that we will be powering down */
+ for (i = 0; i < num_l2_down; i++) {
+ mali_l2_cache_power_down(l2_down[i]);
+ }
+
+ /*
+ * Power down only PMU domains which should not stay on
+ * Some domains might for instance currently be incorrectly
+ * powered up if default domain power state is all on.
+ */
+ power_down_mask_pmu = pmu_mask_current & (~pd_mask_current);
+
+ if (0 != power_down_mask_pmu) {
+ MALI_DEBUG_ASSERT(NULL != pmu);
+ pmu_mask_current &= ~power_down_mask_pmu;
+ mali_pmu_power_down(pmu, power_down_mask_pmu);
+
+ }
+ } else {
+ /*
+ * Power down only PMU domains which should not stay on
+ * Some domains might for instance currently be incorrectly
+ * powered up if default domain power state is all on.
+ */
+ u32 power_down_mask_pmu;
+
+ /* No need for state lock since we'll only update PMU */
+ mali_pm_state_unlock();
+
+ power_down_mask_pmu = pmu_mask_current & (~pd_mask_current);
+
+ if (0 != power_down_mask_pmu) {
+ MALI_DEBUG_ASSERT(NULL != pmu);
+ pmu_mask_current &= ~power_down_mask_pmu;
+ mali_pmu_power_down(pmu, power_down_mask_pmu);
+ }
+ }
+
+ MALI_DEBUG_PRINT(5, ("PM update post: Current domain mask: . [%s]\n",
+ mali_pm_mask_to_string(pd_mask_current)));
+ MALI_DEBUG_PRINT(5, ("PM update post: Current PMU mask: .... [%s]\n",
+ mali_pm_mask_to_string(pmu_mask_current)));
+ MALI_DEBUG_PRINT(5, ("PM update post: Group power stats: ... <%s>\n",
+ mali_pm_group_stats_to_string()));
+}
+
+static mali_bool mali_pm_common_suspend(void)
+{
+ mali_pm_state_lock();
+
+ if (0 != pd_mask_wanted) {
+ MALI_DEBUG_PRINT(5, ("PM: Aborting suspend operation\n\n\n"));
+ mali_pm_state_unlock();
+ return MALI_FALSE;
+ }
+
+ MALI_DEBUG_PRINT(5, ("PM suspend pre: Wanted domain mask: .. [%s]\n",
+ mali_pm_mask_to_string(pd_mask_wanted)));
+ MALI_DEBUG_PRINT(5, ("PM suspend pre: Current domain mask: . [%s]\n",
+ mali_pm_mask_to_string(pd_mask_current)));
+ MALI_DEBUG_PRINT(5, ("PM suspend pre: Current PMU mask: .... [%s]\n",
+ mali_pm_mask_to_string(pmu_mask_current)));
+ MALI_DEBUG_PRINT(5, ("PM suspend pre: Group power stats: ... <%s>\n",
+ mali_pm_group_stats_to_string()));
+
+ if (0 != pd_mask_current) {
+ /*
+ * We have still some domains powered on.
+ * It is for instance very normal that at least the
+ * dummy/global domain is marked as powered on at this point.
+ * (because it is physically powered on until this function
+ * returns)
+ */
+
+ struct mali_group *groups_down[MALI_MAX_NUMBER_OF_GROUPS];
+ u32 num_groups_down = 0;
+ struct mali_l2_cache_core *
+ l2_down[MALI_MAX_NUMBER_OF_L2_CACHE_CORES];
+ u32 num_l2_down = 0;
+ u32 i;
+
+ /*
+ * Put the domains themselves in power down state.
+ * We get the groups and L2s to notify in return.
+ */
+ mali_pm_domain_power_down(pd_mask_current,
+ groups_down,
+ &num_groups_down,
+ l2_down,
+ &num_l2_down);
+
+ MALI_DEBUG_ASSERT(0 == pd_mask_current);
+ MALI_DEBUG_ASSERT(MALI_TRUE == mali_pm_domain_all_unused());
+
+ /* Need to unlock PM state lock before notifying L2 + groups */
+ mali_pm_state_unlock();
+
+ /*
+ * Tell execution module about all the groups we will be
+ * powering down. Groups will be notified as a result of this.
+ */
+ if (0 < num_groups_down) {
+ mali_executor_group_power_down(groups_down, num_groups_down);
+ }
+
+ /* Notify each L2 cache that we will be powering down */
+ for (i = 0; i < num_l2_down; i++) {
+ mali_l2_cache_power_down(l2_down[i]);
+ }
+
+ pmu_mask_current = 0;
+ } else {
+ MALI_DEBUG_ASSERT(0 == pmu_mask_current);
+
+ MALI_DEBUG_ASSERT(MALI_TRUE == mali_pm_domain_all_unused());
+
+ mali_pm_state_unlock();
+ }
+
+ MALI_DEBUG_PRINT(5, ("PM suspend post: Current domain mask: [%s]\n",
+ mali_pm_mask_to_string(pd_mask_current)));
+ MALI_DEBUG_PRINT(5, ("PM suspend post: Current PMU mask: ... [%s]\n",
+ mali_pm_mask_to_string(pmu_mask_current)));
+ MALI_DEBUG_PRINT(5, ("PM suspend post: Group power stats: .. <%s>\n",
+ mali_pm_group_stats_to_string()));
+
+ return MALI_TRUE;
+}
+
+static void mali_pm_update_work(void *data)
+{
+ MALI_IGNORE(data);
+ mali_pm_update_sync();
+}
+
+static _mali_osk_errcode_t mali_pm_create_pm_domains(void)
+{
+ int i;
+
+ /* Create all domains (including dummy domain) */
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ if (0x0 == domain_config[i]) continue;
+
+ if (NULL == mali_pm_domain_create(domain_config[i])) {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+static void mali_pm_set_default_pm_domain_config(void)
+{
+ MALI_DEBUG_ASSERT(0 != _mali_osk_resource_base_address());
+
+ /* GP core */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI_OFFSET_GP, NULL)) {
+ domain_config[MALI_DOMAIN_INDEX_GP] = 0x01;
+ }
+
+ /* PP0 - PP3 core */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI_OFFSET_PP0, NULL)) {
+ if (mali_is_mali400()) {
+ domain_config[MALI_DOMAIN_INDEX_PP0] = 0x01 << 2;
+ } else if (mali_is_mali450()) {
+ domain_config[MALI_DOMAIN_INDEX_PP0] = 0x01 << 1;
+ } else if (mali_is_mali470()) {
+ domain_config[MALI_DOMAIN_INDEX_PP0] = 0x01 << 0;
+ }
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI_OFFSET_PP1, NULL)) {
+ if (mali_is_mali400()) {
+ domain_config[MALI_DOMAIN_INDEX_PP1] = 0x01 << 3;
+ } else if (mali_is_mali450()) {
+ domain_config[MALI_DOMAIN_INDEX_PP1] = 0x01 << 2;
+ } else if (mali_is_mali470()) {
+ domain_config[MALI_DOMAIN_INDEX_PP1] = 0x01 << 1;
+ }
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI_OFFSET_PP2, NULL)) {
+ if (mali_is_mali400()) {
+ domain_config[MALI_DOMAIN_INDEX_PP2] = 0x01 << 4;
+ } else if (mali_is_mali450()) {
+ domain_config[MALI_DOMAIN_INDEX_PP2] = 0x01 << 2;
+ } else if (mali_is_mali470()) {
+ domain_config[MALI_DOMAIN_INDEX_PP2] = 0x01 << 1;
+ }
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI_OFFSET_PP3, NULL)) {
+ if (mali_is_mali400()) {
+ domain_config[MALI_DOMAIN_INDEX_PP3] = 0x01 << 5;
+ } else if (mali_is_mali450()) {
+ domain_config[MALI_DOMAIN_INDEX_PP3] = 0x01 << 2;
+ } else if (mali_is_mali470()) {
+ domain_config[MALI_DOMAIN_INDEX_PP3] = 0x01 << 1;
+ }
+ }
+
+ /* PP4 - PP7 */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI_OFFSET_PP4, NULL)) {
+ domain_config[MALI_DOMAIN_INDEX_PP4] = 0x01 << 3;
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI_OFFSET_PP5, NULL)) {
+ domain_config[MALI_DOMAIN_INDEX_PP5] = 0x01 << 3;
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI_OFFSET_PP6, NULL)) {
+ domain_config[MALI_DOMAIN_INDEX_PP6] = 0x01 << 3;
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI_OFFSET_PP7, NULL)) {
+ domain_config[MALI_DOMAIN_INDEX_PP7] = 0x01 << 3;
+ }
+
+ /* L2gp/L2PP0/L2PP4 */
+ if (mali_is_mali400()) {
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI400_OFFSET_L2_CACHE0, NULL)) {
+ domain_config[MALI_DOMAIN_INDEX_L20] = 0x01 << 1;
+ }
+ } else if (mali_is_mali450()) {
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI450_OFFSET_L2_CACHE0, NULL)) {
+ domain_config[MALI_DOMAIN_INDEX_L20] = 0x01 << 0;
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI450_OFFSET_L2_CACHE1, NULL)) {
+ domain_config[MALI_DOMAIN_INDEX_L21] = 0x01 << 1;
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI450_OFFSET_L2_CACHE2, NULL)) {
+ domain_config[MALI_DOMAIN_INDEX_L22] = 0x01 << 3;
+ }
+ } else if (mali_is_mali470()) {
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI470_OFFSET_L2_CACHE1, NULL)) {
+ domain_config[MALI_DOMAIN_INDEX_L21] = 0x01 << 0;
+ }
+ }
+}
+
+static u32 mali_pm_get_registered_cores_mask(void)
+{
+ int i = 0;
+ u32 mask = 0;
+
+ for (i = 0; i < MALI_DOMAIN_INDEX_DUMMY; i++) {
+ mask |= domain_config[i];
+ }
+
+ return mask;
+}
+
+static void mali_pm_set_pmu_domain_config(void)
+{
+ int i = 0;
+
+ _mali_osk_device_data_pmu_config_get(domain_config, MALI_MAX_NUMBER_OF_DOMAINS - 1);
+
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS - 1; i++) {
+ if (0 != domain_config[i]) {
+ MALI_DEBUG_PRINT(2, ("Using customer pmu config:\n"));
+ break;
+ }
+ }
+
+ if (MALI_MAX_NUMBER_OF_DOMAINS - 1 == i) {
+ MALI_DEBUG_PRINT(2, ("Using hw detect pmu config:\n"));
+ mali_pm_set_default_pm_domain_config();
+ }
+
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS - 1; i++) {
+ if (domain_config[i]) {
+ MALI_DEBUG_PRINT(2, ("domain_config[%d] = 0x%x \n", i, domain_config[i]));
+ }
+ }
+ /* Can't override dummy domain mask */
+ domain_config[MALI_DOMAIN_INDEX_DUMMY] =
+ 1 << MALI_DOMAIN_INDEX_DUMMY;
+}
+
+#if defined(DEBUG)
+const char *mali_pm_mask_to_string(u32 mask)
+{
+ static char bit_str[MALI_MAX_NUMBER_OF_DOMAINS + 1];
+ int bit;
+ int str_pos = 0;
+
+ /* Must be protected by lock since we use shared string buffer */
+ if (NULL != pm_lock_exec) {
+ MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec);
+ }
+
+ for (bit = MALI_MAX_NUMBER_OF_DOMAINS - 1; bit >= 0; bit--) {
+ if (mask & (1 << bit)) {
+ bit_str[str_pos] = 'X';
+ } else {
+ bit_str[str_pos] = '-';
+ }
+ str_pos++;
+ }
+
+ bit_str[MALI_MAX_NUMBER_OF_DOMAINS] = '\0';
+
+ return bit_str;
+}
+
+const char *mali_pm_group_stats_to_string(void)
+{
+ static char bit_str[MALI_MAX_NUMBER_OF_GROUPS + 1];
+ u32 num_groups = mali_group_get_glob_num_groups();
+ u32 i;
+
+ /* Must be protected by lock since we use shared string buffer */
+ if (NULL != pm_lock_exec) {
+ MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec);
+ }
+
+ for (i = 0; i < num_groups && i < MALI_MAX_NUMBER_OF_GROUPS; i++) {
+ struct mali_group *group;
+
+ group = mali_group_get_glob_group(i);
+
+ if (MALI_TRUE == mali_group_power_is_on(group)) {
+ bit_str[i] = 'X';
+ } else {
+ bit_str[i] = '-';
+ }
+ }
+
+ bit_str[i] = '\0';
+
+ return bit_str;
+}
+#endif
+
+/*
+ * num_pp is the number of PP cores which will be powered on given this mask
+ * cost is the total power cost of cores which will be powered on given this mask
+ */
+static void mali_pm_stat_from_mask(u32 mask, u32 *num_pp, u32 *cost)
+{
+ u32 i;
+
+ /* loop through all cores */
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ if (!(domain_config[i] & mask)) {
+ continue;
+ }
+
+ switch (i) {
+ case MALI_DOMAIN_INDEX_GP:
+ *cost += MALI_GP_COST;
+
+ break;
+ case MALI_DOMAIN_INDEX_PP0: /* Fall through */
+ case MALI_DOMAIN_INDEX_PP1: /* Fall through */
+ case MALI_DOMAIN_INDEX_PP2: /* Fall through */
+ case MALI_DOMAIN_INDEX_PP3:
+ if (mali_is_mali400()) {
+ if ((domain_config[MALI_DOMAIN_INDEX_L20] & mask)
+ || (domain_config[MALI_DOMAIN_INDEX_DUMMY]
+ == domain_config[MALI_DOMAIN_INDEX_L20])) {
+ *num_pp += 1;
+ }
+ } else {
+ if ((domain_config[MALI_DOMAIN_INDEX_L21] & mask)
+ || (domain_config[MALI_DOMAIN_INDEX_DUMMY]
+ == domain_config[MALI_DOMAIN_INDEX_L21])) {
+ *num_pp += 1;
+ }
+ }
+
+ *cost += MALI_PP_COST;
+ break;
+ case MALI_DOMAIN_INDEX_PP4: /* Fall through */
+ case MALI_DOMAIN_INDEX_PP5: /* Fall through */
+ case MALI_DOMAIN_INDEX_PP6: /* Fall through */
+ case MALI_DOMAIN_INDEX_PP7:
+ MALI_DEBUG_ASSERT(mali_is_mali450());
+
+ if ((domain_config[MALI_DOMAIN_INDEX_L22] & mask)
+ || (domain_config[MALI_DOMAIN_INDEX_DUMMY]
+ == domain_config[MALI_DOMAIN_INDEX_L22])) {
+ *num_pp += 1;
+ }
+
+ *cost += MALI_PP_COST;
+ break;
+ case MALI_DOMAIN_INDEX_L20: /* Fall through */
+ case MALI_DOMAIN_INDEX_L21: /* Fall through */
+ case MALI_DOMAIN_INDEX_L22:
+ *cost += MALI_L2_COST;
+
+ break;
+ }
+ }
+}
+
+void mali_pm_power_cost_setup(void)
+{
+ /*
+ * Two parallel arrays which store the best domain mask and its cost
+	 * The index is the number of PP cores; e.g. index 0 is for the 1-PP
+	 * option, which might have mask 0x2 with a cost of 1. Lower cost is better.
+ */
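+	/*
+	 * Illustrative reading: best_mask[0] ends up as the cheapest mask
+	 * that powers at least 1 PP core, best_mask[1] the cheapest for 2
+	 * PP cores, and so on; best_cost[] holds the matching costs.
+	 */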
+ u32 best_mask[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS] = { 0 };
+ u32 best_cost[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS] = { 0 };
+ /* Array cores_in_domain is used to store the total pp cores in each pm domain. */
+ u32 cores_in_domain[MALI_MAX_NUMBER_OF_DOMAINS] = { 0 };
+	/* max_domain_mask/max_domain_id track the highest PM domain in use. */
+ u32 max_domain_mask = 0;
+ u32 max_domain_id = 0;
+ u32 always_on_pp_cores = 0;
+
+ u32 num_pp, cost, mask;
+	u32 i, j, k;
+
+ /* Initialize statistics */
+ for (i = 0; i < MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS; i++) {
+ best_mask[i] = 0;
+ best_cost[i] = 0xFFFFFFFF; /* lower cost is better */
+ }
+
+ for (i = 0; i < MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS + 1; i++) {
+ for (j = 0; j < MALI_MAX_NUMBER_OF_DOMAINS; j++) {
+ mali_pm_domain_power_cost_result[i][j] = 0;
+ }
+ }
+
+	/* Calculate the number of PP cores for a given domain config. */
+ for (i = MALI_DOMAIN_INDEX_PP0; i <= MALI_DOMAIN_INDEX_PP7; i++) {
+ if (0 < domain_config[i]) {
+			/* Get the max domain mask value used to calculate the power
+			 * cost, not counting always-on PP cores. */
+ if (MALI_PM_DOMAIN_DUMMY_MASK != domain_config[i]
+ && max_domain_mask < domain_config[i]) {
+ max_domain_mask = domain_config[i];
+ }
+
+ if (MALI_PM_DOMAIN_DUMMY_MASK == domain_config[i]) {
+ always_on_pp_cores++;
+ }
+ }
+ }
+ max_domain_id = _mali_osk_fls(max_domain_mask);
+
+ /*
+ * Try all combinations of power domains and check how many PP cores
+ * they have and their power cost.
+ */
+ for (mask = 0; mask < (1 << max_domain_id); mask++) {
+ num_pp = 0;
+ cost = 0;
+
+ mali_pm_stat_from_mask(mask, &num_pp, &cost);
+
+ /* This mask is usable for all MP1 up to num_pp PP cores, check statistics for all */
+ for (i = 0; i < num_pp; i++) {
+ if (best_cost[i] >= cost) {
+ best_cost[i] = cost;
+ best_mask[i] = mask;
+ }
+ }
+ }
+
+ /*
+	 * If we want to enable x PP cores and x is less than the number of
+	 * always-on PP cores, all of the PP cores we enable must be always-on cores.
+ */
+ for (i = 0; i < mali_executor_get_num_cores_total(); i++) {
+ if (i < always_on_pp_cores) {
+ mali_pm_domain_power_cost_result[i + 1][MALI_MAX_NUMBER_OF_DOMAINS - 1]
+ = i + 1;
+ } else {
+ mali_pm_domain_power_cost_result[i + 1][MALI_MAX_NUMBER_OF_DOMAINS - 1]
+ = always_on_pp_cores;
+ }
+ }
+
+	/* In this loop, i represents the number of non-always-on PP cores we want to enable. */
+ for (i = 0; i < (mali_executor_get_num_cores_total() - always_on_pp_cores); i++) {
+ if (best_mask[i] == 0) {
+ /* This MP variant is not available */
+ continue;
+ }
+
+ for (j = 0; j < MALI_MAX_NUMBER_OF_DOMAINS; j++) {
+ cores_in_domain[j] = 0;
+ }
+
+ for (j = MALI_DOMAIN_INDEX_PP0; j <= MALI_DOMAIN_INDEX_PP7; j++) {
+ if (0 < domain_config[j]
+ && (MALI_PM_DOMAIN_DUMMY_MASK != domain_config[i])) {
+ cores_in_domain[_mali_osk_fls(domain_config[j]) - 1]++;
+ }
+ }
+
+		/* In this loop, j represents the number of PP cores we have already enabled. */
+ for (j = 0; j <= i;) {
+			/* k visits every domain to get the number of PP cores remaining in it. */
+ for (k = 0; k < max_domain_id; k++) {
+				/* If domain k in best_mask[i] is enabled and this domain has
+				 * extra PP cores, we must pick at least one PP core from this
+				 * domain, then move on to the next enabled PM domain. */
+ if ((best_mask[i] & (0x1 << k)) && (0 < cores_in_domain[k])) {
+ cores_in_domain[k]--;
+ mali_pm_domain_power_cost_result[always_on_pp_cores + i + 1][k]++;
+ j++;
+ if (j > i) {
+ break;
+ }
+ }
+ }
+ }
+ }
+}
+
+/*
+ * When we are doing core scaling,
+ * this function is called to return the best mask to
+ * achieve the best pp group power cost.
+ */
+void mali_pm_get_best_power_cost_mask(int num_requested, int *dst)
+{
+ MALI_DEBUG_ASSERT((mali_executor_get_num_cores_total() >= num_requested) && (0 <= num_requested));
+
+ _mali_osk_memcpy(dst, mali_pm_domain_power_cost_result[num_requested], MALI_MAX_NUMBER_OF_DOMAINS * sizeof(int));
+}
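+
+/*
+ * Caller sketch (illustrative): a core-scaling path asking for 2 PP cores
+ * would do
+ *
+ *     int cores_per_domain[MALI_MAX_NUMBER_OF_DOMAINS];
+ *     mali_pm_get_best_power_cost_mask(2, cores_per_domain);
+ *
+ * after which cores_per_domain[k] holds how many PP cores to enable in
+ * PM domain k (the dummy domain entry counts the always-on cores).
+ */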
+
+u32 mali_pm_get_current_mask(void)
+{
+ return pd_mask_current;
+}
+
+u32 mali_pm_get_wanted_mask(void)
+{
+ return pd_mask_wanted;
+}
diff --git a/drivers/gpu/arm/utgard/common/mali_pm.h b/drivers/gpu/arm/utgard/common/mali_pm.h
new file mode 100644
index 000000000000..d72c732e698d
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_pm.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2011-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PM_H__
+#define __MALI_PM_H__
+
+#include "mali_osk.h"
+#include "mali_pm_domain.h"
+
+#define MALI_DOMAIN_INDEX_GP 0
+#define MALI_DOMAIN_INDEX_PP0 1
+#define MALI_DOMAIN_INDEX_PP1 2
+#define MALI_DOMAIN_INDEX_PP2 3
+#define MALI_DOMAIN_INDEX_PP3 4
+#define MALI_DOMAIN_INDEX_PP4 5
+#define MALI_DOMAIN_INDEX_PP5 6
+#define MALI_DOMAIN_INDEX_PP6 7
+#define MALI_DOMAIN_INDEX_PP7 8
+#define MALI_DOMAIN_INDEX_L20 9
+#define MALI_DOMAIN_INDEX_L21 10
+#define MALI_DOMAIN_INDEX_L22 11
+/*
+ * The dummy domain is used when there is no physical power domain
+ * (e.g. no PMU or always on cores)
+ */
+#define MALI_DOMAIN_INDEX_DUMMY 12
+#define MALI_MAX_NUMBER_OF_DOMAINS 13
+
+/**
+ * Initialize the Mali PM module
+ *
+ * PM module covers Mali PM core, PM domains and Mali PMU
+ */
+_mali_osk_errcode_t mali_pm_initialize(void);
+
+/**
+ * Terminate the Mali PM module
+ */
+void mali_pm_terminate(void);
+
+void mali_pm_exec_lock(void);
+void mali_pm_exec_unlock(void);
+
+
+struct mali_pm_domain *mali_pm_register_l2_cache(u32 domain_index,
+ struct mali_l2_cache_core *l2_cache);
+struct mali_pm_domain *mali_pm_register_group(u32 domain_index,
+ struct mali_group *group);
+
+mali_bool mali_pm_get_domain_refs(struct mali_pm_domain **domains,
+ struct mali_group **groups,
+ u32 num_domains);
+mali_bool mali_pm_put_domain_refs(struct mali_pm_domain **domains,
+ u32 num_domains);
+
+void mali_pm_init_begin(void);
+void mali_pm_init_end(void);
+
+void mali_pm_update_sync(void);
+void mali_pm_update_async(void);
+
+/* Callback functions for system power management */
+void mali_pm_os_suspend(mali_bool os_suspend);
+void mali_pm_os_resume(void);
+
+mali_bool mali_pm_runtime_suspend(void);
+void mali_pm_runtime_resume(void);
+
+#if MALI_STATE_TRACKING
+u32 mali_pm_dump_state_domain(struct mali_pm_domain *domain,
+ char *buf, u32 size);
+#endif
+
+void mali_pm_power_cost_setup(void);
+
+void mali_pm_get_best_power_cost_mask(int num_requested, int *dst);
+
+#if defined(DEBUG)
+const char *mali_pm_mask_to_string(u32 mask);
+#endif
+
+u32 mali_pm_get_current_mask(void);
+u32 mali_pm_get_wanted_mask(void);
+#endif /* __MALI_PM_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_pm_domain.c b/drivers/gpu/arm/utgard/common/mali_pm_domain.c
new file mode 100644
index 000000000000..dbf985e6d37b
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_pm_domain.c
@@ -0,0 +1,209 @@
+/*
+ * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_pm_domain.h"
+#include "mali_pmu.h"
+#include "mali_group.h"
+#include "mali_pm.h"
+
+static struct mali_pm_domain *mali_pm_domains[MALI_MAX_NUMBER_OF_DOMAINS] =
+{ NULL, };
+
+void mali_pm_domain_initialize(void)
+{
+ /* Domains will be initialized/created on demand */
+}
+
+void mali_pm_domain_terminate(void)
+{
+ int i;
+
+	/* Delete all domains that have been created */
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ mali_pm_domain_delete(mali_pm_domains[i]);
+ mali_pm_domains[i] = NULL;
+ }
+}
+
+struct mali_pm_domain *mali_pm_domain_create(u32 pmu_mask)
+{
+ struct mali_pm_domain *domain = NULL;
+ u32 domain_id = 0;
+
+ domain = mali_pm_domain_get_from_mask(pmu_mask);
+ if (NULL != domain) return domain;
+
+ MALI_DEBUG_PRINT(2,
+ ("Mali PM domain: Creating Mali PM domain (mask=0x%08X)\n",
+ pmu_mask));
+
+ domain = (struct mali_pm_domain *)_mali_osk_malloc(
+ sizeof(struct mali_pm_domain));
+ if (NULL != domain) {
+ domain->power_is_on = MALI_FALSE;
+ domain->pmu_mask = pmu_mask;
+ domain->use_count = 0;
+ _mali_osk_list_init(&domain->group_list);
+ _mali_osk_list_init(&domain->l2_cache_list);
+
+ domain_id = _mali_osk_fls(pmu_mask) - 1;
+ /* Verify the domain_id */
+ MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > domain_id);
+		/* Verify that only one bit is set in pmu_mask */
+ MALI_DEBUG_ASSERT((1 << domain_id) == pmu_mask);
+ mali_pm_domains[domain_id] = domain;
+
+ return domain;
+ } else {
+ MALI_DEBUG_PRINT_ERROR(("Unable to create PM domain\n"));
+ }
+
+ return NULL;
+}
+
+void mali_pm_domain_delete(struct mali_pm_domain *domain)
+{
+ if (NULL == domain) {
+ return;
+ }
+
+ _mali_osk_list_delinit(&domain->group_list);
+ _mali_osk_list_delinit(&domain->l2_cache_list);
+
+ _mali_osk_free(domain);
+}
+
+void mali_pm_domain_add_group(struct mali_pm_domain *domain,
+ struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(domain);
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ /*
+	 * Use addtail because the virtual group is created last and needs
+	 * to be at the end of the list (in order to be activated after
+	 * all children).
+ */
+ _mali_osk_list_addtail(&group->pm_domain_list, &domain->group_list);
+}
+
+void mali_pm_domain_add_l2_cache(struct mali_pm_domain *domain,
+ struct mali_l2_cache_core *l2_cache)
+{
+ MALI_DEBUG_ASSERT_POINTER(domain);
+ MALI_DEBUG_ASSERT_POINTER(l2_cache);
+ _mali_osk_list_add(&l2_cache->pm_domain_list, &domain->l2_cache_list);
+}
+
+struct mali_pm_domain *mali_pm_domain_get_from_mask(u32 mask)
+{
+ u32 id = 0;
+
+ if (0 == mask) {
+ return NULL;
+ }
+
+ id = _mali_osk_fls(mask) - 1;
+
+ MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > id);
+	/* Verify that only one bit is set in the mask */
+ MALI_DEBUG_ASSERT((1 << id) == mask);
+
+ return mali_pm_domains[id];
+}
+
+struct mali_pm_domain *mali_pm_domain_get_from_index(u32 id)
+{
+ MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > id);
+
+ return mali_pm_domains[id];
+}
+
+u32 mali_pm_domain_ref_get(struct mali_pm_domain *domain)
+{
+ MALI_DEBUG_ASSERT_POINTER(domain);
+
+ if (0 == domain->use_count) {
+ _mali_osk_pm_dev_ref_get_async();
+ }
+
+ ++domain->use_count;
+ MALI_DEBUG_PRINT(4, ("PM domain %p: ref_get, use_count => %u\n", domain, domain->use_count));
+
+ /* Return our mask so caller can check this against wanted mask */
+ return domain->pmu_mask;
+}
+
+u32 mali_pm_domain_ref_put(struct mali_pm_domain *domain)
+{
+ MALI_DEBUG_ASSERT_POINTER(domain);
+
+ --domain->use_count;
+ MALI_DEBUG_PRINT(4, ("PM domain %p: ref_put, use_count => %u\n", domain, domain->use_count));
+
+ if (0 == domain->use_count) {
+ _mali_osk_pm_dev_ref_put();
+ }
+
+ /*
+	 * Return the PMU mask which could now be powered down
+	 * (the bit for this domain).
+	 * This is the responsibility of the caller (mali_pm).
+ */
+ return (0 == domain->use_count ? domain->pmu_mask : 0);
+}
+
+#if MALI_STATE_TRACKING
+u32 mali_pm_domain_get_id(struct mali_pm_domain *domain)
+{
+ u32 id = 0;
+
+ MALI_DEBUG_ASSERT_POINTER(domain);
+ MALI_DEBUG_ASSERT(0 != domain->pmu_mask);
+
+ id = _mali_osk_fls(domain->pmu_mask) - 1;
+
+ MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > id);
+	/* Verify that only one bit is set in pmu_mask */
+ MALI_DEBUG_ASSERT((1 << id) == domain->pmu_mask);
+ /* Verify that we have stored the domain at right id/index */
+ MALI_DEBUG_ASSERT(domain == mali_pm_domains[id]);
+
+ return id;
+}
+#endif
+
+#if defined(DEBUG)
+mali_bool mali_pm_domain_all_unused(void)
+{
+ int i;
+
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ if (NULL == mali_pm_domains[i]) {
+ /* Nothing to check */
+ continue;
+ }
+
+ if (MALI_TRUE == mali_pm_domains[i]->power_is_on) {
+ /* Not ready for suspend! */
+ return MALI_FALSE;
+ }
+
+ if (0 != mali_pm_domains[i]->use_count) {
+ /* Not ready for suspend! */
+ return MALI_FALSE;
+ }
+ }
+
+ return MALI_TRUE;
+}
+#endif
diff --git a/drivers/gpu/arm/utgard/common/mali_pm_domain.h b/drivers/gpu/arm/utgard/common/mali_pm_domain.h
new file mode 100644
index 000000000000..aceb3449359a
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_pm_domain.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PM_DOMAIN_H__
+#define __MALI_PM_DOMAIN_H__
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+
+#include "mali_l2_cache.h"
+#include "mali_group.h"
+#include "mali_pmu.h"
+
+/* Instances are protected by PM state lock */
+struct mali_pm_domain {
+ mali_bool power_is_on;
+ s32 use_count;
+ u32 pmu_mask;
+
+ /* Zero or more groups can belong to this domain */
+ _mali_osk_list_t group_list;
+
+ /* Zero or more L2 caches can belong to this domain */
+ _mali_osk_list_t l2_cache_list;
+};
+
+
+void mali_pm_domain_initialize(void);
+void mali_pm_domain_terminate(void);
+
+struct mali_pm_domain *mali_pm_domain_create(u32 pmu_mask);
+void mali_pm_domain_delete(struct mali_pm_domain *domain);
+
+void mali_pm_domain_add_l2_cache(
+ struct mali_pm_domain *domain,
+ struct mali_l2_cache_core *l2_cache);
+void mali_pm_domain_add_group(struct mali_pm_domain *domain,
+ struct mali_group *group);
+
+struct mali_pm_domain *mali_pm_domain_get_from_mask(u32 mask);
+struct mali_pm_domain *mali_pm_domain_get_from_index(u32 id);
+
+/* Ref counting */
+u32 mali_pm_domain_ref_get(struct mali_pm_domain *domain);
+u32 mali_pm_domain_ref_put(struct mali_pm_domain *domain);
+
+MALI_STATIC_INLINE _mali_osk_list_t *mali_pm_domain_get_group_list(
+ struct mali_pm_domain *domain)
+{
+ MALI_DEBUG_ASSERT_POINTER(domain);
+ return &domain->group_list;
+}
+
+MALI_STATIC_INLINE _mali_osk_list_t *mali_pm_domain_get_l2_cache_list(
+ struct mali_pm_domain *domain)
+{
+ MALI_DEBUG_ASSERT_POINTER(domain);
+ return &domain->l2_cache_list;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pm_domain_power_is_on(
+ struct mali_pm_domain *domain)
+{
+ MALI_DEBUG_ASSERT_POINTER(domain);
+ return domain->power_is_on;
+}
+
+MALI_STATIC_INLINE void mali_pm_domain_set_power_on(
+ struct mali_pm_domain *domain,
+ mali_bool power_is_on)
+{
+ MALI_DEBUG_ASSERT_POINTER(domain);
+ domain->power_is_on = power_is_on;
+}
+
+MALI_STATIC_INLINE u32 mali_pm_domain_get_use_count(
+ struct mali_pm_domain *domain)
+{
+ MALI_DEBUG_ASSERT_POINTER(domain);
+ return domain->use_count;
+}
+
+#if MALI_STATE_TRACKING
+u32 mali_pm_domain_get_id(struct mali_pm_domain *domain);
+
+MALI_STATIC_INLINE u32 mali_pm_domain_get_mask(struct mali_pm_domain *domain)
+{
+ MALI_DEBUG_ASSERT_POINTER(domain);
+ return domain->pmu_mask;
+}
+#endif
+
+#if defined(DEBUG)
+mali_bool mali_pm_domain_all_unused(void);
+#endif
+
+#endif /* __MALI_PM_DOMAIN_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_pmu.c b/drivers/gpu/arm/utgard/common/mali_pmu.c
new file mode 100644
index 000000000000..2a3008a6dd83
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_pmu.c
@@ -0,0 +1,270 @@
+/*
+ * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmu.c
+ * Mali driver functions for Mali 400 PMU hardware
+ */
+#include "mali_hw_core.h"
+#include "mali_pmu.h"
+#include "mali_pp.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_pm.h"
+#include "mali_osk_mali.h"
+
+struct mali_pmu_core *mali_global_pmu_core = NULL;
+
+static _mali_osk_errcode_t mali_pmu_wait_for_command_finish(
+ struct mali_pmu_core *pmu);
+
+struct mali_pmu_core *mali_pmu_create(_mali_osk_resource_t *resource)
+{
+ struct mali_pmu_core *pmu;
+
+ MALI_DEBUG_ASSERT(NULL == mali_global_pmu_core);
+ MALI_DEBUG_PRINT(2, ("Mali PMU: Creating Mali PMU core\n"));
+
+ pmu = (struct mali_pmu_core *)_mali_osk_malloc(
+ sizeof(struct mali_pmu_core));
+ if (NULL != pmu) {
+ pmu->registered_cores_mask = 0; /* to be set later */
+
+ if (_MALI_OSK_ERR_OK == mali_hw_core_create(&pmu->hw_core,
+ resource, PMU_REGISTER_ADDRESS_SPACE_SIZE)) {
+
+ pmu->switch_delay = _mali_osk_get_pmu_switch_delay();
+
+ mali_global_pmu_core = pmu;
+
+ return pmu;
+ }
+ _mali_osk_free(pmu);
+ }
+
+ return NULL;
+}
+
+void mali_pmu_delete(struct mali_pmu_core *pmu)
+{
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+ MALI_DEBUG_ASSERT(pmu == mali_global_pmu_core);
+
+ MALI_DEBUG_PRINT(2, ("Mali PMU: Deleting Mali PMU core\n"));
+
+ mali_global_pmu_core = NULL;
+
+ mali_hw_core_delete(&pmu->hw_core);
+ _mali_osk_free(pmu);
+}
+
+void mali_pmu_set_registered_cores_mask(struct mali_pmu_core *pmu, u32 mask)
+{
+ pmu->registered_cores_mask = mask;
+}
+
+void mali_pmu_reset(struct mali_pmu_core *pmu)
+{
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+ MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
+
+ /* Setup the desired defaults */
+ mali_hw_core_register_write_relaxed(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_INT_MASK, 0);
+ mali_hw_core_register_write_relaxed(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_SW_DELAY, pmu->switch_delay);
+}
+
+void mali_pmu_power_up_all(struct mali_pmu_core *pmu)
+{
+ u32 stat;
+
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+ MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
+
+ mali_pm_exec_lock();
+
+ mali_pmu_reset(pmu);
+
+ /* Now simply power up the domains which are marked as powered down */
+ stat = mali_hw_core_register_read(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_STATUS);
+ mali_pmu_power_up(pmu, stat);
+
+ mali_pm_exec_unlock();
+}
+
+void mali_pmu_power_down_all(struct mali_pmu_core *pmu)
+{
+ u32 stat;
+
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+ MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
+
+ mali_pm_exec_lock();
+
+ /* Now simply power down the domains which are marked as powered up */
+ stat = mali_hw_core_register_read(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_STATUS);
+ mali_pmu_power_down(pmu, (~stat) & pmu->registered_cores_mask);
+
+ mali_pm_exec_unlock();
+}
+
+_mali_osk_errcode_t mali_pmu_power_down(struct mali_pmu_core *pmu, u32 mask)
+{
+ u32 stat;
+ _mali_osk_errcode_t err;
+
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+ MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
+ MALI_DEBUG_ASSERT(mask <= pmu->registered_cores_mask);
+ MALI_DEBUG_ASSERT(0 == (mali_hw_core_register_read(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_INT_RAWSTAT) &
+ PMU_REG_VAL_IRQ));
+
+ MALI_DEBUG_PRINT(3,
+ ("PMU power down: ...................... [%s]\n",
+ mali_pm_mask_to_string(mask)));
+
+ stat = mali_hw_core_register_read(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_STATUS);
+
+ /*
+ * Assert that we are not powering down domains which are already
+ * powered down.
+ */
+ MALI_DEBUG_ASSERT(0 == (stat & mask));
+
+ mask &= ~(0x1 << MALI_DOMAIN_INDEX_DUMMY);
+
+ if (0 == mask || 0 == ((~stat) & mask)) return _MALI_OSK_ERR_OK;
+
+ mali_hw_core_register_write(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_POWER_DOWN, mask);
+
+ /*
+ * Do not wait for interrupt on Mali-300/400 if all domains are
+ * powered off by our power down command, because the HW will simply
+ * not generate an interrupt in this case.
+ */
+ if (mali_is_mali450() || mali_is_mali470() || pmu->registered_cores_mask != (mask | stat)) {
+ err = mali_pmu_wait_for_command_finish(pmu);
+ if (_MALI_OSK_ERR_OK != err) {
+ return err;
+ }
+ } else {
+ mali_hw_core_register_write(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_INT_CLEAR, PMU_REG_VAL_IRQ);
+ }
+
+#if defined(DEBUG)
+ /* Verify power status of domains after power down */
+ stat = mali_hw_core_register_read(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_STATUS);
+ MALI_DEBUG_ASSERT(mask == (stat & mask));
+#endif
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_pmu_power_up(struct mali_pmu_core *pmu, u32 mask)
+{
+ u32 stat;
+ _mali_osk_errcode_t err;
+#if !defined(CONFIG_MALI_PMU_PARALLEL_POWER_UP)
+ u32 current_domain;
+#endif
+
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+ MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
+ MALI_DEBUG_ASSERT(mask <= pmu->registered_cores_mask);
+ MALI_DEBUG_ASSERT(0 == (mali_hw_core_register_read(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_INT_RAWSTAT) &
+ PMU_REG_VAL_IRQ));
+
+ MALI_DEBUG_PRINT(3,
+ ("PMU power up: ........................ [%s]\n",
+ mali_pm_mask_to_string(mask)));
+
+ stat = mali_hw_core_register_read(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_STATUS);
+ stat &= pmu->registered_cores_mask;
+
+ mask &= ~(0x1 << MALI_DOMAIN_INDEX_DUMMY);
+ if (0 == mask || 0 == (stat & mask)) return _MALI_OSK_ERR_OK;
+
+ /*
+ * Assert that we are only powering up domains which are currently
+ * powered down.
+ */
+ MALI_DEBUG_ASSERT(mask == (stat & mask));
+
+#if defined(CONFIG_MALI_PMU_PARALLEL_POWER_UP)
+ mali_hw_core_register_write(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_POWER_UP, mask);
+
+ err = mali_pmu_wait_for_command_finish(pmu);
+ if (_MALI_OSK_ERR_OK != err) {
+ return err;
+ }
+#else
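+ /*
+ * Serial power-up sketch: walk one domain bit at a time; e.g. with
+ * mask 0x5 (both domains currently powered down, so their stat bits
+ * are set), bit 0 is powered up and its command IRQ awaited before
+ * bit 2 is started.
+ */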
+ for (current_domain = 1;
+ current_domain <= pmu->registered_cores_mask;
+ current_domain <<= 1) {
+ if (current_domain & mask & stat) {
+ mali_hw_core_register_write(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_POWER_UP,
+ current_domain);
+
+ err = mali_pmu_wait_for_command_finish(pmu);
+ if (_MALI_OSK_ERR_OK != err) {
+ return err;
+ }
+ }
+ }
+#endif
+
+#if defined(DEBUG)
+ /* Verify power status of domains after power up */
+ stat = mali_hw_core_register_read(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_STATUS);
+ MALI_DEBUG_ASSERT(0 == (stat & mask));
+#endif /* defined(DEBUG) */
+
+ return _MALI_OSK_ERR_OK;
+}
+
+static _mali_osk_errcode_t mali_pmu_wait_for_command_finish(
+ struct mali_pmu_core *pmu)
+{
+ u32 rawstat;
+ u32 timeout = MALI_REG_POLL_COUNT_SLOW;
+
+ MALI_DEBUG_ASSERT(pmu);
+
+ /* Wait for the command to complete */
+ do {
+ rawstat = mali_hw_core_register_read(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_INT_RAWSTAT);
+ --timeout;
+ } while (0 == (rawstat & PMU_REG_VAL_IRQ) && 0 < timeout);
+
+ MALI_DEBUG_ASSERT(0 < timeout);
+
+ if (0 == timeout) {
+ return _MALI_OSK_ERR_TIMEOUT;
+ }
+
+ mali_hw_core_register_write(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_INT_CLEAR, PMU_REG_VAL_IRQ);
+
+ return _MALI_OSK_ERR_OK;
+}
diff --git a/drivers/gpu/arm/utgard/common/mali_pmu.h b/drivers/gpu/arm/utgard/common/mali_pmu.h
new file mode 100644
index 000000000000..5ca78795f535
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_pmu.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmu.h
+ * Mali driver functions for the Mali 400 PMU hardware
+ */
+
+#ifndef __MALI_PMU_H__
+#define __MALI_PMU_H__
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_hw_core.h"
+
+/** @brief Mali in-built PMU hardware info; the PMU hardware knows the power mask of the registered cores
+ */
+struct mali_pmu_core {
+ struct mali_hw_core hw_core;
+ u32 registered_cores_mask;
+ u32 switch_delay;
+};
+
+/** @brief Register layout for hardware PMU
+ */
+typedef enum {
+ PMU_REG_ADDR_MGMT_POWER_UP = 0x00, /*< Power up register */
+ PMU_REG_ADDR_MGMT_POWER_DOWN = 0x04, /*< Power down register */
+ PMU_REG_ADDR_MGMT_STATUS = 0x08, /*< Core sleep status register */
+ PMU_REG_ADDR_MGMT_INT_MASK = 0x0C, /*< Interrupt mask register */
+ PMU_REG_ADDR_MGMT_INT_RAWSTAT = 0x10, /*< Interrupt raw status register */
+ PMU_REG_ADDR_MGMT_INT_CLEAR = 0x18, /*< Interrupt clear register */
+ PMU_REG_ADDR_MGMT_SW_DELAY = 0x1C, /*< Switch delay register */
+ PMU_REGISTER_ADDRESS_SPACE_SIZE = 0x28, /*< Size of register space */
+} pmu_reg_addr_mgmt_addr;
+
+#define PMU_REG_VAL_IRQ 1
+
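+/*
+ * Command sequence sketch: write a domain mask to
+ * PMU_REG_ADDR_MGMT_POWER_UP or PMU_REG_ADDR_MGMT_POWER_DOWN, poll
+ * PMU_REG_ADDR_MGMT_INT_RAWSTAT until PMU_REG_VAL_IRQ is set, then
+ * write PMU_REG_VAL_IRQ to PMU_REG_ADDR_MGMT_INT_CLEAR; see
+ * mali_pmu_power_up()/mali_pmu_power_down() in mali_pmu.c.
+ */
+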
+extern struct mali_pmu_core *mali_global_pmu_core;
+
+/** @brief Initialisation of MALI PMU
+ *
+ * This is called from the entry point of the driver in order to create and initialize the PMU resource
+ *
+ * @param resource Pointer to the PMU resource
+ * @return The created PMU object, or NULL in case of failure.
+ */
+struct mali_pmu_core *mali_pmu_create(_mali_osk_resource_t *resource);
+
+/** @brief Deallocate the PMU resource
+ *
+ * This is called on the exit of the driver to terminate the PMU resource
+ *
+ * @param pmu Pointer to PMU core object to delete
+ */
+void mali_pmu_delete(struct mali_pmu_core *pmu);
+
+/** @brief Set registered cores mask
+ *
+ * @param pmu Pointer to PMU core object
+ * @param mask All available/valid domain bits
+ */
+void mali_pmu_set_registered_cores_mask(struct mali_pmu_core *pmu, u32 mask);
+
+/** @brief Retrieves the Mali PMU core object (if any)
+ *
+ * @return The Mali PMU object, or NULL if no PMU exists.
+ */
+MALI_STATIC_INLINE struct mali_pmu_core *mali_pmu_get_global_pmu_core(void)
+{
+ return mali_global_pmu_core;
+}
+
+/** @brief Reset PMU core
+ *
+ * @param pmu Pointer to PMU core object to reset
+ */
+void mali_pmu_reset(struct mali_pmu_core *pmu);
+
+void mali_pmu_power_up_all(struct mali_pmu_core *pmu);
+
+void mali_pmu_power_down_all(struct mali_pmu_core *pmu);
+
+/** @brief Returns a mask of the currently powered up domains
+ *
+ * @param pmu Pointer to PMU core object
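+ *
+ * Note that a set bit in PMU_REG_ADDR_MGMT_STATUS means the domain is
+ * powered down, so the status is inverted here; e.g. with
+ * registered_cores_mask 0x7 and stat 0x2, the returned mask is 0x5.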
+ */
+MALI_STATIC_INLINE u32 mali_pmu_get_mask(struct mali_pmu_core *pmu)
+{
+ u32 stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
+ return ((~stat) & pmu->registered_cores_mask);
+}
+
+/** @brief MALI GPU power down using MALI in-built PMU
+ *
+ * Called to power down the specified cores.
+ *
+ * @param pmu Pointer to PMU core object to power down
+ * @param mask Mask specifying which power domains to power down
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_pmu_power_down(struct mali_pmu_core *pmu, u32 mask);
+
+/** @brief MALI GPU power up using MALI in-built PMU
+ *
+ * Called to power up the specified cores.
+ *
+ * @param pmu Pointer to PMU core object to power up
+ * @param mask Mask specifying which power domains to power up
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_pmu_power_up(struct mali_pmu_core *pmu, u32 mask);
+
+#endif /* __MALI_PMU_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_pp.c b/drivers/gpu/arm/utgard/common/mali_pp.c
new file mode 100644
index 000000000000..68bfd50bf9ae
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_pp.c
@@ -0,0 +1,501 @@
+/*
+ * Copyright (C) 2011-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_pp_job.h"
+#include "mali_pp.h"
+#include "mali_hw_core.h"
+#include "mali_group.h"
+#include "regs/mali_200_regs.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#if defined(CONFIG_MALI400_PROFILING)
+#include "mali_osk_profiling.h"
+#endif
+
+/* Number of frame registers on Mali-200 */
+#define MALI_PP_MALI200_NUM_FRAME_REGISTERS ((0x04C/4)+1)
+/* Number of frame registers on Mali-300 and later */
+#define MALI_PP_MALI400_NUM_FRAME_REGISTERS ((0x058/4)+1)
+
+static struct mali_pp_core *mali_global_pp_cores[MALI_MAX_NUMBER_OF_PP_CORES] = { NULL };
+static u32 mali_global_num_pp_cores = 0;
+
+/* Interrupt handlers */
+static void mali_pp_irq_probe_trigger(void *data);
+static _mali_osk_errcode_t mali_pp_irq_probe_ack(void *data);
+
+struct mali_pp_core *mali_pp_create(const _mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual, u32 bcast_id)
+{
+ struct mali_pp_core *core = NULL;
+
+ MALI_DEBUG_PRINT(2, ("Mali PP: Creating Mali PP core: %s\n", resource->description));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Base address of PP core: 0x%x\n", resource->base));
+
+ if (mali_global_num_pp_cores >= MALI_MAX_NUMBER_OF_PP_CORES) {
+ MALI_PRINT_ERROR(("Mali PP: Too many PP core objects created\n"));
+ return NULL;
+ }
+
+ core = _mali_osk_calloc(1, sizeof(struct mali_pp_core));
+ if (NULL != core) {
+ core->core_id = mali_global_num_pp_cores;
+ core->bcast_id = bcast_id;
+
+ if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALI200_REG_SIZEOF_REGISTER_BANK)) {
+ _mali_osk_errcode_t ret;
+
+ if (!is_virtual) {
+ ret = mali_pp_reset(core);
+ } else {
+ ret = _MALI_OSK_ERR_OK;
+ }
+
+ if (_MALI_OSK_ERR_OK == ret) {
+ ret = mali_group_add_pp_core(group, core);
+ if (_MALI_OSK_ERR_OK == ret) {
+ /* Setup IRQ handlers (which will do IRQ probing if needed) */
+ MALI_DEBUG_ASSERT(!is_virtual || -1 != resource->irq);
+
+ core->irq = _mali_osk_irq_init(resource->irq,
+ mali_group_upper_half_pp,
+ group,
+ mali_pp_irq_probe_trigger,
+ mali_pp_irq_probe_ack,
+ core,
+ resource->description);
+ if (NULL != core->irq) {
+ mali_global_pp_cores[mali_global_num_pp_cores] = core;
+ mali_global_num_pp_cores++;
+
+ return core;
+ } else {
+ MALI_PRINT_ERROR(("Mali PP: Failed to setup interrupt handlers for PP core %s\n", core->hw_core.description));
+ }
+ mali_group_remove_pp_core(group);
+ } else {
+ MALI_PRINT_ERROR(("Mali PP: Failed to add core %s to group\n", core->hw_core.description));
+ }
+ }
+ mali_hw_core_delete(&core->hw_core);
+ }
+
+ _mali_osk_free(core);
+ } else {
+ MALI_PRINT_ERROR(("Mali PP: Failed to allocate memory for PP core\n"));
+ }
+
+ return NULL;
+}
+
+void mali_pp_delete(struct mali_pp_core *core)
+{
+ u32 i;
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ _mali_osk_irq_term(core->irq);
+ mali_hw_core_delete(&core->hw_core);
+
+ /* Remove core from global list */
+ for (i = 0; i < mali_global_num_pp_cores; i++) {
+ if (mali_global_pp_cores[i] == core) {
+ mali_global_pp_cores[i] = NULL;
+ mali_global_num_pp_cores--;
+
+ if (i != mali_global_num_pp_cores) {
+ /* We removed a PP core from the middle of the array -- move the last
+ * PP core to the current position to close the gap */
+ mali_global_pp_cores[i] = mali_global_pp_cores[mali_global_num_pp_cores];
+ mali_global_pp_cores[mali_global_num_pp_cores] = NULL;
+ }
+
+ break;
+ }
+ }
+
+ _mali_osk_free(core);
+}
+
+void mali_pp_stop_bus(struct mali_pp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+ /* Will only send the stop bus command, and not wait for it to complete */
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_STOP_BUS);
+}
+
+_mali_osk_errcode_t mali_pp_stop_bus_wait(struct mali_pp_core *core)
+{
+ int i;
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ /* Send the stop bus command. */
+ mali_pp_stop_bus(core);
+
+ /* Wait for bus to be stopped */
+ for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) {
+ if (mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS) & MALI200_REG_VAL_STATUS_BUS_STOPPED)
+ break;
+ }
+
+ if (MALI_REG_POLL_COUNT_FAST == i) {
+ MALI_PRINT_ERROR(("Mali PP: Failed to stop bus on %s. Status: 0x%08x\n", core->hw_core.description, mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS)));
+ return _MALI_OSK_ERR_FAULT;
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Frame register reset values.
+ * Taken from the Mali400 TRM, 3.6. Pixel processor control register summary */
+static const u32 mali_frame_registers_reset_values[_MALI_PP_MAX_FRAME_REGISTERS] = {
+ 0x0, /* Renderer List Address Register */
+ 0x0, /* Renderer State Word Base Address Register */
+ 0x0, /* Renderer Vertex Base Register */
+ 0x2, /* Feature Enable Register */
+ 0x0, /* Z Clear Value Register */
+ 0x0, /* Stencil Clear Value Register */
+ 0x0, /* ABGR Clear Value 0 Register */
+ 0x0, /* ABGR Clear Value 1 Register */
+ 0x0, /* ABGR Clear Value 2 Register */
+ 0x0, /* ABGR Clear Value 3 Register */
+ 0x0, /* Bounding Box Left Right Register */
+ 0x0, /* Bounding Box Bottom Register */
+ 0x0, /* FS Stack Address Register */
+ 0x0, /* FS Stack Size and Initial Value Register */
+ 0x0, /* Reserved */
+ 0x0, /* Reserved */
+ 0x0, /* Origin Offset X Register */
+ 0x0, /* Origin Offset Y Register */
+ 0x75, /* Subpixel Specifier Register */
+ 0x0, /* Tiebreak mode Register */
+ 0x0, /* Polygon List Format Register */
+ 0x0, /* Scaling Register */
+ 0x0 /* Tilebuffer configuration Register */
+};
+
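+/*
+ * The frame register array is indexed by register offset /
+ * sizeof(u32): e.g. the entry at MALI200_REG_ADDR_FRAME / sizeof(u32)
+ * holds the reset value of the Renderer List Address Register;
+ * mali_pp_job_start() below picks reset values the same way.
+ */
+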
+/* WBx register reset values */
+static const u32 mali_wb_registers_reset_values[_MALI_PP_MAX_WB_REGISTERS] = {
+ 0x0, /* WBx Source Select Register */
+ 0x0, /* WBx Target Address Register */
+ 0x0, /* WBx Target Pixel Format Register */
+ 0x0, /* WBx Target AA Format Register */
+ 0x0, /* WBx Target Layout */
+ 0x0, /* WBx Target Scanline Length */
+ 0x0, /* WBx Target Flags Register */
+ 0x0, /* WBx MRT Enable Register */
+ 0x0, /* WBx MRT Offset Register */
+ 0x0, /* WBx Global Test Enable Register */
+ 0x0, /* WBx Global Test Reference Value Register */
+ 0x0 /* WBx Global Test Compare Function Register */
+};
+
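+/*
+ * A zero in the WBx Source Select entry (index 0) means the write-back
+ * unit is disabled; mali_pp_job_start() only programs a WBx unit whose
+ * source select register is non-zero.
+ */
+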
+/* Performance Counter 0 Enable Register reset value */
+static const u32 mali_perf_cnt_enable_reset_value = 0;
+
+_mali_osk_errcode_t mali_pp_hard_reset(struct mali_pp_core *core)
+{
+ /* Bus must be stopped before calling this function */
+ const u32 reset_wait_target_register = MALI200_REG_ADDR_MGMT_PERF_CNT_0_LIMIT;
+ const u32 reset_invalid_value = 0xC0FFE000;
+ const u32 reset_check_value = 0xC01A0000;
+ int i;
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+ MALI_DEBUG_PRINT(2, ("Mali PP: Hard reset of core %s\n", core->hw_core.description));
+
+ /* Set register to a bogus value. The register will be used to detect when reset is complete */
+ mali_hw_core_register_write_relaxed(&core->hw_core, reset_wait_target_register, reset_invalid_value);
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_NONE);
+
+ /* Force core to reset */
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_FORCE_RESET);
+
+ /* Wait for reset to be complete */
+ for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) {
+ mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_check_value);
+ if (reset_check_value == mali_hw_core_register_read(&core->hw_core, reset_wait_target_register)) {
+ break;
+ }
+ }
+
+ if (MALI_REG_POLL_COUNT_FAST == i) {
+ MALI_PRINT_ERROR(("Mali PP: The hard reset loop didn't work, unable to recover\n"));
+ }
+
+ mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, 0x00000000); /* set it back to the default */
+ /* Re-enable interrupts */
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL);
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_pp_reset_async(struct mali_pp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ MALI_DEBUG_PRINT(4, ("Mali PP: Reset of core %s\n", core->hw_core.description));
+
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, 0); /* disable the IRQs */
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, MALI200_REG_VAL_IRQ_MASK_ALL);
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI400PP_REG_VAL_CTRL_MGMT_SOFT_RESET);
+}
+
+_mali_osk_errcode_t mali_pp_reset_wait(struct mali_pp_core *core)
+{
+ int i;
+ u32 rawstat = 0;
+
+ for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) {
+ u32 status = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS);
+ if (!(status & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE)) {
+ rawstat = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT);
+ if (rawstat == MALI400PP_REG_VAL_IRQ_RESET_COMPLETED) {
+ break;
+ }
+ }
+ }
+
+ if (i == MALI_REG_POLL_COUNT_FAST) {
+ MALI_PRINT_ERROR(("Mali PP: Failed to reset core %s, rawstat: 0x%08x\n",
+ core->hw_core.description, rawstat));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Re-enable interrupts */
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL);
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_pp_reset(struct mali_pp_core *core)
+{
+ mali_pp_reset_async(core);
+ return mali_pp_reset_wait(core);
+}
+
+void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job, mali_bool restart_virtual)
+{
+ u32 relative_address;
+ u32 start_index;
+ u32 nr_of_regs;
+ u32 *frame_registers = mali_pp_job_get_frame_registers(job);
+ u32 *wb0_registers = mali_pp_job_get_wb0_registers(job);
+ u32 *wb1_registers = mali_pp_job_get_wb1_registers(job);
+ u32 *wb2_registers = mali_pp_job_get_wb2_registers(job);
+ u32 counter_src0 = mali_pp_job_get_perf_counter_src0(job, sub_job);
+ u32 counter_src1 = mali_pp_job_get_perf_counter_src1(job, sub_job);
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ /* Write frame registers */
+
+ /*
+ * There are two frame registers which are different for each sub job:
+ * 1. The Renderer List Address Register (MALI200_REG_ADDR_FRAME)
+ * 2. The FS Stack Address Register (MALI200_REG_ADDR_STACK)
+ */
+ mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_FRAME, mali_pp_job_get_addr_frame(job, sub_job), mali_frame_registers_reset_values[MALI200_REG_ADDR_FRAME / sizeof(u32)]);
+
+ /* For virtual jobs, the stack address shouldn't be broadcast but written individually */
+ if (!mali_pp_job_is_virtual(job) || restart_virtual) {
+ mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_STACK, mali_pp_job_get_addr_stack(job, sub_job), mali_frame_registers_reset_values[MALI200_REG_ADDR_STACK / sizeof(u32)]);
+ }
+
+ /* Write registers between MALI200_REG_ADDR_FRAME and MALI200_REG_ADDR_STACK */
+ relative_address = MALI200_REG_ADDR_RSW;
+ start_index = MALI200_REG_ADDR_RSW / sizeof(u32);
+ nr_of_regs = (MALI200_REG_ADDR_STACK - MALI200_REG_ADDR_RSW) / sizeof(u32);
+
+ mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core,
+ relative_address, &frame_registers[start_index],
+ nr_of_regs, &mali_frame_registers_reset_values[start_index]);
+
+ /* MALI200_REG_ADDR_STACK_SIZE */
+ relative_address = MALI200_REG_ADDR_STACK_SIZE;
+ start_index = MALI200_REG_ADDR_STACK_SIZE / sizeof(u32);
+
+ mali_hw_core_register_write_relaxed_conditional(&core->hw_core,
+ relative_address, frame_registers[start_index],
+ mali_frame_registers_reset_values[start_index]);
+
+ /* Skip 2 reserved registers */
+
+ /* Write remaining registers */
+ relative_address = MALI200_REG_ADDR_ORIGIN_OFFSET_X;
+ start_index = MALI200_REG_ADDR_ORIGIN_OFFSET_X / sizeof(u32);
+ nr_of_regs = MALI_PP_MALI400_NUM_FRAME_REGISTERS - MALI200_REG_ADDR_ORIGIN_OFFSET_X / sizeof(u32);
+
+ mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core,
+ relative_address, &frame_registers[start_index],
+ nr_of_regs, &mali_frame_registers_reset_values[start_index]);
+
+ /* Write WBx registers */
+ if (wb0_registers[0]) { /* M200_WB0_REG_SOURCE_SELECT register */
+ mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB0, wb0_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
+ }
+
+ if (wb1_registers[0]) { /* M200_WB1_REG_SOURCE_SELECT register */
+ mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB1, wb1_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
+ }
+
+ if (wb2_registers[0]) { /* M200_WB2_REG_SOURCE_SELECT register */
+ mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB2, wb2_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
+ }
+
+ if (MALI_HW_CORE_NO_COUNTER != counter_src0) {
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC, counter_src0);
+ mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE, mali_perf_cnt_enable_reset_value);
+ }
+ if (MALI_HW_CORE_NO_COUNTER != counter_src1) {
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC, counter_src1);
+ mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE, mali_perf_cnt_enable_reset_value);
+ }
+
+#ifdef CONFIG_MALI400_HEATMAPS_ENABLED
+ if (job->uargs.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_HEATMAP_ENABLE) {
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERFMON_CONTR, ((job->uargs.tilesx & 0x3FF) << 16) | 1);
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERFMON_BASE, job->uargs.heatmap_mem & 0xFFFFFFF8);
+ }
+#endif /* CONFIG_MALI400_HEATMAPS_ENABLED */
+
+ MALI_DEBUG_PRINT(3, ("Mali PP: Starting job 0x%08X part %u/%u on PP core %s\n", job, sub_job + 1, mali_pp_job_get_sub_job_count(job), core->hw_core.description));
+
+ /* Add a barrier to make sure all register writes are finished */
+ _mali_osk_write_mem_barrier();
+
+ /* This is the command that starts the core.
+ *
+ * Don't actually run the job if PROFILING_SKIP_PP_JOBS is set; just
+ * force the core to assert the completion interrupt.
+ */
+#if !defined(PROFILING_SKIP_PP_JOBS)
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_START_RENDERING);
+#else
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, MALI200_REG_VAL_IRQ_END_OF_FRAME);
+#endif
+
+ /* Add a barrier to make sure the previous register writes are finished */
+ _mali_osk_write_mem_barrier();
+}
+
+u32 mali_pp_core_get_version(struct mali_pp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+ return mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_VERSION);
+}
+
+struct mali_pp_core *mali_pp_get_global_pp_core(u32 index)
+{
+ if (mali_global_num_pp_cores > index) {
+ return mali_global_pp_cores[index];
+ }
+
+ return NULL;
+}
+
+u32 mali_pp_get_glob_num_pp_cores(void)
+{
+ return mali_global_num_pp_cores;
+}
+
+/* ------------- interrupt handling below ------------------ */
+static void mali_pp_irq_probe_trigger(void *data)
+{
+ struct mali_pp_core *core = (struct mali_pp_core *)data;
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, MALI200_REG_VAL_IRQ_BUS_ERROR);
+ _mali_osk_mem_barrier();
+}
+
+static _mali_osk_errcode_t mali_pp_irq_probe_ack(void *data)
+{
+ struct mali_pp_core *core = (struct mali_pp_core *)data;
+ u32 irq_readout;
+
+ irq_readout = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_STATUS);
+ if (MALI200_REG_VAL_IRQ_BUS_ERROR & irq_readout) {
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_BUS_ERROR);
+ _mali_osk_mem_barrier();
+ return _MALI_OSK_ERR_OK;
+ }
+
+ return _MALI_OSK_ERR_FAULT;
+}
+
+
+#if 0
+static void mali_pp_print_registers(struct mali_pp_core *core)
+{
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_VERSION = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_VERSION)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_CURRENT_REND_LIST_ADDR = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_CURRENT_REND_LIST_ADDR)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_STATUS = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_INT_RAWSTAT = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_INT_MASK = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_INT_STATUS = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_STATUS)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE)));
+}
+#endif
+
+#if 0
+void mali_pp_print_state(struct mali_pp_core *core)
+{
+ MALI_DEBUG_PRINT(2, ("Mali PP: State: 0x%08x\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS)));
+}
+#endif
+
+void mali_pp_update_performance_counters(struct mali_pp_core *parent, struct mali_pp_core *child, struct mali_pp_job *job, u32 subjob)
+{
+ u32 val0 = 0;
+ u32 val1 = 0;
+ u32 counter_src0 = mali_pp_job_get_perf_counter_src0(job, subjob);
+ u32 counter_src1 = mali_pp_job_get_perf_counter_src1(job, subjob);
+#if defined(CONFIG_MALI400_PROFILING)
+ int counter_index = COUNTER_FP_0_C0 + (2 * child->core_id);
+#endif
+
+ if (MALI_HW_CORE_NO_COUNTER != counter_src0) {
+ val0 = mali_hw_core_register_read(&child->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE);
+ mali_pp_job_set_perf_counter_value0(job, subjob, val0);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ _mali_osk_profiling_report_hw_counter(counter_index, val0);
+ _mali_osk_profiling_record_global_counters(counter_index, val0);
+#endif
+ }
+
+ if (MALI_HW_CORE_NO_COUNTER != counter_src1) {
+ val1 = mali_hw_core_register_read(&child->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE);
+ mali_pp_job_set_perf_counter_value1(job, subjob, val1);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ _mali_osk_profiling_report_hw_counter(counter_index + 1, val1);
+ _mali_osk_profiling_record_global_counters(counter_index + 1, val1);
+#endif
+ }
+}
+
+#if MALI_STATE_TRACKING
+u32 mali_pp_dump_state(struct mali_pp_core *core, char *buf, u32 size)
+{
+ int n = 0;
+
+ n += _mali_osk_snprintf(buf + n, size - n, "\tPP #%d: %s\n", core->core_id, core->hw_core.description);
+
+ return n;
+}
+#endif
diff --git a/drivers/gpu/arm/utgard/common/mali_pp.h b/drivers/gpu/arm/utgard/common/mali_pp.h
new file mode 100644
index 000000000000..45712a30e831
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_pp.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright (C) 2011-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PP_H__
+#define __MALI_PP_H__
+
+#include "mali_osk.h"
+#include "mali_pp_job.h"
+#include "mali_hw_core.h"
+
+struct mali_group;
+
+#define MALI_MAX_NUMBER_OF_PP_CORES 9
+
+/**
+ * Definition of the PP core struct
+ * Used to track a PP core in the system.
+ */
+struct mali_pp_core {
+ struct mali_hw_core hw_core; /**< Common for all HW cores */
+ _mali_osk_irq_t *irq; /**< IRQ handler */
+ u32 core_id; /**< Unique core ID */
+ u32 bcast_id; /**< The "flag" value used by the Mali-450 broadcast and DLBU unit */
+};
+
+_mali_osk_errcode_t mali_pp_initialize(void);
+void mali_pp_terminate(void);
+
+struct mali_pp_core *mali_pp_create(const _mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual, u32 bcast_id);
+void mali_pp_delete(struct mali_pp_core *core);
+
+void mali_pp_stop_bus(struct mali_pp_core *core);
+_mali_osk_errcode_t mali_pp_stop_bus_wait(struct mali_pp_core *core);
+void mali_pp_reset_async(struct mali_pp_core *core);
+_mali_osk_errcode_t mali_pp_reset_wait(struct mali_pp_core *core);
+_mali_osk_errcode_t mali_pp_reset(struct mali_pp_core *core);
+_mali_osk_errcode_t mali_pp_hard_reset(struct mali_pp_core *core);
+
+void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job, mali_bool restart_virtual);
+
+u32 mali_pp_core_get_version(struct mali_pp_core *core);
+
+MALI_STATIC_INLINE u32 mali_pp_core_get_id(struct mali_pp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+ return core->core_id;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_core_get_bcast_id(struct mali_pp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+ return core->bcast_id;
+}
+
+struct mali_pp_core *mali_pp_get_global_pp_core(u32 index);
+u32 mali_pp_get_glob_num_pp_cores(void);
+
+/* Debug */
+u32 mali_pp_dump_state(struct mali_pp_core *core, char *buf, u32 size);
+
+/**
+ * Put instrumented HW counters from the core(s) to the job object (if enabled)
+ *
+ * parent and child are always the same, except for virtual jobs on Mali-450.
+ * In this case, the counters will be enabled on the virtual core (parent),
+ * but values need to be read from the child cores.
+ *
+ * @param parent The core used to see if the counters were enabled
+ * @param child The core to actually read the values from
+ * @param job Job object to update with counter values (if enabled)
+ * @param subjob Which subjob the counters are applicable for (core ID for virtual jobs)
+ */
+void mali_pp_update_performance_counters(struct mali_pp_core *parent, struct mali_pp_core *child, struct mali_pp_job *job, u32 subjob);
+
+MALI_STATIC_INLINE const char *mali_pp_core_description(struct mali_pp_core *core)
+{
+ return core->hw_core.description;
+}
+
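+/*
+ * Interrupt triage: rawstat is masked with MALI200_REG_VAL_IRQ_MASK_USED;
+ * zero means the interrupt was not for us, exactly END_OF_FRAME means
+ * the job completed successfully, and anything else is an error.
+ */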
+MALI_STATIC_INLINE enum mali_interrupt_result mali_pp_get_interrupt_result(struct mali_pp_core *core)
+{
+ u32 rawstat_used = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT) &
+ MALI200_REG_VAL_IRQ_MASK_USED;
+ if (0 == rawstat_used) {
+ return MALI_INTERRUPT_RESULT_NONE;
+ } else if (MALI200_REG_VAL_IRQ_END_OF_FRAME == rawstat_used) {
+ return MALI_INTERRUPT_RESULT_SUCCESS;
+ }
+ return MALI_INTERRUPT_RESULT_ERROR;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_get_rawstat(struct mali_pp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+ return mali_hw_core_register_read(&core->hw_core,
+ MALI200_REG_ADDR_MGMT_INT_RAWSTAT);
+}
+
+
+MALI_STATIC_INLINE u32 mali_pp_is_active(struct mali_pp_core *core)
+{
+ u32 status = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS);
+ return (status & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE) ? MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE void mali_pp_mask_all_interrupts(struct mali_pp_core *core)
+{
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_NONE);
+}
+
+MALI_STATIC_INLINE void mali_pp_enable_interrupts(struct mali_pp_core *core)
+{
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+}
+
+MALI_STATIC_INLINE void mali_pp_write_addr_renderer_list(struct mali_pp_core *core,
+ struct mali_pp_job *job, u32 subjob)
+{
+ u32 addr = mali_pp_job_get_addr_frame(job, subjob);
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_FRAME, addr);
+}
+
+
+MALI_STATIC_INLINE void mali_pp_write_addr_stack(struct mali_pp_core *core, struct mali_pp_job *job)
+{
+ u32 addr = mali_pp_job_get_addr_stack(job, core->core_id);
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_STACK, addr);
+}
+
+#endif /* __MALI_PP_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_pp_job.c b/drivers/gpu/arm/utgard/common/mali_pp_job.c
new file mode 100644
index 000000000000..5528360841af
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_pp_job.c
@@ -0,0 +1,308 @@
+/*
+ * Copyright (C) 2011-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_pp.h"
+#include "mali_pp_job.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_kernel_common.h"
+#include "mali_uk_types.h"
+#include "mali_executor.h"
+#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+#include "linux/mali_memory_dma_buf.h"
+#endif
+#include "mali_memory_swap_alloc.h"
+#include "mali_scheduler.h"
+
+static u32 pp_counter_src0 = MALI_HW_CORE_NO_COUNTER; /**< Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */
+static u32 pp_counter_src1 = MALI_HW_CORE_NO_COUNTER; /**< Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
+static _mali_osk_atomic_t pp_counter_per_sub_job_count; /**< Number of values in the two arrays that are != MALI_HW_CORE_NO_COUNTER */
+static u32 pp_counter_per_sub_job_src0[_MALI_PP_MAX_SUB_JOBS] = { MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER };
+static u32 pp_counter_per_sub_job_src1[_MALI_PP_MAX_SUB_JOBS] = { MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER };
+
+void mali_pp_job_initialize(void)
+{
+ _mali_osk_atomic_init(&pp_counter_per_sub_job_count, 0);
+}
+
+void mali_pp_job_terminate(void)
+{
+ _mali_osk_atomic_term(&pp_counter_per_sub_job_count);
+}
+
+struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session,
+ _mali_uk_pp_start_job_s __user *uargs, u32 id)
+{
+ struct mali_pp_job *job;
+ u32 perf_counter_flag;
+
+ job = _mali_osk_calloc(1, sizeof(struct mali_pp_job));
+ if (NULL != job) {
+
+ _mali_osk_list_init(&job->list);
+ _mali_osk_list_init(&job->session_fb_lookup_list);
+
+ if (0 != _mali_osk_copy_from_user(&job->uargs, uargs, sizeof(_mali_uk_pp_start_job_s))) {
+ goto fail;
+ }
+
+ if (job->uargs.num_cores > _MALI_PP_MAX_SUB_JOBS) {
+ MALI_PRINT_ERROR(("Mali PP job: Too many sub jobs specified in job object\n"));
+ goto fail;
+ }
+
+ if (!mali_pp_job_use_no_notification(job)) {
+ job->finished_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_PP_FINISHED, sizeof(_mali_uk_pp_job_finished_s));
+ if (NULL == job->finished_notification) goto fail;
+ }
+
+ perf_counter_flag = mali_pp_job_get_perf_counter_flag(job);
+
+ /* Case when no counters came from user space,
+ * so pass the debugfs / DS-5 provided global ones to the job object */
+ if (!((perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) ||
+ (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE))) {
+ u32 sub_job_count = _mali_osk_atomic_read(&pp_counter_per_sub_job_count);
+
+ /* These counters apply to all virtual jobs, and wherever no per sub job counter is specified */
+ job->uargs.perf_counter_src0 = pp_counter_src0;
+ job->uargs.perf_counter_src1 = pp_counter_src1;
+
+ /* We only copy the per sub job array if it is enabled with at least one counter */
+ if (0 < sub_job_count) {
+ job->perf_counter_per_sub_job_count = sub_job_count;
+ _mali_osk_memcpy(job->perf_counter_per_sub_job_src0, pp_counter_per_sub_job_src0, sizeof(pp_counter_per_sub_job_src0));
+ _mali_osk_memcpy(job->perf_counter_per_sub_job_src1, pp_counter_per_sub_job_src1, sizeof(pp_counter_per_sub_job_src1));
+ }
+ }
+
+ job->session = session;
+ job->id = id;
+
+ job->sub_jobs_num = job->uargs.num_cores ? job->uargs.num_cores : 1;
+ job->pid = _mali_osk_get_pid();
+ job->tid = _mali_osk_get_tid();
+
+ _mali_osk_atomic_init(&job->sub_jobs_completed, 0);
+ _mali_osk_atomic_init(&job->sub_job_errors, 0);
+ job->swap_status = MALI_NO_SWAP_IN;
+ job->user_notification = MALI_FALSE;
+ job->num_pp_cores_in_virtual = 0;
+
+ if (job->uargs.num_memory_cookies > session->allocation_mgr.mali_allocation_num) {
+ MALI_PRINT_ERROR(("Mali PP job: The number of memory cookies is invalid !\n"));
+ goto fail;
+ }
+
+ if (job->uargs.num_memory_cookies > 0) {
+ u32 size;
+ u32 __user *memory_cookies = (u32 __user *)(uintptr_t)job->uargs.memory_cookies;
+
+ size = sizeof(*memory_cookies) * (job->uargs.num_memory_cookies);
+
+ job->memory_cookies = _mali_osk_malloc(size);
+ if (NULL == job->memory_cookies) {
+ MALI_PRINT_ERROR(("Mali PP job: Failed to allocate %d bytes of memory cookies!\n", size));
+ goto fail;
+ }
+
+ if (0 != _mali_osk_copy_from_user(job->memory_cookies, memory_cookies, size)) {
+ MALI_PRINT_ERROR(("Mali PP job: Failed to copy %d bytes of memory cookies from user!\n", size));
+ goto fail;
+ }
+ }
+
+ if (_MALI_OSK_ERR_OK != mali_pp_job_check(job)) {
+ /* Not a valid job. */
+ goto fail;
+ }
+
+ mali_timeline_tracker_init(&job->tracker, MALI_TIMELINE_TRACKER_PP, NULL, job);
+ mali_timeline_fence_copy_uk_fence(&(job->tracker.fence), &(job->uargs.fence));
+
+ mali_mem_swap_in_pages(job);
+
+ return job;
+ }
+
+fail:
+ if (NULL != job) {
+ mali_pp_job_delete(job);
+ }
+
+ return NULL;
+}
+
+void mali_pp_job_delete(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_fb_lookup_list));
+
+ if (NULL != job->memory_cookies) {
+#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+ /* Unmap buffers attached to job */
+ mali_dma_buf_unmap_job(job);
+#endif
+ if (MALI_NO_SWAP_IN != job->swap_status) {
+ mali_mem_swap_out_pages(job);
+ }
+
+ _mali_osk_free(job->memory_cookies);
+ }
+
+ if (job->user_notification) {
+ mali_scheduler_return_pp_job_to_user(job,
+ job->num_pp_cores_in_virtual);
+ }
+
+ if (NULL != job->finished_notification) {
+ _mali_osk_notification_delete(job->finished_notification);
+ }
+
+ _mali_osk_atomic_term(&job->sub_jobs_completed);
+ _mali_osk_atomic_term(&job->sub_job_errors);
+
+ _mali_osk_free(job);
+}
+
+void mali_pp_job_list_add(struct mali_pp_job *job, _mali_osk_list_t *list)
+{
+ struct mali_pp_job *iter;
+ struct mali_pp_job *tmp;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+
+ /* Find position in list/queue where job should be added. */
+ _MALI_OSK_LIST_FOREACHENTRY_REVERSE(iter, tmp, list,
+ struct mali_pp_job, list) {
+ /* job should be started after iter if iter is in progress. */
+ if (0 < iter->sub_jobs_started) {
+ break;
+ }
+
+ /*
+ * job should be started after iter if it has a higher
+ * job id. A span is used to handle job id wrapping.
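+ * E.g. with u32 wrap-around, job id 0x00000002 minus iter id
+ * 0xFFFFFFF0 is 0x12; a small difference (below
+ * MALI_SCHEDULER_JOB_ID_SPAN) marks job as the newer of the two.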
+ */
+ if ((mali_pp_job_get_id(job) -
+ mali_pp_job_get_id(iter)) <
+ MALI_SCHEDULER_JOB_ID_SPAN) {
+ break;
+ }
+ }
+
+ _mali_osk_list_add(&job->list, &iter->list);
+}
+
+
+u32 mali_pp_job_get_perf_counter_src0(struct mali_pp_job *job, u32 sub_job)
+{
+ /* Virtual jobs always use the global job counter, as do jobs without any per sub job counters */
+ if (mali_pp_job_is_virtual(job) || 0 == job->perf_counter_per_sub_job_count) {
+ return job->uargs.perf_counter_src0;
+ }
+
+ /* Use per sub job counter if enabled... */
+ if (MALI_HW_CORE_NO_COUNTER != job->perf_counter_per_sub_job_src0[sub_job]) {
+ return job->perf_counter_per_sub_job_src0[sub_job];
+ }
+
+ /* ...else default to global job counter */
+ return job->uargs.perf_counter_src0;
+}
+
+u32 mali_pp_job_get_perf_counter_src1(struct mali_pp_job *job, u32 sub_job)
+{
+ /* Virtual jobs always use the global job counter, as do jobs without any per sub job counters */
+ if (mali_pp_job_is_virtual(job) || 0 == job->perf_counter_per_sub_job_count) {
+ return job->uargs.perf_counter_src1;
+ }
+
+ /* Use per sub job counter if enabled... */
+ if (MALI_HW_CORE_NO_COUNTER != job->perf_counter_per_sub_job_src1[sub_job]) {
+ return job->perf_counter_per_sub_job_src1[sub_job];
+ }
+
+ /* ...else default to global job counter */
+ return job->uargs.perf_counter_src1;
+}
+
+void mali_pp_job_set_pp_counter_global_src0(u32 counter)
+{
+ pp_counter_src0 = counter;
+}
+
+void mali_pp_job_set_pp_counter_global_src1(u32 counter)
+{
+ pp_counter_src1 = counter;
+}
+
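+/*
+ * Bookkeeping note for the two setters below: changing a sub job entry
+ * from MALI_HW_CORE_NO_COUNTER to a real counter increments
+ * pp_counter_per_sub_job_count, and changing it back decrements it, so
+ * the count matches the number of enabled entries across both arrays.
+ */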
+void mali_pp_job_set_pp_counter_sub_job_src0(u32 sub_job, u32 counter)
+{
+ MALI_DEBUG_ASSERT(sub_job < _MALI_PP_MAX_SUB_JOBS);
+
+ if (MALI_HW_CORE_NO_COUNTER == pp_counter_per_sub_job_src0[sub_job]) {
+ /* increment count since existing counter was disabled */
+ _mali_osk_atomic_inc(&pp_counter_per_sub_job_count);
+ }
+
+ if (MALI_HW_CORE_NO_COUNTER == counter) {
+ /* decrement count since new counter is disabled */
+ _mali_osk_atomic_dec(&pp_counter_per_sub_job_count);
+ }
+
+ /* Note: a change from MALI_HW_CORE_NO_COUNTER to MALI_HW_CORE_NO_COUNTER will both increment and decrement, so the net change is zero */
+
+ pp_counter_per_sub_job_src0[sub_job] = counter;
+}
+
+void mali_pp_job_set_pp_counter_sub_job_src1(u32 sub_job, u32 counter)
+{
+ MALI_DEBUG_ASSERT(sub_job < _MALI_PP_MAX_SUB_JOBS);
+
+ if (MALI_HW_CORE_NO_COUNTER == pp_counter_per_sub_job_src1[sub_job]) {
+ /* increment count since existing counter was disabled */
+ _mali_osk_atomic_inc(&pp_counter_per_sub_job_count);
+ }
+
+ if (MALI_HW_CORE_NO_COUNTER == counter) {
+ /* decrement count since new counter is disabled */
+ _mali_osk_atomic_dec(&pp_counter_per_sub_job_count);
+ }
+
+ /* Note: a change from MALI_HW_CORE_NO_COUNTER to MALI_HW_CORE_NO_COUNTER will both increment and decrement, so the net change is zero */
+
+ pp_counter_per_sub_job_src1[sub_job] = counter;
+}
+
+u32 mali_pp_job_get_pp_counter_global_src0(void)
+{
+ return pp_counter_src0;
+}
+
+u32 mali_pp_job_get_pp_counter_global_src1(void)
+{
+ return pp_counter_src1;
+}
+
+u32 mali_pp_job_get_pp_counter_sub_job_src0(u32 sub_job)
+{
+ MALI_DEBUG_ASSERT(sub_job < _MALI_PP_MAX_SUB_JOBS);
+ return pp_counter_per_sub_job_src0[sub_job];
+}
+
+u32 mali_pp_job_get_pp_counter_sub_job_src1(u32 sub_job)
+{
+ MALI_DEBUG_ASSERT(sub_job < _MALI_PP_MAX_SUB_JOBS);
+ return pp_counter_per_sub_job_src1[sub_job];
+}
diff --git a/drivers/gpu/arm/utgard/common/mali_pp_job.h b/drivers/gpu/arm/utgard/common/mali_pp_job.h
new file mode 100644
index 000000000000..7b9d2efa3019
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_pp_job.h
@@ -0,0 +1,574 @@
+/*
+ * Copyright (C) 2011-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PP_JOB_H__
+#define __MALI_PP_JOB_H__
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_uk_types.h"
+#include "mali_session.h"
+#include "mali_kernel_common.h"
+#include "regs/mali_200_regs.h"
+#include "mali_kernel_core.h"
+#include "mali_dlbu.h"
+#include "mali_timeline.h"
+#include "mali_scheduler.h"
+#include "mali_executor.h"
+#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+#include "linux/mali_memory_dma_buf.h"
+#endif
+
+typedef enum pp_job_status {
+ MALI_NO_SWAP_IN,
+ MALI_SWAP_IN_FAIL,
+ MALI_SWAP_IN_SUCC,
+} pp_job_status;
+
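+/*
+ * Swap status lifecycle: a job starts out as MALI_NO_SWAP_IN (set in
+ * mali_pp_job_create(), which then calls mali_mem_swap_in_pages());
+ * mali_pp_job_delete() swaps the pages back out unless the status is
+ * still MALI_NO_SWAP_IN.
+ */
+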
+/**
+ * This structure represents a PP job, including all sub jobs.
+ *
+ * The PP job object itself is not protected by any single lock,
+ * but relies on other locks instead (scheduler, executor and timeline lock).
+ * Think of the job object as moving between these subsystems throughout
+ * its lifetime. Different parts of the PP job struct are used by
+ * different subsystems. Accessor functions ensure that the correct
+ * lock is taken.
+ * Do NOT access any data members directly from outside this module!
+ */
+struct mali_pp_job {
+ /*
+ * These members are typically only set at creation,
+ * and only read later on.
+ * They do not require any lock protection.
+ */
+ _mali_uk_pp_start_job_s uargs; /**< Arguments from user space */
+ struct mali_session_data *session; /**< Session which submitted this job */
+ u32 pid; /**< Process ID of submitting process */
+ u32 tid; /**< Thread ID of submitting thread */
+ u32 id; /**< Identifier for this job in kernel space (sequential numbering) */
+ u32 cache_order; /**< Cache order used for L2 cache flushing (sequential numbering) */
+ struct mali_timeline_tracker tracker; /**< Timeline tracker for this job */
+ _mali_osk_notification_t *finished_notification; /**< Notification sent back to userspace on job complete */
+ u32 perf_counter_per_sub_job_count; /**< Number of values in the two arrays that are != MALI_HW_CORE_NO_COUNTER */
+ u32 perf_counter_per_sub_job_src0[_MALI_PP_MAX_SUB_JOBS]; /**< Per sub job counters src0 */
+ u32 perf_counter_per_sub_job_src1[_MALI_PP_MAX_SUB_JOBS]; /**< Per sub job counters src1 */
+ u32 sub_jobs_num; /**< Number of subjobs; set to 1 for Mali-450 if DLBU is used, otherwise equals number of PP cores */
+
+ pp_job_status swap_status; /**< Tracks each PP job's swap status; if swap-in failed, the scheduler needs to drop the job */
+ mali_bool user_notification; /**< Set when deletion of a PP job is deferred and a job finish notification must still be sent to user space */
+ u32 num_pp_cores_in_virtual; /**< How many PP cores we had when the job finished */
+
+ /*
+ * These members are used by both scheduler and executor.
+ * They are "protected" by atomic operations.
+ */
+ _mali_osk_atomic_t sub_jobs_completed; /**< Number of completed sub-jobs in this superjob */
+ _mali_osk_atomic_t sub_job_errors; /**< Bitfield with errors (the errors for each sub-job are OR'ed together) */
+
+ /*
+ * These members are used by the scheduler, but only while no one else
+ * knows about this job object except the working function.
+ * No lock is thus needed for these.
+ */
+ u32 *memory_cookies; /**< Memory cookies attached to job */
+
+ /*
+ * These members are used by the scheduler,
+ * protected by scheduler lock
+ */
+ _mali_osk_list_t list; /**< Used to link jobs together in the scheduler queue */
+ _mali_osk_list_t session_fb_lookup_list; /**< Used to link jobs together from the same frame builder in the session */
+ u32 sub_jobs_started; /**< Total number of sub-jobs started (always started in ascending order) */
+
+ /*
+ * Set by executor/group on job completion, read by scheduler when
+ * returning job to user. Hold executor lock when setting,
+ * no lock needed when reading
+ */
+ u32 perf_counter_value0[_MALI_PP_MAX_SUB_JOBS]; /**< Value of performance counter 0 (to be returned to user space), one for each sub job */
+ u32 perf_counter_value1[_MALI_PP_MAX_SUB_JOBS]; /**< Value of performance counter 1 (to be returned to user space), one for each sub job */
+};
+
+void mali_pp_job_initialize(void);
+void mali_pp_job_terminate(void);
+
+struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session, _mali_uk_pp_start_job_s *uargs, u32 id);
+void mali_pp_job_delete(struct mali_pp_job *job);
+
+u32 mali_pp_job_get_perf_counter_src0(struct mali_pp_job *job, u32 sub_job);
+u32 mali_pp_job_get_perf_counter_src1(struct mali_pp_job *job, u32 sub_job);
+
+void mali_pp_job_set_pp_counter_global_src0(u32 counter);
+void mali_pp_job_set_pp_counter_global_src1(u32 counter);
+void mali_pp_job_set_pp_counter_sub_job_src0(u32 sub_job, u32 counter);
+void mali_pp_job_set_pp_counter_sub_job_src1(u32 sub_job, u32 counter);
+
+u32 mali_pp_job_get_pp_counter_global_src0(void);
+u32 mali_pp_job_get_pp_counter_global_src1(void);
+u32 mali_pp_job_get_pp_counter_sub_job_src0(u32 sub_job);
+u32 mali_pp_job_get_pp_counter_sub_job_src1(u32 sub_job);
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_id(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return (NULL == job) ? 0 : job->id;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_set_cache_order(struct mali_pp_job *job,
+ u32 cache_order)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ job->cache_order = cache_order;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_cache_order(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return (NULL == job) ? 0 : job->cache_order;
+}
+
+MALI_STATIC_INLINE u64 mali_pp_job_get_user_id(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.user_job_ptr;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_frame_builder_id(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.frame_builder_id;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_flush_id(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.flush_id;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_pid(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->pid;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_tid(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->tid;
+}
+
+MALI_STATIC_INLINE u32 *mali_pp_job_get_frame_registers(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.frame_registers;
+}
+
+MALI_STATIC_INLINE u32 *mali_pp_job_get_dlbu_registers(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.dlbu_registers;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_virtual(struct mali_pp_job *job)
+{
+#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470))
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return (0 == job->uargs.num_cores) ? MALI_TRUE : MALI_FALSE;
+#else
+ return MALI_FALSE;
+#endif
+}
+
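+/*
+ * Per sub job addressing: sub job 0 takes its frame/stack address from
+ * the frame_registers array itself, while sub jobs 1..N-1 use the
+ * frame_registers_addr_frame/_stack side arrays; virtual jobs render
+ * through the DLBU at MALI_DLBU_VIRT_ADDR instead.
+ */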
+MALI_STATIC_INLINE u32 mali_pp_job_get_addr_frame(struct mali_pp_job *job, u32 sub_job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ if (mali_pp_job_is_virtual(job)) {
+ return MALI_DLBU_VIRT_ADDR;
+ } else if (0 == sub_job) {
+ return job->uargs.frame_registers[MALI200_REG_ADDR_FRAME / sizeof(u32)];
+ } else if (sub_job < _MALI_PP_MAX_SUB_JOBS) {
+ return job->uargs.frame_registers_addr_frame[sub_job - 1];
+ }
+
+ return 0;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_addr_stack(struct mali_pp_job *job, u32 sub_job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ if (0 == sub_job) {
+ return job->uargs.frame_registers[MALI200_REG_ADDR_STACK / sizeof(u32)];
+ } else if (sub_job < _MALI_PP_MAX_SUB_JOBS) {
+ return job->uargs.frame_registers_addr_stack[sub_job - 1];
+ }
+
+ return 0;
+}
+
+void mali_pp_job_list_add(struct mali_pp_job *job, _mali_osk_list_t *list);
+
+MALI_STATIC_INLINE void mali_pp_job_list_addtail(struct mali_pp_job *job,
+ _mali_osk_list_t *list)
+{
+ _mali_osk_list_addtail(&job->list, list);
+}
+
+MALI_STATIC_INLINE void mali_pp_job_list_move(struct mali_pp_job *job,
+ _mali_osk_list_t *list)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ MALI_DEBUG_ASSERT(!_mali_osk_list_empty(&job->list));
+ _mali_osk_list_move(&job->list, list);
+}
+
+MALI_STATIC_INLINE void mali_pp_job_list_remove(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ _mali_osk_list_delinit(&job->list);
+}
+
+MALI_STATIC_INLINE u32 *mali_pp_job_get_wb0_registers(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.wb0_registers;
+}
+
+MALI_STATIC_INLINE u32 *mali_pp_job_get_wb1_registers(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.wb1_registers;
+}
+
+MALI_STATIC_INLINE u32 *mali_pp_job_get_wb2_registers(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.wb2_registers;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_wb0_source_addr(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.wb0_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR / sizeof(u32)];
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_wb1_source_addr(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.wb1_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR / sizeof(u32)];
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_wb2_source_addr(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.wb2_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR / sizeof(u32)];
+}
+
+MALI_STATIC_INLINE void mali_pp_job_disable_wb0(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ job->uargs.wb0_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_disable_wb1(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ job->uargs.wb1_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_disable_wb2(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ job->uargs.wb2_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_all_writeback_unit_disabled(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ if (job->uargs.wb0_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] ||
+ job->uargs.wb1_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] ||
+ job->uargs.wb2_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT]
+ ) {
+ /* At least one output unit active */
+ return MALI_FALSE;
+ }
+
+ /* All outputs are disabled - we can abort the job */
+ return MALI_TRUE;
+}
+
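+/*
+ * Each session keeps an array of MALI_PP_JOB_FB_LOOKUP_LIST_SIZE job lists
+ * hashed by frame builder id; _mali_ukk_pp_job_disable_wb() walks the
+ * matching bucket to disable write-back units. A job therefore stays in
+ * its bucket only for as long as it can still be discarded, i.e. until it
+ * is started.
+ */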
+MALI_STATIC_INLINE void mali_pp_job_fb_lookup_add(struct mali_pp_job *job)
+{
+ u32 fb_lookup_id;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+
+ fb_lookup_id = MALI_PP_JOB_FB_LOOKUP_LIST_MASK & job->uargs.frame_builder_id;
+
+ MALI_DEBUG_ASSERT(MALI_PP_JOB_FB_LOOKUP_LIST_SIZE > fb_lookup_id);
+
+ _mali_osk_list_addtail(&job->session_fb_lookup_list,
+ &job->session->pp_job_fb_lookup_list[fb_lookup_id]);
+}
+
+MALI_STATIC_INLINE void mali_pp_job_fb_lookup_remove(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ _mali_osk_list_delinit(&job->session_fb_lookup_list);
+}
+
+MALI_STATIC_INLINE struct mali_session_data *mali_pp_job_get_session(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->session;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_has_started_sub_jobs(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ return (0 < job->sub_jobs_started) ? MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_has_unstarted_sub_jobs(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ return (job->sub_jobs_started < job->sub_jobs_num) ? MALI_TRUE : MALI_FALSE;
+}
+
+/* Used when aborting a session or when a job fails activation: marks all
+   unstarted sub jobs as started and failed, so that no new sub jobs can be
+   started for this job. */
+MALI_STATIC_INLINE void mali_pp_job_mark_unstarted_failed(struct mali_pp_job *job)
+{
+ u32 jobs_remaining;
+ u32 i;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+
+ jobs_remaining = job->sub_jobs_num - job->sub_jobs_started;
+ job->sub_jobs_started += jobs_remaining;
+
+ /* Not the most optimal way, but this is only used in error cases */
+ for (i = 0; i < jobs_remaining; i++) {
+ _mali_osk_atomic_inc(&job->sub_jobs_completed);
+ _mali_osk_atomic_inc(&job->sub_job_errors);
+ }
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_complete(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return (job->sub_jobs_num ==
+ _mali_osk_atomic_read(&job->sub_jobs_completed)) ?
+ MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_first_unstarted_sub_job(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ return job->sub_jobs_started;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_sub_job_count(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->sub_jobs_num;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_unstarted_sub_job_count(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ MALI_DEBUG_ASSERT(job->sub_jobs_num >= job->sub_jobs_started);
+ return (job->sub_jobs_num - job->sub_jobs_started);
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_num_memory_cookies(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.num_memory_cookies;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_memory_cookie(
+ struct mali_pp_job *job, u32 index)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT(index < job->uargs.num_memory_cookies);
+ MALI_DEBUG_ASSERT_POINTER(job->memory_cookies);
+ return job->memory_cookies[index];
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_needs_dma_buf_mapping(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ if (0 < job->uargs.num_memory_cookies) {
+ return MALI_TRUE;
+ }
+
+ return MALI_FALSE;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_mark_sub_job_started(struct mali_pp_job *job, u32 sub_job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+
+ /* Assert that we are marking the "first unstarted sub job" as started */
+ MALI_DEBUG_ASSERT(job->sub_jobs_started == sub_job);
+
+ job->sub_jobs_started++;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_mark_sub_job_completed(struct mali_pp_job *job, mali_bool success)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+
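+ /*
+ * sub_jobs_completed and sub_job_errors are atomics, so completion
+ * can be recorded without holding the scheduler lock (unlike
+ * sub_jobs_started, which the scheduler lock protects).
+ */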
+ _mali_osk_atomic_inc(&job->sub_jobs_completed);
+ if (MALI_FALSE == success) {
+ _mali_osk_atomic_inc(&job->sub_job_errors);
+ }
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_was_success(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ if (0 == _mali_osk_atomic_read(&job->sub_job_errors)) {
+ return MALI_TRUE;
+ }
+ return MALI_FALSE;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_use_no_notification(
+ struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return (job->uargs.flags & _MALI_PP_JOB_FLAG_NO_NOTIFICATION) ?
+ MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_pilot_job(struct mali_pp_job *job)
+{
+ /*
+ * A pilot job is currently identified as a job which
+ * requires no callback notification.
+ */
+ return mali_pp_job_use_no_notification(job);
+}
+
+MALI_STATIC_INLINE _mali_osk_notification_t *
+mali_pp_job_get_finished_notification(struct mali_pp_job *job)
+{
+ _mali_osk_notification_t *notification;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_POINTER(job->finished_notification);
+
+ notification = job->finished_notification;
+ job->finished_notification = NULL;
+
+ return notification;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_window_surface(
+ struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return (job->uargs.flags & _MALI_PP_JOB_FLAG_IS_WINDOW_SURFACE)
+ ? MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_flag(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.perf_counter_flag;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_value0(struct mali_pp_job *job, u32 sub_job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->perf_counter_value0[sub_job];
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_value1(struct mali_pp_job *job, u32 sub_job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->perf_counter_value1[sub_job];
+}
+
+MALI_STATIC_INLINE void mali_pp_job_set_perf_counter_value0(struct mali_pp_job *job, u32 sub_job, u32 value)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ job->perf_counter_value0[sub_job] = value;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_set_perf_counter_value1(struct mali_pp_job *job, u32 sub_job, u32 value)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ job->perf_counter_value1[sub_job] = value;
+}
+
+MALI_STATIC_INLINE _mali_osk_errcode_t mali_pp_job_check(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ if (mali_pp_job_is_virtual(job) && job->sub_jobs_num != 1) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Returns MALI_TRUE if this job has more than two sub jobs and all sub jobs are unstarted.
+ *
+ * @param job Job to check.
+ * @return MALI_TRUE if job has more than two sub jobs and all sub jobs are unstarted, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_large_and_unstarted(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ MALI_DEBUG_ASSERT(!mali_pp_job_is_virtual(job));
+
+ return (0 == job->sub_jobs_started && 2 < job->sub_jobs_num);
+}
+
+/**
+ * Get PP job's Timeline tracker.
+ *
+ * @param job PP job.
+ * @return Pointer to Timeline tracker for the job.
+ */
+MALI_STATIC_INLINE struct mali_timeline_tracker *mali_pp_job_get_tracker(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return &(job->tracker);
+}
+
+MALI_STATIC_INLINE u32 *mali_pp_job_get_timeline_point_ptr(
+ struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return (u32 __user *)(uintptr_t)job->uargs.timeline_point_ptr;
+}
+
+
+#endif /* __MALI_PP_JOB_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_scheduler.c b/drivers/gpu/arm/utgard/common/mali_scheduler.c
new file mode 100644
index 000000000000..5547159db94c
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_scheduler.c
@@ -0,0 +1,1354 @@
+/*
+ * Copyright (C) 2012-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_scheduler.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_profiling.h"
+#include "mali_kernel_utilization.h"
+#include "mali_timeline.h"
+#include "mali_gp_job.h"
+#include "mali_pp_job.h"
+#include "mali_executor.h"
+#include "mali_group.h"
+#include <linux/wait.h>
+#include <linux/sched.h>
+
+
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+#include "mali_memory_dma_buf.h"
+#endif
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+#include <linux/sched.h>
+#include <trace/events/gpu.h>
+#endif
+
+/*
+ * ---------- static defines/constants ----------
+ */
+
+/*
+ * If dma_buf with map on demand is used, we defer job queue
+ * if in atomic context, since both might sleep.
+ */
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+#if !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+#define MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE 1
+#endif
+#endif
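+
+/*
+ * With MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE set, PP jobs that need
+ * dma-buf mapping are not queued directly from Timeline activation;
+ * they are put on scheduler_pp_job_queue_list and queued from work queue
+ * context (mali_scheduler_do_pp_job_queue()), where sleeping is allowed.
+ */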
+
+
+/*
+ * ---------- global variables (exported due to inline functions) ----------
+ */
+
+/* Lock protecting this module */
+_mali_osk_spinlock_irq_t *mali_scheduler_lock_obj = NULL;
+
+/* Queue of jobs to be executed on the GP group */
+struct mali_scheduler_job_queue job_queue_gp;
+
+/* Queue of PP jobs */
+struct mali_scheduler_job_queue job_queue_pp;
+
+_mali_osk_atomic_t mali_job_id_autonumber;
+_mali_osk_atomic_t mali_job_cache_order_autonumber;
+
+/*
+ * ---------- static variables ----------
+ */
+
+_mali_osk_wq_work_t *scheduler_wq_pp_job_delete = NULL;
+_mali_osk_spinlock_irq_t *scheduler_pp_job_delete_lock = NULL;
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(scheduler_pp_job_deletion_queue);
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
+static _mali_osk_wq_work_t *scheduler_wq_pp_job_queue = NULL;
+static _mali_osk_spinlock_irq_t *scheduler_pp_job_queue_lock = NULL;
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(scheduler_pp_job_queue_list);
+#endif
+
+/*
+ * ---------- Forward declaration of static functions ----------
+ */
+
+static mali_timeline_point mali_scheduler_submit_gp_job(
+ struct mali_session_data *session, struct mali_gp_job *job);
+static mali_timeline_point mali_scheduler_submit_pp_job(
+ struct mali_session_data *session, struct mali_pp_job *job);
+
+static mali_bool mali_scheduler_queue_gp_job(struct mali_gp_job *job);
+static mali_bool mali_scheduler_queue_pp_job(struct mali_pp_job *job);
+
+static void mali_scheduler_return_gp_job_to_user(struct mali_gp_job *job,
+ mali_bool success);
+
+static void mali_scheduler_deferred_pp_job_delete(struct mali_pp_job *job);
+void mali_scheduler_do_pp_job_delete(void *arg);
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
+static void mali_scheduler_deferred_pp_job_queue(struct mali_pp_job *job);
+static void mali_scheduler_do_pp_job_queue(void *arg);
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */
+
+/*
+ * ---------- Actual implementation ----------
+ */
+
+_mali_osk_errcode_t mali_scheduler_initialize(void)
+{
+ _mali_osk_atomic_init(&mali_job_id_autonumber, 0);
+ _mali_osk_atomic_init(&mali_job_cache_order_autonumber, 0);
+
+ _MALI_OSK_INIT_LIST_HEAD(&job_queue_gp.normal_pri);
+ _MALI_OSK_INIT_LIST_HEAD(&job_queue_gp.high_pri);
+ job_queue_gp.depth = 0;
+ job_queue_gp.big_job_num = 0;
+
+ _MALI_OSK_INIT_LIST_HEAD(&job_queue_pp.normal_pri);
+ _MALI_OSK_INIT_LIST_HEAD(&job_queue_pp.high_pri);
+ job_queue_pp.depth = 0;
+ job_queue_pp.big_job_num = 0;
+
+ mali_scheduler_lock_obj = _mali_osk_spinlock_irq_init(
+ _MALI_OSK_LOCKFLAG_ORDERED,
+ _MALI_OSK_LOCK_ORDER_SCHEDULER);
+ if (NULL == mali_scheduler_lock_obj) {
+ mali_scheduler_terminate();
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ scheduler_wq_pp_job_delete = _mali_osk_wq_create_work(
+ mali_scheduler_do_pp_job_delete, NULL);
+ if (NULL == scheduler_wq_pp_job_delete) {
+ mali_scheduler_terminate();
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ scheduler_pp_job_delete_lock = _mali_osk_spinlock_irq_init(
+ _MALI_OSK_LOCKFLAG_ORDERED,
+ _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED);
+ if (NULL == scheduler_pp_job_delete_lock) {
+ mali_scheduler_terminate();
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
+ scheduler_wq_pp_job_queue = _mali_osk_wq_create_work(
+ mali_scheduler_do_pp_job_queue, NULL);
+ if (NULL == scheduler_wq_pp_job_queue) {
+ mali_scheduler_terminate();
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ scheduler_pp_job_queue_lock = _mali_osk_spinlock_irq_init(
+ _MALI_OSK_LOCKFLAG_ORDERED,
+ _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED);
+ if (NULL == scheduler_pp_job_queue_lock) {
+ mali_scheduler_terminate();
+ return _MALI_OSK_ERR_FAULT;
+ }
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_scheduler_terminate(void)
+{
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
+ if (NULL != scheduler_pp_job_queue_lock) {
+ _mali_osk_spinlock_irq_term(scheduler_pp_job_queue_lock);
+ scheduler_pp_job_queue_lock = NULL;
+ }
+
+ if (NULL != scheduler_wq_pp_job_queue) {
+ _mali_osk_wq_delete_work(scheduler_wq_pp_job_queue);
+ scheduler_wq_pp_job_queue = NULL;
+ }
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */
+
+ if (NULL != scheduler_pp_job_delete_lock) {
+ _mali_osk_spinlock_irq_term(scheduler_pp_job_delete_lock);
+ scheduler_pp_job_delete_lock = NULL;
+ }
+
+ if (NULL != scheduler_wq_pp_job_delete) {
+ _mali_osk_wq_delete_work(scheduler_wq_pp_job_delete);
+ scheduler_wq_pp_job_delete = NULL;
+ }
+
+ if (NULL != mali_scheduler_lock_obj) {
+ _mali_osk_spinlock_irq_term(mali_scheduler_lock_obj);
+ mali_scheduler_lock_obj = NULL;
+ }
+
+ _mali_osk_atomic_term(&mali_job_cache_order_autonumber);
+ _mali_osk_atomic_term(&mali_job_id_autonumber);
+}
+
+u32 mali_scheduler_job_physical_head_count(void)
+{
+ /*
+ * Count how many physical sub jobs are present from the head of the queue
+ * until the first virtual job is encountered.
+ * Early out once we have reached the maximum number of physical PP cores (8).
+ */
+ u32 count = 0;
+ struct mali_pp_job *job;
+ struct mali_pp_job *temp;
+
+ /* Check for partially started normal pri jobs */
+ if (!_mali_osk_list_empty(&job_queue_pp.normal_pri)) {
+ MALI_DEBUG_ASSERT(0 < job_queue_pp.depth);
+
+ job = _MALI_OSK_LIST_ENTRY(job_queue_pp.normal_pri.next,
+ struct mali_pp_job, list);
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ if (MALI_TRUE == mali_pp_job_has_started_sub_jobs(job)) {
+ /*
+ * Remember: virtual jobs can't be queued and started
+ * at the same time, so this must be a physical job
+ */
+ count += mali_pp_job_unstarted_sub_job_count(job);
+ if (MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS <= count) {
+ return MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS;
+ }
+ }
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(job, temp, &job_queue_pp.high_pri,
+ struct mali_pp_job, list) {
+ if (MALI_FALSE == mali_pp_job_is_virtual(job)) {
+ count += mali_pp_job_unstarted_sub_job_count(job);
+ if (MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS <= count) {
+ return MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS;
+ }
+ } else {
+ /* Came across a virtual job, so stop counting */
+ return count;
+ }
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(job, temp, &job_queue_pp.normal_pri,
+ struct mali_pp_job, list) {
+ if (MALI_FALSE == mali_pp_job_is_virtual(job)) {
+ /* any partially started is already counted */
+ if (MALI_FALSE == mali_pp_job_has_started_sub_jobs(job)) {
+ count += mali_pp_job_unstarted_sub_job_count(job);
+ if (MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS <=
+ count) {
+ return MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS;
+ }
+ }
+ } else {
+ /* Came across a virtual job, so stop counting */
+ return count;
+ }
+ }
+
+ return count;
+}
+
+mali_bool mali_scheduler_job_next_is_virtual(void)
+{
+ struct mali_pp_job *job;
+
+ job = mali_scheduler_job_pp_virtual_peek();
+ if (NULL != job) {
+ MALI_DEBUG_ASSERT(mali_pp_job_is_virtual(job));
+
+ return MALI_TRUE;
+ }
+
+ return MALI_FALSE;
+}
+
+struct mali_gp_job *mali_scheduler_job_gp_get(void)
+{
+ _mali_osk_list_t *queue;
+ struct mali_gp_job *job = NULL;
+
+ MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj);
+ MALI_DEBUG_ASSERT(0 < job_queue_gp.depth);
+ MALI_DEBUG_ASSERT(job_queue_gp.big_job_num <= job_queue_gp.depth);
+
+ if (!_mali_osk_list_empty(&job_queue_gp.high_pri)) {
+ queue = &job_queue_gp.high_pri;
+ } else {
+ queue = &job_queue_gp.normal_pri;
+ MALI_DEBUG_ASSERT(!_mali_osk_list_empty(queue));
+ }
+
+ job = _MALI_OSK_LIST_ENTRY(queue->next, struct mali_gp_job, list);
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ mali_gp_job_list_remove(job);
+ job_queue_gp.depth--;
+ if (job->big_job) {
+ job_queue_gp.big_job_num--;
+ if (job_queue_gp.big_job_num < MALI_MAX_PENDING_BIG_JOB) {
+ /* Wake up any process waiting to submit a big varying job */
+ wait_queue_head_t *wq = mali_session_get_wait_queue();
+ wake_up(wq);
+ }
+ }
+ return job;
+}
+
+struct mali_pp_job *mali_scheduler_job_pp_physical_peek(void)
+{
+ struct mali_pp_job *job = NULL;
+ struct mali_pp_job *tmp_job = NULL;
+
+ MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj);
+
+ /*
+ * For PP jobs we favour partially started jobs in normal
+ * priority queue over unstarted jobs in high priority queue
+ */
+
+ if (!_mali_osk_list_empty(&job_queue_pp.normal_pri)) {
+ MALI_DEBUG_ASSERT(0 < job_queue_pp.depth);
+
+ tmp_job = _MALI_OSK_LIST_ENTRY(job_queue_pp.normal_pri.next,
+ struct mali_pp_job, list);
+ MALI_DEBUG_ASSERT(NULL != tmp_job);
+
+ if (MALI_FALSE == mali_pp_job_is_virtual(tmp_job)) {
+ job = tmp_job;
+ }
+ }
+
+ if (NULL == job ||
+ MALI_FALSE == mali_pp_job_has_started_sub_jobs(job)) {
+ /*
+ * There isn't a partially started job in normal queue, so
+ * look in high priority queue.
+ */
+ if (!_mali_osk_list_empty(&job_queue_pp.high_pri)) {
+ MALI_DEBUG_ASSERT(0 < job_queue_pp.depth);
+
+ tmp_job = _MALI_OSK_LIST_ENTRY(job_queue_pp.high_pri.next,
+ struct mali_pp_job, list);
+ MALI_DEBUG_ASSERT(NULL != tmp_job);
+
+ if (MALI_FALSE == mali_pp_job_is_virtual(tmp_job)) {
+ job = tmp_job;
+ }
+ }
+ }
+
+ return job;
+}
+
+struct mali_pp_job *mali_scheduler_job_pp_virtual_peek(void)
+{
+ struct mali_pp_job *job = NULL;
+ struct mali_pp_job *tmp_job = NULL;
+
+ MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj);
+
+ if (!_mali_osk_list_empty(&job_queue_pp.high_pri)) {
+ MALI_DEBUG_ASSERT(0 < job_queue_pp.depth);
+
+ tmp_job = _MALI_OSK_LIST_ENTRY(job_queue_pp.high_pri.next,
+ struct mali_pp_job, list);
+
+ if (MALI_TRUE == mali_pp_job_is_virtual(tmp_job)) {
+ job = tmp_job;
+ }
+ }
+
+ if (NULL == job) {
+ if (!_mali_osk_list_empty(&job_queue_pp.normal_pri)) {
+ MALI_DEBUG_ASSERT(0 < job_queue_pp.depth);
+
+ tmp_job = _MALI_OSK_LIST_ENTRY(job_queue_pp.normal_pri.next,
+ struct mali_pp_job, list);
+
+ if (MALI_TRUE == mali_pp_job_is_virtual(tmp_job)) {
+ job = tmp_job;
+ }
+ }
+ }
+
+ return job;
+}
+
+struct mali_pp_job *mali_scheduler_job_pp_physical_get(u32 *sub_job)
+{
+ struct mali_pp_job *job = mali_scheduler_job_pp_physical_peek();
+
+ MALI_DEBUG_ASSERT(MALI_FALSE == mali_pp_job_is_virtual(job));
+
+ if (NULL != job) {
+ *sub_job = mali_pp_job_get_first_unstarted_sub_job(job);
+
+ mali_pp_job_mark_sub_job_started(job, *sub_job);
+ if (MALI_FALSE == mali_pp_job_has_unstarted_sub_jobs(job)) {
+ /* Remove from queue when last sub job has been retrieved */
+ mali_pp_job_list_remove(job);
+ }
+
+ job_queue_pp.depth--;
+
+ /*
+ * Job about to start, so it is no longer
+ * possible to discard WB
+ */
+ mali_pp_job_fb_lookup_remove(job);
+ }
+
+ return job;
+}
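+
+/*
+ * Illustrative use of mali_scheduler_job_pp_physical_get() from the
+ * executor side (a sketch, not driver code):
+ *
+ * u32 sub_job;
+ * struct mali_pp_job *job;
+ *
+ * mali_scheduler_lock();
+ * job = mali_scheduler_job_pp_physical_get(&sub_job);
+ * mali_scheduler_unlock();
+ * if (NULL != job)
+ * ... start sub_job of job on a physical group ...
+ */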
+
+struct mali_pp_job *mali_scheduler_job_pp_virtual_get(void)
+{
+ struct mali_pp_job *job = mali_scheduler_job_pp_virtual_peek();
+
+ MALI_DEBUG_ASSERT(MALI_TRUE == mali_pp_job_is_virtual(job));
+
+ if (NULL != job) {
+ MALI_DEBUG_ASSERT(0 ==
+ mali_pp_job_get_first_unstarted_sub_job(job));
+ MALI_DEBUG_ASSERT(1 ==
+ mali_pp_job_get_sub_job_count(job));
+
+ mali_pp_job_mark_sub_job_started(job, 0);
+
+ mali_pp_job_list_remove(job);
+
+ job_queue_pp.depth--;
+
+ /*
+ * Job about to start, so it is no longer
+ * possible to discard WB
+ */
+ mali_pp_job_fb_lookup_remove(job);
+ }
+
+ return job;
+}
+
+mali_scheduler_mask mali_scheduler_activate_gp_job(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Timeline activation for job %u (0x%08X).\n",
+ mali_gp_job_get_id(job), job));
+
+ mali_scheduler_lock();
+
+ if (!mali_scheduler_queue_gp_job(job)) {
+ /* Failed to enqueue job, release job (with error) */
+
+ mali_scheduler_unlock();
+
+ mali_timeline_tracker_release(mali_gp_job_get_tracker(job));
+ mali_gp_job_signal_pp_tracker(job, MALI_FALSE);
+
+ /* This will notify user space and close the job object */
+ mali_scheduler_complete_gp_job(job, MALI_FALSE,
+ MALI_TRUE, MALI_FALSE);
+
+ return MALI_SCHEDULER_MASK_EMPTY;
+ }
+
+ mali_scheduler_unlock();
+
+ return MALI_SCHEDULER_MASK_GP;
+}
+
+mali_scheduler_mask mali_scheduler_activate_pp_job(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Timeline activation for job %u (0x%08X).\n",
+ mali_pp_job_get_id(job), job));
+
+ if (MALI_TRUE == mali_timeline_tracker_activation_error(
+ mali_pp_job_get_tracker(job))) {
+ MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Job %u (0x%08X) activated with error, aborting.\n",
+ mali_pp_job_get_id(job), job));
+
+ mali_scheduler_lock();
+ mali_pp_job_fb_lookup_remove(job);
+ mali_pp_job_mark_unstarted_failed(job);
+ mali_scheduler_unlock();
+
+ mali_timeline_tracker_release(mali_pp_job_get_tracker(job));
+
+ /* This will notify user space and close the job object */
+ mali_scheduler_complete_pp_job(job, 0, MALI_TRUE, MALI_FALSE);
+
+ return MALI_SCHEDULER_MASK_EMPTY;
+ }
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
+ if (mali_pp_job_needs_dma_buf_mapping(job)) {
+ mali_scheduler_deferred_pp_job_queue(job);
+ return MALI_SCHEDULER_MASK_EMPTY;
+ }
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */
+
+ mali_scheduler_lock();
+
+ if (!mali_scheduler_queue_pp_job(job)) {
+ /* Failed to enqueue job, release job (with error) */
+ mali_pp_job_fb_lookup_remove(job);
+ mali_pp_job_mark_unstarted_failed(job);
+ mali_scheduler_unlock();
+
+ mali_timeline_tracker_release(mali_pp_job_get_tracker(job));
+
+ /* This will notify user space and close the job object */
+ mali_scheduler_complete_pp_job(job, 0, MALI_TRUE, MALI_FALSE);
+
+ return MALI_SCHEDULER_MASK_EMPTY;
+ }
+
+ mali_scheduler_unlock();
+ return MALI_SCHEDULER_MASK_PP;
+}
+
+void mali_scheduler_complete_gp_job(struct mali_gp_job *job,
+ mali_bool success,
+ mali_bool user_notification,
+ mali_bool dequeued)
+{
+ if (user_notification) {
+ mali_scheduler_return_gp_job_to_user(job, success);
+ }
+
+ if (dequeued) {
+ _mali_osk_pm_dev_ref_put();
+
+ if (mali_utilization_enabled()) {
+ mali_utilization_gp_end();
+ }
+ }
+
+ mali_gp_job_delete(job);
+}
+
+void mali_scheduler_complete_pp_job(struct mali_pp_job *job,
+ u32 num_cores_in_virtual,
+ mali_bool user_notification,
+ mali_bool dequeued)
+{
+ job->user_notification = user_notification;
+ job->num_pp_cores_in_virtual = num_cores_in_virtual;
+
+ if (dequeued) {
+#if defined(CONFIG_MALI_DVFS)
+ if (mali_pp_job_is_window_surface(job)) {
+ struct mali_session_data *session;
+ session = mali_pp_job_get_session(job);
+ mali_session_inc_num_window_jobs(session);
+ }
+#endif
+
+ _mali_osk_pm_dev_ref_put();
+
+ if (mali_utilization_enabled()) {
+ mali_utilization_pp_end();
+ }
+ }
+
+ /* With the ZRAM feature enabled, all PP jobs are forced to use deferred deletion. */
+ mali_scheduler_deferred_pp_job_delete(job);
+}
+
+void mali_scheduler_abort_session(struct mali_session_data *session)
+{
+ struct mali_gp_job *gp_job;
+ struct mali_gp_job *gp_tmp;
+ struct mali_pp_job *pp_job;
+ struct mali_pp_job *pp_tmp;
+ _MALI_OSK_LIST_HEAD_STATIC_INIT(removed_jobs_gp);
+ _MALI_OSK_LIST_HEAD_STATIC_INIT(removed_jobs_pp);
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_ASSERT(session->is_aborting);
+
+ MALI_DEBUG_PRINT(3, ("Mali scheduler: Aborting all queued jobs from session 0x%08X.\n",
+ session));
+
+ mali_scheduler_lock();
+
+ /* Remove from GP normal priority queue */
+ _MALI_OSK_LIST_FOREACHENTRY(gp_job, gp_tmp, &job_queue_gp.normal_pri,
+ struct mali_gp_job, list) {
+ if (mali_gp_job_get_session(gp_job) == session) {
+ mali_gp_job_list_move(gp_job, &removed_jobs_gp);
+ job_queue_gp.depth--;
+ job_queue_gp.big_job_num -= gp_job->big_job ? 1 : 0;
+ }
+ }
+
+ /* Remove from GP high priority queue */
+ _MALI_OSK_LIST_FOREACHENTRY(gp_job, gp_tmp, &job_queue_gp.high_pri,
+ struct mali_gp_job, list) {
+ if (mali_gp_job_get_session(gp_job) == session) {
+ mali_gp_job_list_move(gp_job, &removed_jobs_gp);
+ job_queue_gp.depth--;
+ job_queue_gp.big_job_num -= gp_job->big_job ? 1 : 0;
+ }
+ }
+
+ /* Remove from PP normal priority queue */
+ _MALI_OSK_LIST_FOREACHENTRY(pp_job, pp_tmp,
+ &job_queue_pp.normal_pri,
+ struct mali_pp_job, list) {
+ if (mali_pp_job_get_session(pp_job) == session) {
+ mali_pp_job_fb_lookup_remove(pp_job);
+
+ job_queue_pp.depth -=
+ mali_pp_job_unstarted_sub_job_count(
+ pp_job);
+ mali_pp_job_mark_unstarted_failed(pp_job);
+
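+ /*
+ * After the mark above all sub jobs count as started, so the
+ * "complete" check below is what distinguishes idle jobs (safe
+ * to release here) from jobs still running on a group; running
+ * jobs are only unhooked from the queue and will be completed
+ * by the executor.
+ */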
+ if (MALI_FALSE == mali_pp_job_has_unstarted_sub_jobs(pp_job)) {
+ if (mali_pp_job_is_complete(pp_job)) {
+ mali_pp_job_list_move(pp_job,
+ &removed_jobs_pp);
+ } else {
+ mali_pp_job_list_remove(pp_job);
+ }
+ }
+ }
+ }
+
+ /* Remove from PP high priority queue */
+ _MALI_OSK_LIST_FOREACHENTRY(pp_job, pp_tmp,
+ &job_queue_pp.high_pri,
+ struct mali_pp_job, list) {
+ if (mali_pp_job_get_session(pp_job) == session) {
+ mali_pp_job_fb_lookup_remove(pp_job);
+
+ job_queue_pp.depth -=
+ mali_pp_job_unstarted_sub_job_count(
+ pp_job);
+ mali_pp_job_mark_unstarted_failed(pp_job);
+
+ if (MALI_FALSE == mali_pp_job_has_unstarted_sub_jobs(pp_job)) {
+ if (mali_pp_job_is_complete(pp_job)) {
+ mali_pp_job_list_move(pp_job,
+ &removed_jobs_pp);
+ } else {
+ mali_pp_job_list_remove(pp_job);
+ }
+ }
+ }
+ }
+
+ /*
+ * Release scheduler lock so we can release trackers
+ * (which will potentially queue new jobs)
+ */
+ mali_scheduler_unlock();
+
+ /* Release and complete all (non-running) found GP jobs */
+ _MALI_OSK_LIST_FOREACHENTRY(gp_job, gp_tmp, &removed_jobs_gp,
+ struct mali_gp_job, list) {
+ mali_timeline_tracker_release(mali_gp_job_get_tracker(gp_job));
+ mali_gp_job_signal_pp_tracker(gp_job, MALI_FALSE);
+ _mali_osk_list_delinit(&gp_job->list);
+ mali_scheduler_complete_gp_job(gp_job,
+ MALI_FALSE, MALI_FALSE, MALI_TRUE);
+ }
+
+ /* Release and complete non-running PP jobs */
+ _MALI_OSK_LIST_FOREACHENTRY(pp_job, pp_tmp, &removed_jobs_pp,
+ struct mali_pp_job, list) {
+ mali_timeline_tracker_release(mali_pp_job_get_tracker(pp_job));
+ _mali_osk_list_delinit(&pp_job->list);
+ mali_scheduler_complete_pp_job(pp_job, 0,
+ MALI_FALSE, MALI_TRUE);
+ }
+}
+
+_mali_osk_errcode_t _mali_ukk_gp_start_job(void *ctx,
+ _mali_uk_gp_start_job_s *uargs)
+{
+ struct mali_session_data *session;
+ struct mali_gp_job *job;
+ mali_timeline_point point;
+ u32 __user *point_ptr = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(uargs);
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+
+ session = (struct mali_session_data *)(uintptr_t)ctx;
+
+ job = mali_gp_job_create(session, uargs, mali_scheduler_get_new_id(),
+ NULL);
+ if (NULL == job) {
+ MALI_PRINT_ERROR(("Failed to create GP job.\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ point_ptr = (u32 __user *)(uintptr_t)mali_gp_job_get_timeline_point_ptr(job);
+
+ point = mali_scheduler_submit_gp_job(session, job);
+
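+ /*
+ * From this point the job may already have completed and been
+ * freed, so only the timeline point value may be used.
+ */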
+ if (0 != _mali_osk_put_user(((u32) point), point_ptr)) {
+ /*
+ * Let user space know that something failed
+ * after the job was started.
+ */
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_pp_start_job(void *ctx,
+ _mali_uk_pp_start_job_s *uargs)
+{
+ struct mali_session_data *session;
+ struct mali_pp_job *job;
+ mali_timeline_point point;
+ u32 __user *point_ptr = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(uargs);
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+
+ session = (struct mali_session_data *)(uintptr_t)ctx;
+
+ job = mali_pp_job_create(session, uargs, mali_scheduler_get_new_id());
+ if (NULL == job) {
+ MALI_PRINT_ERROR(("Failed to create PP job.\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ point_ptr = (u32 __user *)(uintptr_t)mali_pp_job_get_timeline_point_ptr(job);
+
+ point = mali_scheduler_submit_pp_job(session, job);
+ job = NULL;
+
+ if (0 != _mali_osk_put_user(((u32) point), point_ptr)) {
+ /*
+ * Let user space know that something failed
+ * after the job was started.
+ */
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_pp_and_gp_start_job(void *ctx,
+ _mali_uk_pp_and_gp_start_job_s *uargs)
+{
+ struct mali_session_data *session;
+ _mali_uk_pp_and_gp_start_job_s kargs;
+ struct mali_pp_job *pp_job;
+ struct mali_gp_job *gp_job;
+ u32 __user *point_ptr = NULL;
+ mali_timeline_point point;
+ _mali_uk_pp_start_job_s __user *pp_args;
+ _mali_uk_gp_start_job_s __user *gp_args;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(uargs);
+
+ session = (struct mali_session_data *)(uintptr_t)ctx;
+
+ if (0 != _mali_osk_copy_from_user(&kargs, uargs,
+ sizeof(_mali_uk_pp_and_gp_start_job_s))) {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ pp_args = (_mali_uk_pp_start_job_s __user *)(uintptr_t)kargs.pp_args;
+ gp_args = (_mali_uk_gp_start_job_s __user *)(uintptr_t)kargs.gp_args;
+
+ pp_job = mali_pp_job_create(session, pp_args,
+ mali_scheduler_get_new_id());
+ if (NULL == pp_job) {
+ MALI_PRINT_ERROR(("Failed to create PP job.\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
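+ /*
+ * The GP job is created with a reference to the PP job's Timeline
+ * tracker, so that GP completion (or failure) can signal the PP job.
+ */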
+ gp_job = mali_gp_job_create(session, gp_args,
+ mali_scheduler_get_new_id(),
+ mali_pp_job_get_tracker(pp_job));
+ if (NULL == gp_job) {
+ MALI_PRINT_ERROR(("Failed to create GP job.\n"));
+ mali_pp_job_delete(pp_job);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ point_ptr = (u32 __user *)(uintptr_t)mali_pp_job_get_timeline_point_ptr(pp_job);
+
+ /* Submit GP job. */
+ mali_scheduler_submit_gp_job(session, gp_job);
+ gp_job = NULL;
+
+ /* Submit PP job. */
+ point = mali_scheduler_submit_pp_job(session, pp_job);
+ pp_job = NULL;
+
+ if (0 != _mali_osk_put_user(((u32) point), point_ptr)) {
+ /*
+ * Let user space know that something failed
+ * after the jobs were started.
+ */
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void _mali_ukk_pp_job_disable_wb(_mali_uk_pp_disable_wb_s *args)
+{
+ struct mali_session_data *session;
+ struct mali_pp_job *job;
+ struct mali_pp_job *tmp;
+ u32 fb_lookup_id;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+
+ session = (struct mali_session_data *)(uintptr_t)args->ctx;
+
+ fb_lookup_id = args->fb_id & MALI_PP_JOB_FB_LOOKUP_LIST_MASK;
+
+ mali_scheduler_lock();
+
+ /* Iterate over all jobs for the given frame builder id. */
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp,
+ &session->pp_job_fb_lookup_list[fb_lookup_id],
+ struct mali_pp_job, session_fb_lookup_list) {
+ MALI_DEBUG_CODE(u32 disable_mask = 0);
+
+ if (mali_pp_job_get_frame_builder_id(job) !=
+ (u32) args->fb_id) {
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Disable WB mismatching FB.\n"));
+ continue;
+ }
+
+ MALI_DEBUG_CODE(disable_mask |= 0xD << (4 * 3));
+
+ if (mali_pp_job_get_wb0_source_addr(job) == args->wb0_memory) {
+ MALI_DEBUG_CODE(disable_mask |= 0x1 << (4 * 1));
+ mali_pp_job_disable_wb0(job);
+ }
+
+ if (mali_pp_job_get_wb1_source_addr(job) == args->wb1_memory) {
+ MALI_DEBUG_CODE(disable_mask |= 0x2 << (4 * 2));
+ mali_pp_job_disable_wb1(job);
+ }
+
+ if (mali_pp_job_get_wb2_source_addr(job) == args->wb2_memory) {
+ MALI_DEBUG_CODE(disable_mask |= 0x3 << (4 * 3));
+ mali_pp_job_disable_wb2(job);
+ }
+ MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Disable WB: 0x%X.\n",
+ disable_mask));
+ }
+
+ mali_scheduler_unlock();
+}
+
+#if MALI_STATE_TRACKING
+u32 mali_scheduler_dump_state(char *buf, u32 size)
+{
+ int n = 0;
+
+ n += _mali_osk_snprintf(buf + n, size - n, "GP queues\n");
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tQueue depth: %u\n", job_queue_gp.depth);
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tNormal priority queue is %s\n",
+ _mali_osk_list_empty(&job_queue_gp.normal_pri) ?
+ "empty" : "not empty");
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tHigh priority queue is %s\n",
+ _mali_osk_list_empty(&job_queue_gp.high_pri) ?
+ "empty" : "not empty");
+
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "PP queues\n");
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tQueue depth: %u\n", job_queue_pp.depth);
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tNormal priority queue is %s\n",
+ _mali_osk_list_empty(&job_queue_pp.normal_pri)
+ ? "empty" : "not empty");
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tHigh priority queue is %s\n",
+ _mali_osk_list_empty(&job_queue_pp.high_pri)
+ ? "empty" : "not empty");
+
+ n += _mali_osk_snprintf(buf + n, size - n, "\n");
+
+ return n;
+}
+#endif
+
+/*
+ * ---------- Implementation of static functions ----------
+ */
+
+static mali_timeline_point mali_scheduler_submit_gp_job(
+ struct mali_session_data *session, struct mali_gp_job *job)
+{
+ mali_timeline_point point;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ /* Add job to Timeline system. */
+ point = mali_timeline_system_add_tracker(session->timeline_system,
+ mali_gp_job_get_tracker(job), MALI_TIMELINE_GP);
+
+ return point;
+}
+
+static mali_timeline_point mali_scheduler_submit_pp_job(
+ struct mali_session_data *session, struct mali_pp_job *job)
+{
+ mali_timeline_point point;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ mali_scheduler_lock();
+ /*
+ * Add the job to the lookup list used to quickly discard
+ * writeback units of queued jobs.
+ */
+ mali_pp_job_fb_lookup_add(job);
+ mali_scheduler_unlock();
+
+ /* Add job to Timeline system. */
+ point = mali_timeline_system_add_tracker(session->timeline_system,
+ mali_pp_job_get_tracker(job), MALI_TIMELINE_PP);
+
+ return point;
+}
+
+static mali_bool mali_scheduler_queue_gp_job(struct mali_gp_job *job)
+{
+ struct mali_session_data *session;
+ _mali_osk_list_t *queue;
+
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ session = mali_gp_job_get_session(job);
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ if (unlikely(session->is_aborting)) {
+ MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Job %u (0x%08X) queued while session is aborting.\n",
+ mali_gp_job_get_id(job), job));
+ return MALI_FALSE; /* job not queued */
+ }
+
+ mali_gp_job_set_cache_order(job, mali_scheduler_get_new_cache_order());
+
+ /* Determine which queue the job should be added to. */
+ if (session->use_high_priority_job_queue) {
+ queue = &job_queue_gp.high_pri;
+ } else {
+ queue = &job_queue_gp.normal_pri;
+ }
+
+ job_queue_gp.depth += 1;
+ job_queue_gp.big_job_num += (job->big_job) ? 1 : 0;
+
+ /* Add job to queue (mali_gp_job_list_add finds the correct place). */
+ mali_gp_job_list_add(job, queue);
+
+ /*
+ * We hold a PM reference for every job we hold queued (and running).
+ * It is important that we take this reference after the job has been
+ * added to the queue, so that any runtime resume can schedule this
+ * job right there and then.
+ */
+ _mali_osk_pm_dev_ref_get_async();
+
+ if (mali_utilization_enabled()) {
+ /*
+ * We cheat a little bit by counting the GP as busy from the
+ * time a GP job is queued. This is fine because we only
+ * lose the tiny idle gap between jobs, but we will instead
+ * get less utilization work to do (fewer locks taken)
+ */
+ mali_utilization_gp_start();
+ }
+
+ /* Add profiling events for job enqueued */
+ _mali_osk_profiling_add_event(
+ MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_GP_ENQUEUE,
+ mali_gp_job_get_pid(job),
+ mali_gp_job_get_tid(job),
+ mali_gp_job_get_frame_builder_id(job),
+ mali_gp_job_get_flush_id(job),
+ 0);
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+ trace_gpu_job_enqueue(mali_gp_job_get_tid(job),
+ mali_gp_job_get_id(job), "GP");
+#endif
+
+ MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) queued\n",
+ mali_gp_job_get_id(job), job));
+
+ return MALI_TRUE; /* job queued */
+}
+
+static mali_bool mali_scheduler_queue_pp_job(struct mali_pp_job *job)
+{
+ struct mali_session_data *session;
+ _mali_osk_list_t *queue = NULL;
+
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ session = mali_pp_job_get_session(job);
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ if (unlikely(session->is_aborting)) {
+ MALI_DEBUG_PRINT(2, ("Mali PP scheduler: Job %u (0x%08X) queued while session is aborting.\n",
+ mali_pp_job_get_id(job), job));
+ return MALI_FALSE; /* job not queued */
+ } else if (unlikely(MALI_SWAP_IN_FAIL == job->swap_status)) {
+ MALI_DEBUG_PRINT(2, ("Mali PP scheduler: Job %u (0x%08X) queued while swap in failed.\n",
+ mali_pp_job_get_id(job), job));
+ return MALI_FALSE;
+ }
+
+ mali_pp_job_set_cache_order(job, mali_scheduler_get_new_cache_order());
+
+ if (session->use_high_priority_job_queue) {
+ queue = &job_queue_pp.high_pri;
+ } else {
+ queue = &job_queue_pp.normal_pri;
+ }
+
+ job_queue_pp.depth +=
+ mali_pp_job_get_sub_job_count(job);
+
+ /* Add job to queue (mali_pp_job_list_add finds the correct place). */
+ mali_pp_job_list_add(job, queue);
+
+ /*
+ * We hold a PM reference for every job we hold queued (and running).
+ * It is important that we take this reference after the job has been
+ * added to the queue, so that any runtime resume can schedule this
+ * job right there and then.
+ */
+ _mali_osk_pm_dev_ref_get_async();
+
+ if (mali_utilization_enabled()) {
+ /*
+ * We cheat a little bit by counting the PP as busy from the
+ * time a PP job is queued. This is fine because we only
+ * lose the tiny idle gap between jobs, but we will instead
+ * get less utilization work to do (fewer locks taken)
+ */
+ mali_utilization_pp_start();
+ }
+
+ /* Add profiling events for job enqueued */
+
+ _mali_osk_profiling_add_event(
+ MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_PP_ENQUEUE,
+ mali_pp_job_get_pid(job),
+ mali_pp_job_get_tid(job),
+ mali_pp_job_get_frame_builder_id(job),
+ mali_pp_job_get_flush_id(job),
+ 0);
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+ trace_gpu_job_enqueue(mali_pp_job_get_tid(job),
+ mali_pp_job_get_id(job), "PP");
+#endif
+
+ MALI_DEBUG_PRINT(3, ("Mali PP scheduler: %s job %u (0x%08X) with %u parts queued.\n",
+ mali_pp_job_is_virtual(job)
+ ? "Virtual" : "Physical",
+ mali_pp_job_get_id(job), job,
+ mali_pp_job_get_sub_job_count(job)));
+
+ return MALI_TRUE; /* job queued */
+}
+
+static void mali_scheduler_return_gp_job_to_user(struct mali_gp_job *job,
+ mali_bool success)
+{
+ _mali_uk_gp_job_finished_s *jobres;
+ struct mali_session_data *session;
+ _mali_osk_notification_t *notification;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ session = mali_gp_job_get_session(job);
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ notification = mali_gp_job_get_finished_notification(job);
+ MALI_DEBUG_ASSERT_POINTER(notification);
+
+ jobres = notification->result_buffer;
+ MALI_DEBUG_ASSERT_POINTER(jobres);
+
+ jobres->pending_big_job_num = mali_scheduler_job_gp_big_job_count();
+
+ jobres->user_job_ptr = mali_gp_job_get_user_id(job);
+ if (MALI_TRUE == success) {
+ jobres->status = _MALI_UK_JOB_STATUS_END_SUCCESS;
+ } else {
+ jobres->status = _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR;
+ }
+ jobres->heap_current_addr = mali_gp_job_get_current_heap_addr(job);
+ jobres->perf_counter0 = mali_gp_job_get_perf_counter_value0(job);
+ jobres->perf_counter1 = mali_gp_job_get_perf_counter_value1(job);
+
+ mali_session_send_notification(session, notification);
+}
+
+void mali_scheduler_return_pp_job_to_user(struct mali_pp_job *job,
+ u32 num_cores_in_virtual)
+{
+ u32 i;
+ u32 num_counters_to_copy;
+ _mali_uk_pp_job_finished_s *jobres;
+ struct mali_session_data *session;
+ _mali_osk_notification_t *notification;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ if (MALI_TRUE == mali_pp_job_use_no_notification(job)) {
+ return;
+ }
+
+ session = mali_pp_job_get_session(job);
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ notification = mali_pp_job_get_finished_notification(job);
+ MALI_DEBUG_ASSERT_POINTER(notification);
+
+ jobres = notification->result_buffer;
+ MALI_DEBUG_ASSERT_POINTER(jobres);
+
+ jobres->user_job_ptr = mali_pp_job_get_user_id(job);
+ if (MALI_TRUE == mali_pp_job_was_success(job)) {
+ jobres->status = _MALI_UK_JOB_STATUS_END_SUCCESS;
+ } else {
+ jobres->status = _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR;
+ }
+
+ if (mali_pp_job_is_virtual(job)) {
+ num_counters_to_copy = num_cores_in_virtual;
+ } else {
+ num_counters_to_copy = mali_pp_job_get_sub_job_count(job);
+ }
+
+ for (i = 0; i < num_counters_to_copy; i++) {
+ jobres->perf_counter0[i] =
+ mali_pp_job_get_perf_counter_value0(job, i);
+ jobres->perf_counter1[i] =
+ mali_pp_job_get_perf_counter_value1(job, i);
+ }
+
+ /* The perf counter sources are job-wide; set them once, outside the loop */
+ jobres->perf_counter_src0 =
+ mali_pp_job_get_pp_counter_global_src0();
+ jobres->perf_counter_src1 =
+ mali_pp_job_get_pp_counter_global_src1();
+
+ mali_session_send_notification(session, notification);
+}
+
+static void mali_scheduler_deferred_pp_job_delete(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ _mali_osk_spinlock_irq_lock(scheduler_pp_job_delete_lock);
+ mali_pp_job_list_addtail(job, &scheduler_pp_job_deletion_queue);
+ _mali_osk_spinlock_irq_unlock(scheduler_pp_job_delete_lock);
+
+ _mali_osk_wq_schedule_work(scheduler_wq_pp_job_delete);
+}
+
+void mali_scheduler_do_pp_job_delete(void *arg)
+{
+ _MALI_OSK_LIST_HEAD_STATIC_INIT(list);
+ struct mali_pp_job *job;
+ struct mali_pp_job *tmp;
+
+ MALI_IGNORE(arg);
+
+ /*
+ * Quickly "unhook" the jobs pending to be deleted, so we can release
+ * the lock before we start deleting the job objects
+ * (without any locks held)
+ */
+ _mali_osk_spinlock_irq_lock(scheduler_pp_job_delete_lock);
+ _mali_osk_list_move_list(&scheduler_pp_job_deletion_queue, &list);
+ _mali_osk_spinlock_irq_unlock(scheduler_pp_job_delete_lock);
+
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list,
+ struct mali_pp_job, list) {
+ _mali_osk_list_delinit(&job->list);
+
+ mali_pp_job_delete(job); /* delete the job object itself */
+ }
+}
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
+
+static void mali_scheduler_deferred_pp_job_queue(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ _mali_osk_spinlock_irq_lock(scheduler_pp_job_queue_lock);
+ mali_pp_job_list_addtail(job, &scheduler_pp_job_queue_list);
+ _mali_osk_spinlock_irq_unlock(scheduler_pp_job_queue_lock);
+
+ _mali_osk_wq_schedule_work(scheduler_wq_pp_job_queue);
+}
+
+static void mali_scheduler_do_pp_job_queue(void *arg)
+{
+ _MALI_OSK_LIST_HEAD_STATIC_INIT(list);
+ struct mali_pp_job *job;
+ struct mali_pp_job *tmp;
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_IGNORE(arg);
+
+ /*
+ * Quickly "unhook" the jobs pending to be queued, so we can release
+ * the lock before we start queueing the job objects
+ * (without any locks held)
+ */
+ _mali_osk_spinlock_irq_lock(scheduler_pp_job_queue_lock);
+ _mali_osk_list_move_list(&scheduler_pp_job_queue_list, &list);
+ _mali_osk_spinlock_irq_unlock(scheduler_pp_job_queue_lock);
+
+ /* First loop through all jobs and do the pre-work (no locks needed) */
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list,
+ struct mali_pp_job, list) {
+ if (mali_pp_job_needs_dma_buf_mapping(job)) {
+ /*
+ * This operation could fail, but we continue anyway,
+ * because the worst that could happen is that this
+ * job will fail due to a Mali page fault.
+ */
+ mali_dma_buf_map_job(job);
+ }
+ }
+
+ mali_scheduler_lock();
+
+ /* Then loop through all jobs again to queue them (lock needed) */
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list,
+ struct mali_pp_job, list) {
+
+ /* Remove from scheduler_pp_job_queue_list before queueing */
+ mali_pp_job_list_remove(job);
+
+ if (mali_scheduler_queue_pp_job(job)) {
+ /* Job queued successfully */
+ schedule_mask |= MALI_SCHEDULER_MASK_PP;
+ } else {
+ /* Failed to enqueue job, release job (with error) */
+ mali_pp_job_fb_lookup_remove(job);
+ mali_pp_job_mark_unstarted_failed(job);
+
+ /* unlock scheduler in this uncommon case */
+ mali_scheduler_unlock();
+
+ schedule_mask |= mali_timeline_tracker_release(
+ mali_pp_job_get_tracker(job));
+
+ /* Notify user space and close the job object */
+ mali_scheduler_complete_pp_job(job, 0, MALI_TRUE,
+ MALI_FALSE);
+
+ mali_scheduler_lock();
+ }
+ }
+
+ mali_scheduler_unlock();
+
+ /* Trigger scheduling of jobs */
+ mali_executor_schedule_from_mask(schedule_mask, MALI_FALSE);
+}
+
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */
+
+void mali_scheduler_gp_pp_job_queue_print(void)
+{
+ struct mali_gp_job *gp_job = NULL;
+ struct mali_gp_job *tmp_gp_job = NULL;
+ struct mali_pp_job *pp_job = NULL;
+ struct mali_pp_job *tmp_pp_job = NULL;
+
+ MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj);
+ MALI_DEBUG_ASSERT_LOCK_HELD(mali_executor_lock_obj);
+
+ /* Dump job queue status */
+ if ((0 == job_queue_gp.depth) && (0 == job_queue_pp.depth)) {
+ MALI_PRINT(("No GP or PP jobs in the job queue.\n"));
+ return;
+ }
+
+ MALI_PRINT(("Total (%d) GP job in the job queue.\n", job_queue_gp.depth));
+ if (job_queue_gp.depth > 0) {
+ if (!_mali_osk_list_empty(&job_queue_gp.high_pri)) {
+ _MALI_OSK_LIST_FOREACHENTRY(gp_job, tmp_gp_job, &job_queue_gp.high_pri,
+ struct mali_gp_job, list) {
+ MALI_PRINT(("GP job(%p) id = %d tid = %d pid = %d in the gp job high_pri queue\n", gp_job, gp_job->id, gp_job->tid, gp_job->pid));
+ }
+ }
+
+ if (!_mali_osk_list_empty(&job_queue_gp.normal_pri)) {
+ _MALI_OSK_LIST_FOREACHENTRY(gp_job, tmp_gp_job, &job_queue_gp.normal_pri,
+ struct mali_gp_job, list) {
+ MALI_PRINT(("GP job(%p) id = %d tid = %d pid = %d in the gp job normal_pri queue\n", gp_job, gp_job->id, gp_job->tid, gp_job->pid));
+ }
+ }
+ }
+
+ MALI_PRINT(("Total (%d) PP job in the job queue.\n", job_queue_pp.depth));
+ if (job_queue_pp.depth > 0) {
+ if (!_mali_osk_list_empty(&job_queue_pp.high_pri)) {
+ _MALI_OSK_LIST_FOREACHENTRY(pp_job, tmp_pp_job, &job_queue_pp.high_pri,
+ struct mali_pp_job, list) {
+ if (mali_pp_job_is_virtual(pp_job)) {
+ MALI_PRINT(("PP Virtual job(%p) id = %d tid = %d pid = %d in the pp job high_pri queue\n", pp_job, pp_job->id, pp_job->tid, pp_job->pid));
+ } else {
+ MALI_PRINT(("PP Physical job(%p) id = %d tid = %d pid = %d in the pp job high_pri queue\n", pp_job, pp_job->id, pp_job->tid, pp_job->pid));
+ }
+ }
+ }
+
+ if (!_mali_osk_list_empty(&job_queue_pp.normal_pri)) {
+ _MALI_OSK_LIST_FOREACHENTRY(pp_job, tmp_pp_job, &job_queue_pp.normal_pri,
+ struct mali_pp_job, list) {
+ if (mali_pp_job_is_virtual(pp_job)) {
+ MALI_PRINT(("PP Virtual job(%p) id = %d tid = %d pid = %d in the pp job normal_pri queue\n", pp_job, pp_job->id, pp_job->tid, pp_job->pid));
+ } else {
+ MALI_PRINT(("PP Physical job(%p) id = %d tid = %d pid = %d in the pp job normal_pri queue\n", pp_job, pp_job->id, pp_job->tid, pp_job->pid));
+ }
+ }
+ }
+ }
+
+ /* dump group running job status */
+ mali_executor_running_status_print();
+}
diff --git a/drivers/gpu/arm/utgard/common/mali_scheduler.h b/drivers/gpu/arm/utgard/common/mali_scheduler.h
new file mode 100644
index 000000000000..f24cf42b8a79
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_scheduler.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2012-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_SCHEDULER_H__
+#define __MALI_SCHEDULER_H__
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_scheduler_types.h"
+#include "mali_session.h"
+
+struct mali_scheduler_job_queue {
+ _MALI_OSK_LIST_HEAD(normal_pri); /* Queued jobs with normal priority */
+ _MALI_OSK_LIST_HEAD(high_pri); /* Queued jobs with high priority */
+ u32 depth; /* Depth of combined queues. */
+ u32 big_job_num;
+};
+
+extern _mali_osk_spinlock_irq_t *mali_scheduler_lock_obj;
+
+/* Queue of jobs to be executed on the GP group */
+extern struct mali_scheduler_job_queue job_queue_gp;
+
+/* Queue of PP jobs */
+extern struct mali_scheduler_job_queue job_queue_pp;
+
+extern _mali_osk_atomic_t mali_job_id_autonumber;
+extern _mali_osk_atomic_t mali_job_cache_order_autonumber;
+
+#define MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD() MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj)
+
+_mali_osk_errcode_t mali_scheduler_initialize(void);
+void mali_scheduler_terminate(void);
+
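+/*
+ * The scheduler lock is an IRQ-disabling spinlock
+ * (_mali_osk_spinlock_irq_t); callers must not sleep while holding it.
+ */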
+MALI_STATIC_INLINE void mali_scheduler_lock(void)
+{
+ _mali_osk_spinlock_irq_lock(mali_scheduler_lock_obj);
+ MALI_DEBUG_PRINT(5, ("Mali scheduler: scheduler lock taken.\n"));
+}
+
+MALI_STATIC_INLINE void mali_scheduler_unlock(void)
+{
+ MALI_DEBUG_PRINT(5, ("Mali scheduler: Releasing scheduler lock.\n"));
+ _mali_osk_spinlock_irq_unlock(mali_scheduler_lock_obj);
+}
+
+MALI_STATIC_INLINE u32 mali_scheduler_job_gp_count(void)
+{
+ return job_queue_gp.depth;
+}
+
+MALI_STATIC_INLINE u32 mali_scheduler_job_gp_big_job_count(void)
+{
+ return job_queue_gp.big_job_num;
+}
+
+u32 mali_scheduler_job_physical_head_count(void);
+
+mali_bool mali_scheduler_job_next_is_virtual(void);
+
+struct mali_gp_job *mali_scheduler_job_gp_get(void);
+struct mali_pp_job *mali_scheduler_job_pp_physical_peek(void);
+struct mali_pp_job *mali_scheduler_job_pp_virtual_peek(void);
+struct mali_pp_job *mali_scheduler_job_pp_physical_get(u32 *sub_job);
+struct mali_pp_job *mali_scheduler_job_pp_virtual_get(void);
+
+MALI_STATIC_INLINE u32 mali_scheduler_get_new_id(void)
+{
+ return _mali_osk_atomic_inc_return(&mali_job_id_autonumber);
+}
+
+MALI_STATIC_INLINE u32 mali_scheduler_get_new_cache_order(void)
+{
+ return _mali_osk_atomic_inc_return(&mali_job_cache_order_autonumber);
+}
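+
+/*
+ * Job ids and cache order numbers are monotonically increasing; the cache
+ * order is presumably used to order per-job cache maintenance (see
+ * mali_gp_job_set_cache_order() and mali_pp_job_set_cache_order()).
+ */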
+
+/**
+ * @brief Used by the Timeline system to queue a GP job.
+ *
+ * @note @ref mali_executor_schedule_from_mask() should be called if this
+ * function returns non-zero.
+ *
+ * @param job The GP job that is being activated.
+ *
+ * @return A scheduling bitmask that can be used to decide if scheduling is
+ * necessary after this call.
+ */
+mali_scheduler_mask mali_scheduler_activate_gp_job(struct mali_gp_job *job);
+
+/**
+ * @brief Used by the Timeline system to queue a PP job.
+ *
+ * @note @ref mali_executor_schedule_from_mask() should be called if this
+ * function returns non-zero.
+ *
+ * @param job The PP job that is being activated.
+ *
+ * @return A scheduling bitmask that can be used to decide if scheduling is
+ * necessary after this call.
+ */
+mali_scheduler_mask mali_scheduler_activate_pp_job(struct mali_pp_job *job);
+
+void mali_scheduler_complete_gp_job(struct mali_gp_job *job,
+ mali_bool success,
+ mali_bool user_notification,
+ mali_bool dequeued);
+
+void mali_scheduler_complete_pp_job(struct mali_pp_job *job,
+ u32 num_cores_in_virtual,
+ mali_bool user_notification,
+ mali_bool dequeued);
+
+void mali_scheduler_abort_session(struct mali_session_data *session);
+
+void mali_scheduler_return_pp_job_to_user(struct mali_pp_job *job,
+ u32 num_cores_in_virtual);
+
+#if MALI_STATE_TRACKING
+u32 mali_scheduler_dump_state(char *buf, u32 size);
+#endif
+
+void mali_scheduler_gp_pp_job_queue_print(void);
+
+#endif /* __MALI_SCHEDULER_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_scheduler_types.h b/drivers/gpu/arm/utgard/common/mali_scheduler_types.h
new file mode 100644
index 000000000000..f862961d146e
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_scheduler_types.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_SCHEDULER_TYPES_H__
+#define __MALI_SCHEDULER_TYPES_H__
+
+#include "mali_osk.h"
+
+#define MALI_SCHEDULER_JOB_ID_SPAN 65535
+
+/**
+ * Bitmask used for deferred scheduling of subsystems.
+ */
+typedef u32 mali_scheduler_mask;
+
+#define MALI_SCHEDULER_MASK_GP (1<<0)
+#define MALI_SCHEDULER_MASK_PP (1<<1)
+
+#define MALI_SCHEDULER_MASK_EMPTY 0
+#define MALI_SCHEDULER_MASK_ALL (MALI_SCHEDULER_MASK_GP | MALI_SCHEDULER_MASK_PP)
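+
+/*
+ * Typical use (a sketch): collect masks from the activation functions and
+ * hand the combined result to the executor, e.g.
+ *
+ * mali_scheduler_mask mask = MALI_SCHEDULER_MASK_EMPTY;
+ * mask |= mali_scheduler_activate_pp_job(job);
+ * if (MALI_SCHEDULER_MASK_EMPTY != mask)
+ * mali_executor_schedule_from_mask(mask, MALI_FALSE);
+ */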
+
+#endif /* __MALI_SCHEDULER_TYPES_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_session.c b/drivers/gpu/arm/utgard/common/mali_session.c
new file mode 100644
index 000000000000..e0a2805b13d3
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_session.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2012-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_session.h"
+#include "mali_ukk.h"
+#ifdef MALI_MEM_SWAP_TRACKING
+#include "mali_memory_swap_alloc.h"
+#endif
+
+_MALI_OSK_LIST_HEAD(mali_sessions);
+static u32 mali_session_count = 0;
+
+_mali_osk_spinlock_irq_t *mali_sessions_lock = NULL;
+wait_queue_head_t pending_queue;
+
+_mali_osk_errcode_t mali_session_initialize(void)
+{
+ _MALI_OSK_INIT_LIST_HEAD(&mali_sessions);
+ /* init wait queue for big varying job */
+ init_waitqueue_head(&pending_queue);
+
+ mali_sessions_lock = _mali_osk_spinlock_irq_init(
+ _MALI_OSK_LOCKFLAG_ORDERED,
+ _MALI_OSK_LOCK_ORDER_SESSIONS);
+ if (NULL == mali_sessions_lock) {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_session_terminate(void)
+{
+ if (NULL != mali_sessions_lock) {
+ _mali_osk_spinlock_irq_term(mali_sessions_lock);
+ mali_sessions_lock = NULL;
+ }
+}
+
+void mali_session_add(struct mali_session_data *session)
+{
+ mali_session_lock();
+ _mali_osk_list_add(&session->link, &mali_sessions);
+ mali_session_count++;
+ mali_session_unlock();
+}
+
+void mali_session_remove(struct mali_session_data *session)
+{
+ mali_session_lock();
+ _mali_osk_list_delinit(&session->link);
+ mali_session_count--;
+ mali_session_unlock();
+}
+
+u32 mali_session_get_count(void)
+{
+ return mali_session_count;
+}
+
+wait_queue_head_t *mali_session_get_wait_queue(void)
+{
+ return &pending_queue;
+}
+
+/*
+ * Get the max number of completed window jobs from all active sessions;
+ * used when calculating window render frames per second.
+ */
+#if defined(CONFIG_MALI_DVFS)
+u32 mali_session_max_window_num(void)
+{
+ struct mali_session_data *session, *tmp;
+ u32 max_window_num = 0;
+ u32 tmp_number = 0;
+
+ mali_session_lock();
+
+ MALI_SESSION_FOREACH(session, tmp, link) {
+ tmp_number = _mali_osk_atomic_xchg(
+ &session->number_of_window_jobs, 0);
+ if (max_window_num < tmp_number) {
+ max_window_num = tmp_number;
+ }
+ }
+
+ mali_session_unlock();
+
+ return max_window_num;
+}
+#endif
+
+void mali_session_memory_tracking(_mali_osk_print_ctx *print_ctx)
+{
+ struct mali_session_data *session, *tmp;
+ u32 mali_mem_usage;
+ u32 total_mali_mem_size;
+#ifdef MALI_MEM_SWAP_TRACKING
+ u32 swap_pool_size;
+ u32 swap_unlock_size;
+#endif
+
+ MALI_DEBUG_ASSERT_POINTER(print_ctx);
+ mali_session_lock();
+ MALI_SESSION_FOREACH(session, tmp, link) {
+#ifdef MALI_MEM_SWAP_TRACKING
+ _mali_osk_ctxprintf(print_ctx, " %-25s %-10u %-10u %-15u %-15u %-10u %-10u %-10u\n",
+ session->comm, session->pid,
+ (atomic_read(&session->mali_mem_allocated_pages)) * _MALI_OSK_MALI_PAGE_SIZE,
+ session->max_mali_mem_allocated_size,
+ (atomic_read(&session->mali_mem_array[MALI_MEM_EXTERNAL])) * _MALI_OSK_MALI_PAGE_SIZE,
+ (atomic_read(&session->mali_mem_array[MALI_MEM_UMP])) * _MALI_OSK_MALI_PAGE_SIZE,
+ (atomic_read(&session->mali_mem_array[MALI_MEM_DMA_BUF])) * _MALI_OSK_MALI_PAGE_SIZE,
+ (atomic_read(&session->mali_mem_array[MALI_MEM_SWAP])) * _MALI_OSK_MALI_PAGE_SIZE
+ );
+#else
+ _mali_osk_ctxprintf(print_ctx, " %-25s %-10u %-10u %-15u %-15u %-10u %-10u \n",
+ session->comm, session->pid,
+ (atomic_read(&session->mali_mem_allocated_pages)) * _MALI_OSK_MALI_PAGE_SIZE,
+ session->max_mali_mem_allocated_size,
+ (atomic_read(&session->mali_mem_array[MALI_MEM_EXTERNAL])) * _MALI_OSK_MALI_PAGE_SIZE,
+ (atomic_read(&session->mali_mem_array[MALI_MEM_UMP])) * _MALI_OSK_MALI_PAGE_SIZE,
+ (atomic_read(&session->mali_mem_array[MALI_MEM_DMA_BUF])) * _MALI_OSK_MALI_PAGE_SIZE
+ );
+#endif
+ }
+ mali_session_unlock();
+ mali_mem_usage = _mali_ukk_report_memory_usage();
+ total_mali_mem_size = _mali_ukk_report_total_memory_size();
+ _mali_osk_ctxprintf(print_ctx, "Mali mem usage: %u\nMali mem limit: %u\n", mali_mem_usage, total_mali_mem_size);
+#ifdef MALI_MEM_SWAP_TRACKING
+ mali_mem_swap_tracking(&swap_pool_size, &swap_unlock_size);
+ _mali_osk_ctxprintf(print_ctx, "Mali swap mem pool : %u\nMali swap mem unlock: %u\n", swap_pool_size, swap_unlock_size);
+#endif
+}
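
mali_session_max_window_num() both reads and resets each session's counter via an atomic exchange, so one call per DVFS period yields a per-period maximum. A sketch of how a DVFS tick might consume it (the example_ helper is hypothetical):

#include "mali_session.h"
#include "mali_kernel_common.h"

#if defined(CONFIG_MALI_DVFS)
static void example_dvfs_sample_window_jobs(void)
{
	/* Atomically fetches and zeroes every session's window-job counter. */
	u32 max_windows = mali_session_max_window_num();

	MALI_DEBUG_PRINT(4, ("example: max window jobs this period: %u\n", max_windows));
}
#endif
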
diff --git a/drivers/gpu/arm/utgard/common/mali_session.h b/drivers/gpu/arm/utgard/common/mali_session.h
new file mode 100644
index 000000000000..6791b2b5f110
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_session.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_SESSION_H__
+#define __MALI_SESSION_H__
+
+#include "mali_mmu_page_directory.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_memory_types.h"
+#include "mali_memory_manager.h"
+
+struct mali_timeline_system;
+struct mali_soft_system;
+
+/* Number of frame builder job lists per session. */
+#define MALI_PP_JOB_FB_LOOKUP_LIST_SIZE 16
+#define MALI_PP_JOB_FB_LOOKUP_LIST_MASK (MALI_PP_JOB_FB_LOOKUP_LIST_SIZE - 1)
+/* Max pending big jobs allowed in the kernel */
+#define MALI_MAX_PENDING_BIG_JOB (2)
+
+struct mali_session_data {
+ _mali_osk_notification_queue_t *ioctl_queue;
+
+ _mali_osk_mutex_t *memory_lock; /**< Lock protecting the vm manipulation */
+#if 0
+ _mali_osk_list_t memory_head; /**< Track all the memory allocated in this session, for freeing on abnormal termination */
+#endif
+ struct mali_page_directory *page_directory; /**< MMU page directory for this session */
+
+ _MALI_OSK_LIST_HEAD(link); /**< Link for list of all sessions */
+ _MALI_OSK_LIST_HEAD(pp_job_list); /**< List of all PP jobs on this session */
+
+#if defined(CONFIG_MALI_DVFS)
+ _mali_osk_atomic_t number_of_window_jobs; /**< Record the window jobs completed on this session in a period */
+#endif
+
+ _mali_osk_list_t pp_job_fb_lookup_list[MALI_PP_JOB_FB_LOOKUP_LIST_SIZE]; /**< List of PP job lists per frame builder id. Used to link jobs from same frame builder. */
+
+ struct mali_soft_job_system *soft_job_system; /**< Soft job system for this session. */
+ struct mali_timeline_system *timeline_system; /**< Timeline system for this session. */
+
+ mali_bool is_aborting; /**< MALI_TRUE if the session is aborting, MALI_FALSE if not. */
+ mali_bool use_high_priority_job_queue; /**< If MALI_TRUE, jobs added from this session will use the high priority job queues. */
+ u32 pid;
+ char *comm;
+ atomic_t mali_mem_array[MALI_MEM_TYPE_MAX]; /**< The array to record mem types' usage for this session. */
+ atomic_t mali_mem_allocated_pages; /**< The number of currently allocated mali memory pages, which includes mali os memory and mali dedicated memory. */
+ size_t max_mali_mem_allocated_size; /**< The peak mali memory allocated size, which includes mali os memory and mali dedicated memory. */
+ /* Added for the new memory system */
+ struct mali_allocation_manager allocation_mgr;
+};
+
+_mali_osk_errcode_t mali_session_initialize(void);
+void mali_session_terminate(void);
+
+/* List of all sessions. Actual list head in mali_kernel_core.c */
+extern _mali_osk_list_t mali_sessions;
+/* Lock to protect modification and access to the mali_sessions list */
+extern _mali_osk_spinlock_irq_t *mali_sessions_lock;
+
+MALI_STATIC_INLINE void mali_session_lock(void)
+{
+ _mali_osk_spinlock_irq_lock(mali_sessions_lock);
+}
+
+MALI_STATIC_INLINE void mali_session_unlock(void)
+{
+ _mali_osk_spinlock_irq_unlock(mali_sessions_lock);
+}
+
+void mali_session_add(struct mali_session_data *session);
+void mali_session_remove(struct mali_session_data *session);
+u32 mali_session_get_count(void);
+wait_queue_head_t *mali_session_get_wait_queue(void);
+
+#define MALI_SESSION_FOREACH(session, tmp, link) \
+ _MALI_OSK_LIST_FOREACHENTRY(session, tmp, &mali_sessions, struct mali_session_data, link)
+
+MALI_STATIC_INLINE struct mali_page_directory *mali_session_get_page_directory(struct mali_session_data *session)
+{
+ return session->page_directory;
+}
+
+MALI_STATIC_INLINE void mali_session_memory_lock(struct mali_session_data *session)
+{
+ MALI_DEBUG_ASSERT_POINTER(session);
+ _mali_osk_mutex_wait(session->memory_lock);
+}
+
+MALI_STATIC_INLINE void mali_session_memory_unlock(struct mali_session_data *session)
+{
+ MALI_DEBUG_ASSERT_POINTER(session);
+ _mali_osk_mutex_signal(session->memory_lock);
+}
+
+MALI_STATIC_INLINE void mali_session_send_notification(struct mali_session_data *session, _mali_osk_notification_t *object)
+{
+ _mali_osk_notification_queue_send(session->ioctl_queue, object);
+}
+
+#if defined(CONFIG_MALI_DVFS)
+
+MALI_STATIC_INLINE void mali_session_inc_num_window_jobs(struct mali_session_data *session)
+{
+ MALI_DEBUG_ASSERT_POINTER(session);
+ _mali_osk_atomic_inc(&session->number_of_window_jobs);
+}
+
+/*
+ * Get the max completed window jobs from all active session,
+ * which will be used in window render frame per sec calculate
+ */
+u32 mali_session_max_window_num(void);
+
+#endif
+
+void mali_session_memory_tracking(_mali_osk_print_ctx *print_ctx);
+
+#endif /* __MALI_SESSION_H__ */
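
A sketch of the intended iteration pattern: MALI_SESSION_FOREACH must run with the sessions lock held, since the list is protected by mali_sessions_lock (the example_ helper is hypothetical):

#include "mali_session.h"

static u32 example_count_aborting_sessions(void)
{
	struct mali_session_data *session, *tmp;
	u32 count = 0;

	mali_session_lock();
	MALI_SESSION_FOREACH(session, tmp, link) {
		if (session->is_aborting) {
			count++;
		}
	}
	mali_session_unlock();

	return count;
}
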
diff --git a/drivers/gpu/arm/utgard/common/mali_soft_job.c b/drivers/gpu/arm/utgard/common/mali_soft_job.c
new file mode 100644
index 000000000000..36ac982e1df0
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_soft_job.c
@@ -0,0 +1,438 @@
+/*
+ * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_soft_job.h"
+#include "mali_osk.h"
+#include "mali_timeline.h"
+#include "mali_session.h"
+#include "mali_kernel_common.h"
+#include "mali_uk_types.h"
+#include "mali_scheduler.h"
+#include "mali_executor.h"
+
+MALI_STATIC_INLINE void mali_soft_job_system_lock(struct mali_soft_job_system *system)
+{
+ MALI_DEBUG_ASSERT_POINTER(system);
+ _mali_osk_spinlock_irq_lock(system->lock);
+ MALI_DEBUG_PRINT(5, ("Mali Soft Job: soft system %p lock taken\n", system));
+ MALI_DEBUG_ASSERT(0 == system->lock_owner);
+ MALI_DEBUG_CODE(system->lock_owner = _mali_osk_get_tid());
+}
+
+MALI_STATIC_INLINE void mali_soft_job_system_unlock(struct mali_soft_job_system *system)
+{
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_PRINT(5, ("Mali Soft Job: releasing soft system %p lock\n", system));
+ MALI_DEBUG_ASSERT(_mali_osk_get_tid() == system->lock_owner);
+ MALI_DEBUG_CODE(system->lock_owner = 0);
+ _mali_osk_spinlock_irq_unlock(system->lock);
+}
+
+#if defined(DEBUG)
+MALI_STATIC_INLINE void mali_soft_job_system_assert_locked(struct mali_soft_job_system *system)
+{
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT(_mali_osk_get_tid() == system->lock_owner);
+}
+#define MALI_ASSERT_SOFT_JOB_SYSTEM_LOCKED(system) mali_soft_job_system_assert_locked(system)
+#else
+#define MALI_ASSERT_SOFT_JOB_SYSTEM_LOCKED(system)
+#endif /* defined(DEBUG) */
+
+struct mali_soft_job_system *mali_soft_job_system_create(struct mali_session_data *session)
+{
+ struct mali_soft_job_system *system;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ system = (struct mali_soft_job_system *) _mali_osk_calloc(1, sizeof(struct mali_soft_job_system));
+ if (NULL == system) {
+ return NULL;
+ }
+
+ system->session = session;
+
+ system->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER);
+ if (NULL == system->lock) {
+ mali_soft_job_system_destroy(system);
+ return NULL;
+ }
+ system->lock_owner = 0;
+ system->last_job_id = 0;
+
+ _MALI_OSK_INIT_LIST_HEAD(&(system->jobs_used));
+
+ return system;
+}
+
+void mali_soft_job_system_destroy(struct mali_soft_job_system *system)
+{
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ /* All jobs should be free at this point. */
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&(system->jobs_used)));
+
+ if (NULL != system) {
+ if (NULL != system->lock) {
+ _mali_osk_spinlock_irq_term(system->lock);
+ }
+ _mali_osk_free(system);
+ }
+}
+
+static void mali_soft_job_system_free_job(struct mali_soft_job_system *system, struct mali_soft_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ mali_soft_job_system_lock(job->system);
+
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_INVALID_ID != job->id);
+ MALI_DEBUG_ASSERT(system == job->system);
+
+ _mali_osk_list_del(&(job->system_list));
+
+ mali_soft_job_system_unlock(job->system);
+
+ _mali_osk_free(job);
+}
+
+MALI_STATIC_INLINE struct mali_soft_job *mali_soft_job_system_lookup_job(struct mali_soft_job_system *system, u32 job_id)
+{
+ struct mali_soft_job *job, *tmp;
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_ASSERT_SOFT_JOB_SYSTEM_LOCKED(system);
+
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &system->jobs_used, struct mali_soft_job, system_list) {
+ if (job->id == job_id)
+ return job;
+ }
+
+ return NULL;
+}
+
+void mali_soft_job_destroy(struct mali_soft_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_POINTER(job->system);
+
+ MALI_DEBUG_PRINT(4, ("Mali Soft Job: destroying soft job %u (0x%08X)\n", job->id, job));
+
+ if (NULL != job) {
+ if (0 < _mali_osk_atomic_dec_return(&job->refcount)) return;
+
+ _mali_osk_atomic_term(&job->refcount);
+
+ if (NULL != job->activated_notification) {
+ _mali_osk_notification_delete(job->activated_notification);
+ job->activated_notification = NULL;
+ }
+
+ mali_soft_job_system_free_job(job->system, job);
+ }
+}
+
+struct mali_soft_job *mali_soft_job_create(struct mali_soft_job_system *system, mali_soft_job_type type, u64 user_job)
+{
+ struct mali_soft_job *job;
+ _mali_osk_notification_t *notification = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT((MALI_SOFT_JOB_TYPE_USER_SIGNALED == type) ||
+ (MALI_SOFT_JOB_TYPE_SELF_SIGNALED == type));
+
+ notification = _mali_osk_notification_create(_MALI_NOTIFICATION_SOFT_ACTIVATED, sizeof(_mali_uk_soft_job_activated_s));
+ if (unlikely(NULL == notification)) {
+ MALI_PRINT_ERROR(("Mali Soft Job: failed to allocate notification"));
+ return NULL;
+ }
+
+ job = _mali_osk_malloc(sizeof(struct mali_soft_job));
+ if (unlikely(NULL == job)) {
+ MALI_DEBUG_PRINT(2, ("Mali Soft Job: system alloc job failed.\n"));
+ _mali_osk_notification_delete(notification);
+ return NULL;
+ }
+
+ mali_soft_job_system_lock(system);
+
+ job->system = system;
+ job->id = system->last_job_id++;
+ job->state = MALI_SOFT_JOB_STATE_ALLOCATED;
+
+ _mali_osk_list_add(&(job->system_list), &(system->jobs_used));
+
+ job->type = type;
+ job->user_job = user_job;
+ job->activated = MALI_FALSE;
+
+ job->activated_notification = notification;
+
+ _mali_osk_atomic_init(&job->refcount, 1);
+
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_ALLOCATED == job->state);
+ MALI_DEBUG_ASSERT(system == job->system);
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_INVALID_ID != job->id);
+
+ mali_soft_job_system_unlock(system);
+
+ return job;
+}
+
+mali_timeline_point mali_soft_job_start(struct mali_soft_job *job, struct mali_timeline_fence *fence)
+{
+ mali_timeline_point point;
+ struct mali_soft_job_system *system;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_POINTER(fence);
+
+ MALI_DEBUG_ASSERT_POINTER(job->system);
+ system = job->system;
+
+ MALI_DEBUG_ASSERT_POINTER(system->session);
+ MALI_DEBUG_ASSERT_POINTER(system->session->timeline_system);
+
+ mali_soft_job_system_lock(system);
+
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_ALLOCATED == job->state);
+ job->state = MALI_SOFT_JOB_STATE_STARTED;
+
+ mali_soft_job_system_unlock(system);
+
+ MALI_DEBUG_PRINT(4, ("Mali Soft Job: starting soft job %u (0x%08X)\n", job->id, job));
+
+ mali_timeline_tracker_init(&job->tracker, MALI_TIMELINE_TRACKER_SOFT, fence, job);
+ point = mali_timeline_system_add_tracker(system->session->timeline_system, &job->tracker, MALI_TIMELINE_SOFT);
+
+ return point;
+}
+
+static mali_bool mali_soft_job_is_activated(void *data)
+{
+ struct mali_soft_job *job;
+
+ job = (struct mali_soft_job *) data;
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ return job->activated;
+}
+
+_mali_osk_errcode_t mali_soft_job_system_signal_job(struct mali_soft_job_system *system, u32 job_id)
+{
+ struct mali_soft_job *job;
+ struct mali_timeline_system *timeline_system;
+ mali_scheduler_mask schedule_mask;
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ mali_soft_job_system_lock(system);
+
+ job = mali_soft_job_system_lookup_job(system, job_id);
+
+ if ((NULL == job) || (MALI_SOFT_JOB_TYPE_USER_SIGNALED != job->type)
+ || !(MALI_SOFT_JOB_STATE_STARTED == job->state || MALI_SOFT_JOB_STATE_TIMED_OUT == job->state)) {
+ mali_soft_job_system_unlock(system);
+ MALI_PRINT_ERROR(("Mali Soft Job: invalid soft job id %u", job_id));
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ }
+
+ if (MALI_SOFT_JOB_STATE_TIMED_OUT == job->state) {
+ job->state = MALI_SOFT_JOB_STATE_SIGNALED;
+ mali_soft_job_system_unlock(system);
+
+ MALI_DEBUG_ASSERT(MALI_TRUE == job->activated);
+ MALI_DEBUG_PRINT(4, ("Mali Soft Job: soft job %u (0x%08X) was timed out\n", job->id, job));
+ mali_soft_job_destroy(job);
+
+ return _MALI_OSK_ERR_TIMEOUT;
+ }
+
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED == job->state);
+
+ job->state = MALI_SOFT_JOB_STATE_SIGNALED;
+ mali_soft_job_system_unlock(system);
+
+ /* Since the job now is in signaled state, timeouts from the timeline system will be
+ * ignored, and it is not possible to signal this job again. */
+
+ timeline_system = system->session->timeline_system;
+ MALI_DEBUG_ASSERT_POINTER(timeline_system);
+
+ /* Wait until activated. */
+ _mali_osk_wait_queue_wait_event(timeline_system->wait_queue, mali_soft_job_is_activated, (void *) job);
+
+ MALI_DEBUG_PRINT(4, ("Mali Soft Job: signaling soft job %u (0x%08X)\n", job->id, job));
+
+ schedule_mask = mali_timeline_tracker_release(&job->tracker);
+ mali_executor_schedule_from_mask(schedule_mask, MALI_FALSE);
+
+ mali_soft_job_destroy(job);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+static void mali_soft_job_send_activated_notification(struct mali_soft_job *job)
+{
+ if (NULL != job->activated_notification) {
+ _mali_uk_soft_job_activated_s *res = job->activated_notification->result_buffer;
+ res->user_job = job->user_job;
+ mali_session_send_notification(job->system->session, job->activated_notification);
+ }
+ job->activated_notification = NULL;
+}
+
+mali_scheduler_mask mali_soft_job_system_activate_job(struct mali_soft_job *job)
+{
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_POINTER(job->system);
+ MALI_DEBUG_ASSERT_POINTER(job->system->session);
+
+ MALI_DEBUG_PRINT(4, ("Mali Soft Job: Timeline activation for soft job %u (0x%08X).\n", job->id, job));
+
+ mali_soft_job_system_lock(job->system);
+
+ if (unlikely(job->system->session->is_aborting)) {
+ MALI_DEBUG_PRINT(3, ("Mali Soft Job: Soft job %u (0x%08X) activated while session is aborting.\n", job->id, job));
+
+ mali_soft_job_system_unlock(job->system);
+
+ /* Since we are in shutdown, we can ignore the scheduling bitmask. */
+ mali_timeline_tracker_release(&job->tracker);
+ mali_soft_job_destroy(job);
+ return schedule_mask;
+ }
+
+ /* Send activated notification. */
+ mali_soft_job_send_activated_notification(job);
+
+ /* Wake up sleeping signaler. */
+ job->activated = MALI_TRUE;
+
+ /* If job type is self signaled, release tracker, move soft job to free list, and scheduler at once */
+ if (MALI_SOFT_JOB_TYPE_SELF_SIGNALED == job->type) {
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED == job->state);
+
+ job->state = MALI_SOFT_JOB_STATE_SIGNALED;
+ mali_soft_job_system_unlock(job->system);
+
+ schedule_mask |= mali_timeline_tracker_release(&job->tracker);
+
+ mali_soft_job_destroy(job);
+ } else {
+ _mali_osk_wait_queue_wake_up(job->tracker.system->wait_queue);
+
+ mali_soft_job_system_unlock(job->system);
+ }
+
+ return schedule_mask;
+}
+
+mali_scheduler_mask mali_soft_job_system_timeout_job(struct mali_soft_job *job)
+{
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_POINTER(job->system);
+ MALI_DEBUG_ASSERT_POINTER(job->system->session);
+ MALI_DEBUG_ASSERT(MALI_TRUE == job->activated);
+
+ MALI_DEBUG_PRINT(4, ("Mali Soft Job: Timeline timeout for soft job %u (0x%08X).\n", job->id, job));
+
+ mali_soft_job_system_lock(job->system);
+
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED == job->state ||
+ MALI_SOFT_JOB_STATE_SIGNALED == job->state);
+
+ if (unlikely(job->system->session->is_aborting)) {
+ /* The session is aborting. This job will be released and destroyed by @ref
+ * mali_soft_job_system_abort(). */
+ mali_soft_job_system_unlock(job->system);
+
+ return MALI_SCHEDULER_MASK_EMPTY;
+ }
+
+ if (MALI_SOFT_JOB_STATE_STARTED != job->state) {
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_SIGNALED == job->state);
+
+ /* The job is about to be signaled, ignore timeout. */
+ MALI_DEBUG_PRINT(4, ("Mali Soft Job: Timeout on soft job %u (0x%08X) in signaled state.\n", job->id, job));
+ mali_soft_job_system_unlock(job->system);
+ return schedule_mask;
+ }
+
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED == job->state);
+
+ job->state = MALI_SOFT_JOB_STATE_TIMED_OUT;
+ _mali_osk_atomic_inc(&job->refcount);
+
+ mali_soft_job_system_unlock(job->system);
+
+ schedule_mask = mali_timeline_tracker_release(&job->tracker);
+
+ mali_soft_job_destroy(job);
+
+ return schedule_mask;
+}
+
+void mali_soft_job_system_abort(struct mali_soft_job_system *system)
+{
+ struct mali_soft_job *job, *tmp;
+ _MALI_OSK_LIST_HEAD_STATIC_INIT(jobs);
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(system->session);
+ MALI_DEBUG_ASSERT(system->session->is_aborting);
+
+ MALI_DEBUG_PRINT(3, ("Mali Soft Job: Aborting soft job system for session 0x%08X.\n", system->session));
+
+ mali_soft_job_system_lock(system);
+
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &system->jobs_used, struct mali_soft_job, system_list) {
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED == job->state ||
+ MALI_SOFT_JOB_STATE_TIMED_OUT == job->state);
+
+ if (MALI_SOFT_JOB_STATE_STARTED == job->state) {
+ /* If the job has been activated, we have to release the tracker and destroy
+ * the job. If not, the tracker will be released and the job destroyed when
+ * it is activated. */
+ if (MALI_TRUE == job->activated) {
+ MALI_DEBUG_PRINT(3, ("Mali Soft Job: Aborting unsignaled soft job %u (0x%08X).\n", job->id, job));
+
+ job->state = MALI_SOFT_JOB_STATE_SIGNALED;
+ _mali_osk_list_move(&job->system_list, &jobs);
+ }
+ } else if (MALI_SOFT_JOB_STATE_TIMED_OUT == job->state) {
+ MALI_DEBUG_PRINT(3, ("Mali Soft Job: Aborting timed out soft job %u (0x%08X).\n", job->id, job));
+
+ /* We need to destroy this soft job. */
+ _mali_osk_list_move(&job->system_list, &jobs);
+ }
+ }
+
+ mali_soft_job_system_unlock(system);
+
+ /* Release and destroy jobs. */
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &jobs, struct mali_soft_job, system_list) {
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_SIGNALED == job->state ||
+ MALI_SOFT_JOB_STATE_TIMED_OUT == job->state);
+
+ if (MALI_SOFT_JOB_STATE_SIGNALED == job->state) {
+ mali_timeline_tracker_release(&job->tracker);
+ }
+
+ /* Move job back to used list before destroying. */
+ _mali_osk_list_move(&job->system_list, &system->jobs_used);
+
+ mali_soft_job_destroy(job);
+ }
+}
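
mali_soft_job_system_signal_job() distinguishes three outcomes, and the job must be treated as destroyed on both _MALI_OSK_ERR_OK and _MALI_OSK_ERR_TIMEOUT. A hedged sketch of a caller, for instance an ioctl handler, dispatching on the result (the example_ helper is hypothetical):

#include "mali_soft_job.h"
#include "mali_kernel_common.h"

static _mali_osk_errcode_t example_signal_soft_job(struct mali_soft_job_system *system, u32 job_id)
{
	_mali_osk_errcode_t err = mali_soft_job_system_signal_job(system, job_id);

	switch (err) {
	case _MALI_OSK_ERR_OK:
		/* Job was signaled and destroyed; do not touch it again. */
		break;
	case _MALI_OSK_ERR_TIMEOUT:
		/* The Timeline system timed the job out first; it is also destroyed. */
		break;
	default:
		/* _MALI_OSK_ERR_ITEM_NOT_FOUND: bad id, wrong type or wrong state. */
		MALI_DEBUG_PRINT(2, ("example: signal of job %u failed: %d\n", job_id, err));
		break;
	}

	return err;
}
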
diff --git a/drivers/gpu/arm/utgard/common/mali_soft_job.h b/drivers/gpu/arm/utgard/common/mali_soft_job.h
new file mode 100644
index 000000000000..f35394e60384
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_soft_job.h
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_SOFT_JOB_H__
+#define __MALI_SOFT_JOB_H__
+
+#include "mali_osk.h"
+
+#include "mali_timeline.h"
+
+struct mali_timeline_fence;
+struct mali_session_data;
+struct mali_soft_job;
+struct mali_soft_job_system;
+
+/**
+ * Soft job types.
+ *
+ * Soft jobs of type MALI_SOFT_JOB_TYPE_USER_SIGNALED will only complete after activation if they
+ * are either signaled by user-space (@ref mali_soft_job_system_signal_job) or timed out by the
+ * Timeline system.
+ * Soft jobs of type MALI_SOFT_JOB_TYPE_SELF_SIGNALED will release their job resources automatically
+ * in the kernel when the job is activated.
+ */
+typedef enum mali_soft_job_type {
+ MALI_SOFT_JOB_TYPE_SELF_SIGNALED,
+ MALI_SOFT_JOB_TYPE_USER_SIGNALED,
+} mali_soft_job_type;
+
+/**
+ * Soft job state.
+ *
+ * A job is first allocated, with its state set to MALI_SOFT_JOB_STATE_ALLOCATED.
+ * Once the job is started (@ref mali_soft_job_start) and added to the timeline system, the state
+ * changes to MALI_SOFT_JOB_STATE_STARTED.
+ *
+ * For soft jobs of type MALI_SOFT_JOB_TYPE_USER_SIGNALED the state is changed to
+ * MALI_SOFT_JOB_STATE_SIGNALED when @ref mali_soft_job_system_signal_job is called and the soft
+ * job's state is MALI_SOFT_JOB_STATE_STARTED or MALI_SOFT_JOB_STATE_TIMED_OUT.
+ *
+ * If a soft job of type MALI_SOFT_JOB_TYPE_USER_SIGNALED is timed out before being signaled, the
+ * state is changed to MALI_SOFT_JOB_STATE_TIMED_OUT. This can only happen to soft jobs in state
+ * MALI_SOFT_JOB_STATE_STARTED.
+ *
+ */
+typedef enum mali_soft_job_state {
+ MALI_SOFT_JOB_STATE_ALLOCATED,
+ MALI_SOFT_JOB_STATE_STARTED,
+ MALI_SOFT_JOB_STATE_SIGNALED,
+ MALI_SOFT_JOB_STATE_TIMED_OUT,
+} mali_soft_job_state;
+
+#define MALI_SOFT_JOB_INVALID_ID ((u32) -1)
+
+/**
+ * Soft job struct.
+ *
+ * Soft job can be used to represent any kind of CPU work done in kernel-space.
+ */
+typedef struct mali_soft_job {
+ mali_soft_job_type type; /**< Soft job type. Must be one of MALI_SOFT_JOB_TYPE_*. */
+ u64 user_job; /**< Identifier for soft job in user space. */
+ _mali_osk_atomic_t refcount; /**< Soft jobs are reference counted to prevent premature deletion. */
+ struct mali_timeline_tracker tracker; /**< Timeline tracker for soft job. */
+ mali_bool activated; /**< MALI_TRUE if the job has been activated, MALI_FALSE if not. */
+ _mali_osk_notification_t *activated_notification; /**< Pre-allocated notification object for ACTIVATED_NOTIFICATION. */
+
+ /* Protected by soft job system lock. */
+ u32 id; /**< Used by user-space to find corresponding soft job in kernel-space. */
+ mali_soft_job_state state; /**< State of soft job, must be one of MALI_SOFT_JOB_STATE_*. */
+ struct mali_soft_job_system *system; /**< The soft job system this job is in. */
+ _mali_osk_list_t system_list; /**< List element used by soft job system. */
+} mali_soft_job;
+
+/**
+ * Per-session soft job system.
+ *
+ * The soft job system is used to manage all soft jobs that belong to a session.
+ */
+typedef struct mali_soft_job_system {
+ struct mali_session_data *session; /**< The session this soft job system belongs to. */
+ _MALI_OSK_LIST_HEAD(jobs_used); /**< List of all allocated soft jobs. */
+
+ _mali_osk_spinlock_irq_t *lock; /**< Lock used to protect soft job system and its soft jobs. */
+ u32 lock_owner; /**< Contains tid of thread that locked the system or 0, if not locked. */
+ u32 last_job_id; /**< Records the last job id; protected by the lock. */
+} mali_soft_job_system;
+
+/**
+ * Create a soft job system.
+ *
+ * @param session The session this soft job system will belong to.
+ * @return The new soft job system, or NULL if unsuccessful.
+ */
+struct mali_soft_job_system *mali_soft_job_system_create(struct mali_session_data *session);
+
+/**
+ * Destroy a soft job system.
+ *
+ * @note The soft job system must not have any started or activated jobs. Call @ref
+ * mali_soft_job_system_abort first.
+ *
+ * @param system The soft job system we are destroying.
+ */
+void mali_soft_job_system_destroy(struct mali_soft_job_system *system);
+
+/**
+ * Create a soft job.
+ *
+ * @param system Soft job system to create soft job from.
+ * @param type Type of the soft job.
+ * @param user_job Identifier for soft job in user space.
+ * @return New soft job if successful, NULL if not.
+ */
+struct mali_soft_job *mali_soft_job_create(struct mali_soft_job_system *system, mali_soft_job_type type, u64 user_job);
+
+/**
+ * Destroy soft job.
+ *
+ * @param job Soft job to destroy.
+ */
+void mali_soft_job_destroy(struct mali_soft_job *job);
+
+/**
+ * Start a soft job.
+ *
+ * The soft job will be added to the Timeline system which will then activate it after all
+ * dependencies have been resolved.
+ *
+ * Create soft jobs with @ref mali_soft_job_create before starting them.
+ *
+ * @param job Soft job to start.
+ * @param fence Fence representing dependencies for this soft job.
+ * @return Point on soft job timeline.
+ */
+mali_timeline_point mali_soft_job_start(struct mali_soft_job *job, struct mali_timeline_fence *fence);
+
+/**
+ * Used by user-space to signal that a soft job has completed.
+ *
+ * @note Only valid for soft jobs with type MALI_SOFT_JOB_TYPE_USER_SIGNALED.
+ *
+ * @note The soft job must be in state MALI_SOFT_JOB_STATE_STARTED for the signal to be successful.
+ *
+ * @note If the soft job was signaled successfully, or it received a time out, the soft job will be
+ * destroyed after this call and should no longer be used.
+ *
+ * @note This function will block until the soft job has been activated.
+ *
+ * @param system The soft job system the job was started in.
+ * @param job_id ID of soft job we are signaling.
+ *
+ * @return _MALI_OSK_ERR_ITEM_NOT_FOUND if the soft job ID was invalid, _MALI_OSK_ERR_TIMEOUT if the
+ * soft job was timed out or _MALI_OSK_ERR_OK if we successfully signaled the soft job.
+ */
+_mali_osk_errcode_t mali_soft_job_system_signal_job(struct mali_soft_job_system *system, u32 job_id);
+
+/**
+ * Used by the Timeline system to activate a soft job.
+ *
+ * @param job The soft job that is being activated.
+ * @return A scheduling bitmask.
+ */
+mali_scheduler_mask mali_soft_job_system_activate_job(struct mali_soft_job *job);
+
+/**
+ * Used by the Timeline system to timeout a soft job.
+ *
+ * A soft job is timed out if it completes or is signaled later than MALI_TIMELINE_TIMEOUT_HZ after
+ * activation.
+ *
+ * @param job The soft job that is being timed out.
+ * @return A scheduling bitmask.
+ */
+mali_scheduler_mask mali_soft_job_system_timeout_job(struct mali_soft_job *job);
+
+/**
+ * Used to cleanup activated soft jobs in the soft job system on session abort.
+ *
+ * @param system The soft job system that is being aborted.
+ */
+void mali_soft_job_system_abort(struct mali_soft_job_system *system);
+
+#endif /* __MALI_SOFT_JOB_H__ */
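
Putting the lifecycle functions above together, a minimal sketch of creating and starting a user-signaled soft job (the example_ helper is hypothetical; error handling is reduced to the NULL check):

#include "mali_soft_job.h"
#include "mali_timeline.h"

static mali_timeline_point example_start_user_signaled_job(struct mali_soft_job_system *system,
		struct mali_timeline_fence *fence, u64 user_job)
{
	struct mali_soft_job *job;

	/* The new job starts out in state MALI_SOFT_JOB_STATE_ALLOCATED. */
	job = mali_soft_job_create(system, MALI_SOFT_JOB_TYPE_USER_SIGNALED, user_job);
	if (NULL == job) {
		return MALI_TIMELINE_NO_POINT;
	}

	/* Starting moves the job to MALI_SOFT_JOB_STATE_STARTED and adds it to
	 * the Timeline system, which activates it once the fence is resolved. */
	return mali_soft_job_start(job, fence);
}
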
diff --git a/drivers/gpu/arm/utgard/common/mali_spinlock_reentrant.c b/drivers/gpu/arm/utgard/common/mali_spinlock_reentrant.c
new file mode 100644
index 000000000000..178abaf43ba1
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_spinlock_reentrant.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2013, 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_spinlock_reentrant.h"
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+struct mali_spinlock_reentrant *mali_spinlock_reentrant_init(_mali_osk_lock_order_t lock_order)
+{
+ struct mali_spinlock_reentrant *spinlock;
+
+ spinlock = _mali_osk_calloc(1, sizeof(struct mali_spinlock_reentrant));
+ if (NULL == spinlock) {
+ return NULL;
+ }
+
+ spinlock->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, lock_order);
+ if (NULL == spinlock->lock) {
+ mali_spinlock_reentrant_term(spinlock);
+ return NULL;
+ }
+
+ return spinlock;
+}
+
+void mali_spinlock_reentrant_term(struct mali_spinlock_reentrant *spinlock)
+{
+ MALI_DEBUG_ASSERT_POINTER(spinlock);
+ MALI_DEBUG_ASSERT(0 == spinlock->counter && 0 == spinlock->owner);
+
+ if (NULL != spinlock->lock) {
+ _mali_osk_spinlock_irq_term(spinlock->lock);
+ }
+
+ _mali_osk_free(spinlock);
+}
+
+void mali_spinlock_reentrant_wait(struct mali_spinlock_reentrant *spinlock, u32 tid)
+{
+ MALI_DEBUG_ASSERT_POINTER(spinlock);
+ MALI_DEBUG_ASSERT_POINTER(spinlock->lock);
+ MALI_DEBUG_ASSERT(0 != tid);
+
+ MALI_DEBUG_PRINT(5, ("%s ^\n", __FUNCTION__));
+
+ if (tid != spinlock->owner) {
+ _mali_osk_spinlock_irq_lock(spinlock->lock);
+ MALI_DEBUG_ASSERT(0 == spinlock->owner && 0 == spinlock->counter);
+ spinlock->owner = tid;
+ }
+
+ MALI_DEBUG_PRINT(5, ("%s v\n", __FUNCTION__));
+
+ ++spinlock->counter;
+}
+
+void mali_spinlock_reentrant_signal(struct mali_spinlock_reentrant *spinlock, u32 tid)
+{
+ MALI_DEBUG_ASSERT_POINTER(spinlock);
+ MALI_DEBUG_ASSERT_POINTER(spinlock->lock);
+ MALI_DEBUG_ASSERT(0 != tid && tid == spinlock->owner);
+
+ --spinlock->counter;
+ if (0 == spinlock->counter) {
+ spinlock->owner = 0;
+ MALI_DEBUG_PRINT(5, ("%s release last\n", __FUNCTION__));
+ _mali_osk_spinlock_irq_unlock(spinlock->lock);
+ }
+}
diff --git a/drivers/gpu/arm/utgard/common/mali_spinlock_reentrant.h b/drivers/gpu/arm/utgard/common/mali_spinlock_reentrant.h
new file mode 100644
index 000000000000..6a62df850b2f
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_spinlock_reentrant.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2013, 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_SPINLOCK_REENTRANT_H__
+#define __MALI_SPINLOCK_REENTRANT_H__
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/**
+ * Reentrant spinlock.
+ */
+struct mali_spinlock_reentrant {
+ _mali_osk_spinlock_irq_t *lock;
+ u32 owner;
+ u32 counter;
+};
+
+/**
+ * Create a new reentrant spinlock.
+ *
+ * @param lock_order Lock order.
+ * @return New reentrant spinlock.
+ */
+struct mali_spinlock_reentrant *mali_spinlock_reentrant_init(_mali_osk_lock_order_t lock_order);
+
+/**
+ * Terminate reentrant spinlock and free any associated resources.
+ *
+ * @param spinlock Reentrant spinlock to terminate.
+ */
+void mali_spinlock_reentrant_term(struct mali_spinlock_reentrant *spinlock);
+
+/**
+ * Wait for reentrant spinlock to be signaled.
+ *
+ * @param spinlock Reentrant spinlock.
+ * @param tid Thread ID.
+ */
+void mali_spinlock_reentrant_wait(struct mali_spinlock_reentrant *spinlock, u32 tid);
+
+/**
+ * Signal reentrant spinlock.
+ *
+ * @param spinlock Reentrant spinlock.
+ * @param tid Thread ID.
+ */
+void mali_spinlock_reentrant_signal(struct mali_spinlock_reentrant *spinlock, u32 tid);
+
+/**
+ * Check if thread is holding reentrant spinlock.
+ *
+ * @param spinlock Reentrant spinlock.
+ * @param tid Thread ID.
+ * @return MALI_TRUE if thread is holding spinlock, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_spinlock_reentrant_is_held(struct mali_spinlock_reentrant *spinlock, u32 tid)
+{
+ MALI_DEBUG_ASSERT_POINTER(spinlock->lock);
+ return (tid == spinlock->owner && 0 < spinlock->counter);
+}
+
+#endif /* __MALI_SPINLOCK_REENTRANT_H__ */
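
A sketch showing the reentrancy contract: the same thread may take the lock several times, and it is only dropped when the counter returns to zero (the example_ helper is hypothetical):

#include "mali_spinlock_reentrant.h"

static void example_nested_locking(struct mali_spinlock_reentrant *spinlock)
{
	u32 tid = _mali_osk_get_tid();

	mali_spinlock_reentrant_wait(spinlock, tid);   /* counter 0 -> 1, owner = tid */
	mali_spinlock_reentrant_wait(spinlock, tid);   /* owner re-enters, counter 1 -> 2 */

	MALI_DEBUG_ASSERT(mali_spinlock_reentrant_is_held(spinlock, tid));

	mali_spinlock_reentrant_signal(spinlock, tid); /* counter 2 -> 1, still held */
	mali_spinlock_reentrant_signal(spinlock, tid); /* counter 1 -> 0, lock released */
}
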
diff --git a/drivers/gpu/arm/utgard/common/mali_timeline.c b/drivers/gpu/arm/utgard/common/mali_timeline.c
new file mode 100644
index 000000000000..3f86077cde71
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_timeline.c
@@ -0,0 +1,1588 @@
+/*
+ * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_timeline.h"
+#include "mali_kernel_common.h"
+#include "mali_scheduler.h"
+#include "mali_soft_job.h"
+#include "mali_timeline_fence_wait.h"
+#include "mali_timeline_sync_fence.h"
+#include "mali_executor.h"
+#include "mali_pp_job.h"
+
+#define MALI_TIMELINE_SYSTEM_LOCKED(system) (mali_spinlock_reentrant_is_held((system)->spinlock, _mali_osk_get_tid()))
+
+/*
+ * The following three counters record how many GP, physical PP
+ * and virtual PP jobs are delayed in the whole timeline system;
+ * these values are used to decide whether idle groups need to be
+ * deactivated.
+ */
+_mali_osk_atomic_t gp_tracker_count;
+_mali_osk_atomic_t phy_pp_tracker_count;
+_mali_osk_atomic_t virt_pp_tracker_count;
+
+static mali_scheduler_mask mali_timeline_system_release_waiter(struct mali_timeline_system *system,
+ struct mali_timeline_waiter *waiter);
+
+#if defined(CONFIG_SYNC)
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+
+struct mali_deferred_fence_put_entry {
+ struct hlist_node list;
+ struct sync_fence *fence;
+};
+
+static HLIST_HEAD(mali_timeline_sync_fence_to_free_list);
+static DEFINE_SPINLOCK(mali_timeline_sync_fence_to_free_lock);
+
+static void put_sync_fences(struct work_struct *ignore)
+{
+ struct hlist_head list;
+ struct hlist_node *tmp, *pos;
+ unsigned long flags;
+ struct mali_deferred_fence_put_entry *o;
+
+ spin_lock_irqsave(&mali_timeline_sync_fence_to_free_lock, flags);
+ hlist_move_list(&mali_timeline_sync_fence_to_free_list, &list);
+ spin_unlock_irqrestore(&mali_timeline_sync_fence_to_free_lock, flags);
+
+ hlist_for_each_entry_safe(o, pos, tmp, &list, list) {
+ sync_fence_put(o->fence);
+ kfree(o);
+ }
+}
+
+static DECLARE_DELAYED_WORK(delayed_sync_fence_put, put_sync_fences);
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) */
+
+/* Callback that is called when a sync fence a tracker is waiting on is signaled. */
+static void mali_timeline_sync_fence_callback(struct sync_fence *sync_fence, struct sync_fence_waiter *sync_fence_waiter)
+{
+ struct mali_timeline_system *system;
+ struct mali_timeline_waiter *waiter;
+ struct mali_timeline_tracker *tracker;
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+ u32 tid = _mali_osk_get_tid();
+ mali_bool is_aborting = MALI_FALSE;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+ int fence_status = sync_fence->status;
+#else
+ int fence_status = atomic_read(&sync_fence->status);
+#endif
+
+ MALI_DEBUG_ASSERT_POINTER(sync_fence);
+ MALI_DEBUG_ASSERT_POINTER(sync_fence_waiter);
+
+ tracker = _MALI_OSK_CONTAINER_OF(sync_fence_waiter, struct mali_timeline_tracker, sync_fence_waiter);
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+
+ system = tracker->system;
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(system->session);
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+ is_aborting = system->session->is_aborting;
+ if (!is_aborting && (0 > fence_status)) {
+ MALI_PRINT_ERROR(("Mali Timeline: sync fence fd %d signaled with error %d\n", tracker->fence.sync_fd, fence_status));
+ tracker->activation_error |= MALI_TIMELINE_ACTIVATION_ERROR_SYNC_BIT;
+ }
+
+ waiter = tracker->waiter_sync;
+ MALI_DEBUG_ASSERT_POINTER(waiter);
+
+ tracker->sync_fence = NULL;
+ tracker->fence.sync_fd = -1;
+
+ schedule_mask |= mali_timeline_system_release_waiter(system, waiter);
+
+ /* If aborting, wake up sleepers that are waiting for sync fence callbacks to complete. */
+ if (is_aborting) {
+ _mali_osk_wait_queue_wake_up(system->wait_queue);
+ }
+
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+ /*
+ * Older versions of Linux, before 3.5, don't support fput() in interrupt
+ * context. For those older kernels, allocate a list object, put the
+ * fence object on it, and defer the call to sync_fence_put() to a workqueue.
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
+ {
+ struct mali_deferred_fence_put_entry *obj;
+
+ obj = kzalloc(sizeof(struct mali_deferred_fence_put_entry), GFP_ATOMIC);
+ if (obj) {
+ unsigned long flags;
+ mali_bool schedule = MALI_FALSE;
+
+ obj->fence = sync_fence;
+
+ spin_lock_irqsave(&mali_timeline_sync_fence_to_free_lock, flags);
+ if (hlist_empty(&mali_timeline_sync_fence_to_free_list))
+ schedule = MALI_TRUE;
+ hlist_add_head(&obj->list, &mali_timeline_sync_fence_to_free_list);
+ spin_unlock_irqrestore(&mali_timeline_sync_fence_to_free_lock, flags);
+
+ if (schedule)
+ schedule_delayed_work(&delayed_sync_fence_put, 0);
+ }
+ }
+#else
+ sync_fence_put(sync_fence);
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) */
+
+ if (!is_aborting) {
+ mali_executor_schedule_from_mask(schedule_mask, MALI_TRUE);
+ }
+}
+#endif /* defined(CONFIG_SYNC) */
+
+static mali_scheduler_mask mali_timeline_tracker_time_out(struct mali_timeline_tracker *tracker)
+{
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_SOFT == tracker->type);
+
+ return mali_soft_job_system_timeout_job((struct mali_soft_job *) tracker->job);
+}
+
+static void mali_timeline_timer_callback(void *data)
+{
+ struct mali_timeline_system *system;
+ struct mali_timeline_tracker *tracker;
+ struct mali_timeline *timeline;
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+ u32 tid = _mali_osk_get_tid();
+
+ timeline = (struct mali_timeline *) data;
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ system = timeline->system;
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+ if (!system->timer_enabled) {
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+ return;
+ }
+
+ tracker = timeline->tracker_tail;
+ timeline->timer_active = MALI_FALSE;
+
+ if (NULL != tracker && MALI_TRUE == tracker->timer_active) {
+ /* This is likely delayed work that was scheduled before it could be cancelled. */
+ if (MALI_TIMELINE_TIMEOUT_HZ > (_mali_osk_time_tickcount() - tracker->os_tick_activate)) {
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+ return;
+ }
+
+ schedule_mask = mali_timeline_tracker_time_out(tracker);
+ tracker->timer_active = MALI_FALSE;
+ } else {
+ MALI_PRINT_ERROR(("Mali Timeline: Soft job timer callback without a waiting tracker.\n"));
+ }
+
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+ mali_executor_schedule_from_mask(schedule_mask, MALI_FALSE);
+}
+
+void mali_timeline_system_stop_timer(struct mali_timeline_system *system)
+{
+ u32 i;
+ u32 tid = _mali_osk_get_tid();
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+ system->timer_enabled = MALI_FALSE;
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+ struct mali_timeline *timeline = system->timelines[i];
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ if (NULL != timeline->delayed_work) {
+ _mali_osk_wq_delayed_cancel_work_sync(timeline->delayed_work);
+ timeline->timer_active = MALI_FALSE;
+ }
+ }
+}
+
+static void mali_timeline_destroy(struct mali_timeline *timeline)
+{
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ if (NULL != timeline) {
+ /* Assert that the timeline object has been properly cleaned up before destroying it. */
+ MALI_DEBUG_ASSERT(timeline->point_oldest == timeline->point_next);
+ MALI_DEBUG_ASSERT(NULL == timeline->tracker_head);
+ MALI_DEBUG_ASSERT(NULL == timeline->tracker_tail);
+ MALI_DEBUG_ASSERT(NULL == timeline->waiter_head);
+ MALI_DEBUG_ASSERT(NULL == timeline->waiter_tail);
+ MALI_DEBUG_ASSERT(NULL != timeline->system);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_MAX > timeline->id);
+
+ if (NULL != timeline->delayed_work) {
+ _mali_osk_wq_delayed_cancel_work_sync(timeline->delayed_work);
+ _mali_osk_wq_delayed_delete_work_nonflush(timeline->delayed_work);
+ }
+#if defined(CONFIG_SYNC)
+ if (NULL != timeline->sync_tl) {
+ sync_timeline_destroy(timeline->sync_tl);
+ }
+#else
+ _mali_osk_free(timeline);
+#endif
+ }
+}
+
+static struct mali_timeline *mali_timeline_create(struct mali_timeline_system *system, enum mali_timeline_id id)
+{
+ struct mali_timeline *timeline;
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT(id < MALI_TIMELINE_MAX);
+
+ timeline = (struct mali_timeline *) _mali_osk_calloc(1, sizeof(struct mali_timeline));
+ if (NULL == timeline) {
+ return NULL;
+ }
+
+ /* Initially the timeline is empty. */
+#if defined(MALI_TIMELINE_DEBUG_START_POINT)
+ /* Start the timeline a bit before wrapping when debugging. */
+ timeline->point_next = UINT_MAX - MALI_TIMELINE_MAX_POINT_SPAN - 128;
+#else
+ timeline->point_next = 1;
+#endif
+ timeline->point_oldest = timeline->point_next;
+
+ /* The tracker and waiter lists will initially be empty. */
+
+ timeline->system = system;
+ timeline->id = id;
+
+ timeline->delayed_work = _mali_osk_wq_delayed_create_work(mali_timeline_timer_callback, timeline);
+ if (NULL == timeline->delayed_work) {
+ mali_timeline_destroy(timeline);
+ return NULL;
+ }
+
+ timeline->timer_active = MALI_FALSE;
+
+#if defined(CONFIG_SYNC)
+ {
+ char timeline_name[32];
+
+ switch (id) {
+ case MALI_TIMELINE_GP:
+ _mali_osk_snprintf(timeline_name, 32, "mali-%u-gp", _mali_osk_get_pid());
+ break;
+ case MALI_TIMELINE_PP:
+ _mali_osk_snprintf(timeline_name, 32, "mali-%u-pp", _mali_osk_get_pid());
+ break;
+ case MALI_TIMELINE_SOFT:
+ _mali_osk_snprintf(timeline_name, 32, "mali-%u-soft", _mali_osk_get_pid());
+ break;
+ default:
+ MALI_PRINT_ERROR(("Mali Timeline: Invalid timeline id %d\n", id));
+ mali_timeline_destroy(timeline);
+ return NULL;
+ }
+
+ timeline->destroyed = MALI_FALSE;
+
+ timeline->sync_tl = mali_sync_timeline_create(timeline, timeline_name);
+ if (NULL == timeline->sync_tl) {
+ mali_timeline_destroy(timeline);
+ return NULL;
+ }
+
+ timeline->spinlock = mali_spinlock_reentrant_init(_MALI_OSK_LOCK_ORDER_TIMELINE_SYSTEM);
+ if (NULL == timeline->spinlock) {
+ mali_timeline_destroy(timeline);
+ return NULL;
+ }
+ }
+#endif /* defined(CONFIG_SYNC) */
+
+ return timeline;
+}
+
+static void mali_timeline_insert_tracker(struct mali_timeline *timeline, struct mali_timeline_tracker *tracker)
+{
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+
+ if (mali_timeline_is_full(timeline)) {
+ /* Don't add tracker if timeline is full. */
+ tracker->point = MALI_TIMELINE_NO_POINT;
+ return;
+ }
+
+ tracker->timeline = timeline;
+ tracker->point = timeline->point_next;
+
+ /* Find next available point. */
+ timeline->point_next++;
+ if (MALI_TIMELINE_NO_POINT == timeline->point_next) {
+ timeline->point_next++;
+ }
+
+ MALI_DEBUG_ASSERT(!mali_timeline_is_empty(timeline));
+
+ if (MALI_TIMELINE_TRACKER_GP == tracker->type) {
+ _mali_osk_atomic_inc(&gp_tracker_count);
+ } else if (MALI_TIMELINE_TRACKER_PP == tracker->type) {
+ if (mali_pp_job_is_virtual((struct mali_pp_job *)tracker->job)) {
+ _mali_osk_atomic_inc(&virt_pp_tracker_count);
+ } else {
+ _mali_osk_atomic_inc(&phy_pp_tracker_count);
+ }
+ }
+
+ /* Add tracker as new head on timeline's tracker list. */
+ if (NULL == timeline->tracker_head) {
+ /* Tracker list is empty. */
+ MALI_DEBUG_ASSERT(NULL == timeline->tracker_tail);
+
+ timeline->tracker_tail = tracker;
+
+ MALI_DEBUG_ASSERT(NULL == tracker->timeline_next);
+ MALI_DEBUG_ASSERT(NULL == tracker->timeline_prev);
+ } else {
+ MALI_DEBUG_ASSERT(NULL == timeline->tracker_head->timeline_next);
+
+ tracker->timeline_prev = timeline->tracker_head;
+ timeline->tracker_head->timeline_next = tracker;
+
+ MALI_DEBUG_ASSERT(NULL == tracker->timeline_next);
+ }
+ timeline->tracker_head = tracker;
+
+ MALI_DEBUG_ASSERT(NULL == timeline->tracker_head->timeline_next);
+ MALI_DEBUG_ASSERT(NULL == timeline->tracker_tail->timeline_prev);
+}
+
+/* Inserting the waiter object into the given timeline */
+static void mali_timeline_insert_waiter(struct mali_timeline *timeline, struct mali_timeline_waiter *waiter_new)
+{
+ struct mali_timeline_waiter *waiter_prev;
+ struct mali_timeline_waiter *waiter_next;
+
+ /* The waiter's point must be between the timeline's head and tail, and there must
+ * be less than MALI_TIMELINE_MAX_POINT_SPAN elements between them. */
+ MALI_DEBUG_ASSERT((waiter_new->point - timeline->point_oldest) < MALI_TIMELINE_MAX_POINT_SPAN);
+ MALI_DEBUG_ASSERT((-waiter_new->point + timeline->point_next) < MALI_TIMELINE_MAX_POINT_SPAN);
+
+ /* Find out where to put this waiter in the given timeline's linked waiter list. */
+ waiter_prev = timeline->waiter_head; /* Insert new after waiter_prev */
+ waiter_next = NULL; /* Insert new before waiter_next */
+
+ /* Iterating backwards from head (newest) to tail (oldest) until we
+ * find the correct spot to insert the new waiter */
+ while (waiter_prev && mali_timeline_point_after(waiter_prev->point, waiter_new->point)) {
+ waiter_next = waiter_prev;
+ waiter_prev = waiter_prev->timeline_prev;
+ }
+
+ if (NULL == waiter_prev && NULL == waiter_next) {
+ /* list is empty */
+ timeline->waiter_head = waiter_new;
+ timeline->waiter_tail = waiter_new;
+ } else if (NULL == waiter_next) {
+ /* insert at head */
+ waiter_new->timeline_prev = timeline->waiter_head;
+ timeline->waiter_head->timeline_next = waiter_new;
+ timeline->waiter_head = waiter_new;
+ } else if (NULL == waiter_prev) {
+ /* insert at tail */
+ waiter_new->timeline_next = timeline->waiter_tail;
+ timeline->waiter_tail->timeline_prev = waiter_new;
+ timeline->waiter_tail = waiter_new;
+ } else {
+ /* insert between */
+ waiter_new->timeline_next = waiter_next;
+ waiter_new->timeline_prev = waiter_prev;
+ waiter_next->timeline_prev = waiter_new;
+ waiter_prev->timeline_next = waiter_new;
+ }
+}
+
+static void mali_timeline_update_delayed_work(struct mali_timeline *timeline)
+{
+ struct mali_timeline_system *system;
+ struct mali_timeline_tracker *oldest_tracker;
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SOFT == timeline->id);
+
+ system = timeline->system;
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+ /* Timer is disabled, early out. */
+ if (!system->timer_enabled) return;
+
+ oldest_tracker = timeline->tracker_tail;
+ if (NULL != oldest_tracker && 0 == oldest_tracker->trigger_ref_count) {
+ if (MALI_FALSE == oldest_tracker->timer_active) {
+ if (MALI_TRUE == timeline->timer_active) {
+ _mali_osk_wq_delayed_cancel_work_async(timeline->delayed_work);
+ }
+ _mali_osk_wq_delayed_schedule_work(timeline->delayed_work, MALI_TIMELINE_TIMEOUT_HZ);
+ oldest_tracker->timer_active = MALI_TRUE;
+ timeline->timer_active = MALI_TRUE;
+ }
+ } else if (MALI_TRUE == timeline->timer_active) {
+ _mali_osk_wq_delayed_cancel_work_async(timeline->delayed_work);
+ timeline->timer_active = MALI_FALSE;
+ }
+}
+
+static mali_scheduler_mask mali_timeline_update_oldest_point(struct mali_timeline *timeline)
+{
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ MALI_DEBUG_CODE({
+ struct mali_timeline_system *system = timeline->system;
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+ });
+
+ if (NULL != timeline->tracker_tail) {
+ /* Set oldest point to oldest tracker's point */
+ timeline->point_oldest = timeline->tracker_tail->point;
+ } else {
+ /* No trackers, mark point list as empty */
+ timeline->point_oldest = timeline->point_next;
+ }
+
+ /* Release all waiters no longer on the timeline's point list.
+ * Releasing a waiter can trigger this function to be called again, so
+ * we do not store any pointers on stack. */
+ while (NULL != timeline->waiter_tail) {
+ u32 waiter_time_relative;
+ u32 time_head_relative;
+ struct mali_timeline_waiter *waiter = timeline->waiter_tail;
+
+ time_head_relative = timeline->point_next - timeline->point_oldest;
+ waiter_time_relative = waiter->point - timeline->point_oldest;
+
+ if (waiter_time_relative < time_head_relative) {
+ /* This and all following waiters are on the point list, so we are done. */
+ break;
+ }
+
+ /* Remove waiter from timeline's waiter list. */
+ if (NULL != waiter->timeline_next) {
+ waiter->timeline_next->timeline_prev = NULL;
+ } else {
+ /* This was the last waiter */
+ timeline->waiter_head = NULL;
+ }
+ timeline->waiter_tail = waiter->timeline_next;
+
+ /* Release waiter. This could activate a tracker, if this was
+ * the last waiter for the tracker. */
+ schedule_mask |= mali_timeline_system_release_waiter(timeline->system, waiter);
+ }
+
+ return schedule_mask;
+}
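
The loop above relies on wrap-safe unsigned arithmetic: a point is still on the timeline exactly when its distance from point_oldest is smaller than the live span point_next - point_oldest. A hedged helper spelling the comparison out (the example_ name is hypothetical):

/* Assumes u32 subtraction wraps modulo 2^32, as in
 * mali_timeline_update_oldest_point() above. */
static mali_bool example_point_is_on_timeline(u32 point_oldest, u32 point_next, u32 point)
{
	u32 span_used = point_next - point_oldest; /* number of live points */
	u32 offset = point - point_oldest;         /* distance from the oldest point */

	return (offset < span_used) ? MALI_TRUE : MALI_FALSE;
}
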
+
+void mali_timeline_tracker_init(struct mali_timeline_tracker *tracker,
+ mali_timeline_tracker_type type,
+ struct mali_timeline_fence *fence,
+ void *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_MAX > type);
+
+ /* Zero out all tracker members. */
+ _mali_osk_memset(tracker, 0, sizeof(*tracker));
+
+ tracker->type = type;
+ tracker->job = job;
+ tracker->trigger_ref_count = 1; /* Prevents any callback from triggering while adding it */
+ tracker->os_tick_create = _mali_osk_time_tickcount();
+ MALI_DEBUG_CODE(tracker->magic = MALI_TIMELINE_TRACKER_MAGIC);
+
+ tracker->activation_error = MALI_TIMELINE_ACTIVATION_ERROR_NONE;
+
+ /* Copy fence. */
+ if (NULL != fence) {
+ _mali_osk_memcpy(&tracker->fence, fence, sizeof(struct mali_timeline_fence));
+ }
+}
+
+mali_scheduler_mask mali_timeline_tracker_release(struct mali_timeline_tracker *tracker)
+{
+ struct mali_timeline *timeline;
+ struct mali_timeline_system *system;
+ struct mali_timeline_tracker *tracker_next, *tracker_prev;
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+ u32 tid = _mali_osk_get_tid();
+
+ /* Upon entry a group lock will be held, but not a scheduler lock. */
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_MAGIC == tracker->magic);
+
+ /* Tracker should have been triggered */
+ MALI_DEBUG_ASSERT(0 == tracker->trigger_ref_count);
+
+ /* All waiters should have been released at this point */
+ MALI_DEBUG_ASSERT(NULL == tracker->waiter_head);
+ MALI_DEBUG_ASSERT(NULL == tracker->waiter_tail);
+
+ MALI_DEBUG_PRINT(3, ("Mali Timeline: releasing tracker for job 0x%08X\n", tracker->job));
+
+ timeline = tracker->timeline;
+ if (NULL == timeline) {
+ /* Tracker was not on a timeline, there is nothing to release. */
+ return MALI_SCHEDULER_MASK_EMPTY;
+ }
+
+ system = timeline->system;
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+ /* Tracker should still be on timeline */
+ MALI_DEBUG_ASSERT(!mali_timeline_is_empty(timeline));
+ MALI_DEBUG_ASSERT(mali_timeline_is_point_on(timeline, tracker->point));
+
+ /* Tracker is no longer valid. */
+ MALI_DEBUG_CODE(tracker->magic = 0);
+
+ tracker_next = tracker->timeline_next;
+ tracker_prev = tracker->timeline_prev;
+ tracker->timeline_next = NULL;
+ tracker->timeline_prev = NULL;
+
+ /* Removing tracker from timeline's tracker list */
+ if (NULL == tracker_next) {
+ /* This tracker was the head */
+ timeline->tracker_head = tracker_prev;
+ } else {
+ tracker_next->timeline_prev = tracker_prev;
+ }
+
+ if (NULL == tracker_prev) {
+ /* This tracker was the tail */
+ timeline->tracker_tail = tracker_next;
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+ /* Update the timeline's oldest time and release any waiters */
+ schedule_mask |= mali_timeline_update_oldest_point(timeline);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+ } else {
+ tracker_prev->timeline_next = tracker_next;
+ }
+
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+ /* Update delayed work only when it is the soft job timeline */
+ if (MALI_TIMELINE_SOFT == tracker->timeline->id) {
+ mali_timeline_update_delayed_work(tracker->timeline);
+ }
+
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+ return schedule_mask;
+}
+
+void mali_timeline_system_release_waiter_list(struct mali_timeline_system *system,
+ struct mali_timeline_waiter *tail,
+ struct mali_timeline_waiter *head)
+{
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(head);
+ MALI_DEBUG_ASSERT_POINTER(tail);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+ head->tracker_next = system->waiter_empty_list;
+ system->waiter_empty_list = tail;
+}
+
+static mali_scheduler_mask mali_timeline_tracker_activate(struct mali_timeline_tracker *tracker)
+{
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+ struct mali_timeline_system *system;
+ struct mali_timeline *timeline;
+ u32 tid = _mali_osk_get_tid();
+
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_MAGIC == tracker->magic);
+
+ system = tracker->system;
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+ tracker->os_tick_activate = _mali_osk_time_tickcount();
+
+ if (NULL != tracker->waiter_head) {
+ mali_timeline_system_release_waiter_list(system, tracker->waiter_tail, tracker->waiter_head);
+ tracker->waiter_head = NULL;
+ tracker->waiter_tail = NULL;
+ }
+
+ switch (tracker->type) {
+ case MALI_TIMELINE_TRACKER_GP:
+ schedule_mask = mali_scheduler_activate_gp_job((struct mali_gp_job *) tracker->job);
+
+ _mali_osk_atomic_dec(&gp_tracker_count);
+ break;
+ case MALI_TIMELINE_TRACKER_PP:
+ if (mali_pp_job_is_virtual((struct mali_pp_job *)tracker->job)) {
+ _mali_osk_atomic_dec(&virt_pp_tracker_count);
+ } else {
+ _mali_osk_atomic_dec(&phy_pp_tracker_count);
+ }
+ schedule_mask = mali_scheduler_activate_pp_job((struct mali_pp_job *) tracker->job);
+ break;
+ case MALI_TIMELINE_TRACKER_SOFT:
+ timeline = tracker->timeline;
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ schedule_mask |= mali_soft_job_system_activate_job((struct mali_soft_job *) tracker->job);
+
+ /* Start a soft timer to make sure the soft job is released within a limited time */
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+ mali_timeline_update_delayed_work(timeline);
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+ break;
+ case MALI_TIMELINE_TRACKER_WAIT:
+ mali_timeline_fence_wait_activate((struct mali_timeline_fence_wait_tracker *) tracker->job);
+ break;
+ case MALI_TIMELINE_TRACKER_SYNC:
+#if defined(CONFIG_SYNC)
+ mali_timeline_sync_fence_activate((struct mali_timeline_sync_fence_tracker *) tracker->job);
+#else
+ MALI_PRINT_ERROR(("Mali Timeline: sync tracker not supported\n", tracker->type));
+#endif /* defined(CONFIG_SYNC) */
+ break;
+ default:
+ MALI_PRINT_ERROR(("Mali Timeline - Illegal tracker type: %d\n", tracker->type));
+ break;
+ }
+
+ return schedule_mask;
+}
+
+void mali_timeline_system_tracker_get(struct mali_timeline_system *system, struct mali_timeline_tracker *tracker)
+{
+ u32 tid = _mali_osk_get_tid();
+
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+ MALI_DEBUG_ASSERT(0 < tracker->trigger_ref_count);
+ tracker->trigger_ref_count++;
+
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+}
+
+mali_scheduler_mask mali_timeline_system_tracker_put(struct mali_timeline_system *system, struct mali_timeline_tracker *tracker, mali_timeline_activation_error activation_error)
+{
+ u32 tid = _mali_osk_get_tid();
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+ MALI_DEBUG_ASSERT(0 < tracker->trigger_ref_count);
+ tracker->trigger_ref_count--;
+
+ tracker->activation_error |= activation_error;
+
+ if (0 == tracker->trigger_ref_count) {
+ schedule_mask |= mali_timeline_tracker_activate(tracker);
+ tracker = NULL;
+ }
+
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+ return schedule_mask;
+}
+
+void mali_timeline_fence_copy_uk_fence(struct mali_timeline_fence *fence, _mali_uk_fence_t *uk_fence)
+{
+ u32 i;
+
+ MALI_DEBUG_ASSERT_POINTER(fence);
+ MALI_DEBUG_ASSERT_POINTER(uk_fence);
+
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+ fence->points[i] = uk_fence->points[i];
+ }
+
+ fence->sync_fd = uk_fence->sync_fd;
+}
+
+struct mali_timeline_system *mali_timeline_system_create(struct mali_session_data *session)
+{
+ u32 i;
+ struct mali_timeline_system *system;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_PRINT(4, ("Mali Timeline: creating timeline system\n"));
+
+ system = (struct mali_timeline_system *) _mali_osk_calloc(1, sizeof(struct mali_timeline_system));
+ if (NULL == system) {
+ return NULL;
+ }
+
+ system->spinlock = mali_spinlock_reentrant_init(_MALI_OSK_LOCK_ORDER_TIMELINE_SYSTEM);
+ if (NULL == system->spinlock) {
+ mali_timeline_system_destroy(system);
+ return NULL;
+ }
+
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+ system->timelines[i] = mali_timeline_create(system, (enum mali_timeline_id)i);
+ if (NULL == system->timelines[i]) {
+ mali_timeline_system_destroy(system);
+ return NULL;
+ }
+ }
+
+#if defined(CONFIG_SYNC)
+ system->signaled_sync_tl = mali_sync_timeline_create(NULL, "mali-always-signaled");
+ if (NULL == system->signaled_sync_tl) {
+ mali_timeline_system_destroy(system);
+ return NULL;
+ }
+#endif /* defined(CONFIG_SYNC) */
+
+ system->waiter_empty_list = NULL;
+ system->session = session;
+ system->timer_enabled = MALI_TRUE;
+
+ system->wait_queue = _mali_osk_wait_queue_init();
+ if (NULL == system->wait_queue) {
+ mali_timeline_system_destroy(system);
+ return NULL;
+ }
+
+ return system;
+}
+
+#if defined(CONFIG_SYNC)
+
+/**
+ * Check if there are any trackers left on the timeline.
+ *
+ * Used as a wait queue conditional.
+ *
+ * @param data Timeline.
+ * @return MALI_TRUE if there are no trackers left on the timeline, MALI_FALSE if not.
+ */
+static mali_bool mali_timeline_has_no_trackers(void *data)
+{
+ struct mali_timeline *timeline = (struct mali_timeline *) data;
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ return mali_timeline_is_empty(timeline);
+}
+
+/**
+ * Cancel sync fence waiters waited upon by trackers on all timelines.
+ *
+ * Will return after all timelines have no trackers left.
+ *
+ * @param system Timeline system.
+ */
+static void mali_timeline_cancel_sync_fence_waiters(struct mali_timeline_system *system)
+{
+ u32 i;
+ u32 tid = _mali_osk_get_tid();
+ struct mali_timeline_tracker *tracker, *tracker_next;
+ _MALI_OSK_LIST_HEAD_STATIC_INIT(tracker_list);
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(system->session);
+ MALI_DEBUG_ASSERT(system->session->is_aborting);
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+ /* Cancel sync fence waiters. */
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+ struct mali_timeline *timeline = system->timelines[i];
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ tracker_next = timeline->tracker_tail;
+ while (NULL != tracker_next) {
+ tracker = tracker_next;
+ tracker_next = tracker->timeline_next;
+
+ if (NULL == tracker->sync_fence) continue;
+
+ MALI_DEBUG_PRINT(3, ("Mali Timeline: Cancelling sync fence wait for tracker 0x%08X.\n", tracker));
+
+ /* Cancel sync fence waiter. */
+ if (0 == sync_fence_cancel_async(tracker->sync_fence, &tracker->sync_fence_waiter)) {
+ /* Callback was not called, move tracker to local list. */
+ _mali_osk_list_add(&tracker->sync_fence_cancel_list, &tracker_list);
+ }
+ }
+ }
+
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+ /* Manually call sync fence callback in order to release waiter and trigger activation of tracker. */
+ _MALI_OSK_LIST_FOREACHENTRY(tracker, tracker_next, &tracker_list, struct mali_timeline_tracker, sync_fence_cancel_list) {
+ mali_timeline_sync_fence_callback(tracker->sync_fence, &tracker->sync_fence_waiter);
+ }
+
+ /* Sleep until all sync fence callbacks are done and all timelines are empty. */
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+ struct mali_timeline *timeline = system->timelines[i];
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ _mali_osk_wait_queue_wait_event(system->wait_queue, mali_timeline_has_no_trackers, (void *) timeline);
+ }
+}
+
+#endif /* defined(CONFIG_SYNC) */
+
+void mali_timeline_system_abort(struct mali_timeline_system *system)
+{
+ MALI_DEBUG_CODE(u32 tid = _mali_osk_get_tid(););
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(system->session);
+ MALI_DEBUG_ASSERT(system->session->is_aborting);
+
+ MALI_DEBUG_PRINT(3, ("Mali Timeline: Aborting timeline system for session 0x%08X.\n", system->session));
+
+#if defined(CONFIG_SYNC)
+ mali_timeline_cancel_sync_fence_waiters(system);
+#endif /* defined(CONFIG_SYNC) */
+
+ /* Should not be any waiters or trackers left at this point. */
+ MALI_DEBUG_CODE({
+ u32 i;
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i)
+ {
+ struct mali_timeline *timeline = system->timelines[i];
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ MALI_DEBUG_ASSERT(timeline->point_oldest == timeline->point_next);
+ MALI_DEBUG_ASSERT(NULL == timeline->tracker_head);
+ MALI_DEBUG_ASSERT(NULL == timeline->tracker_tail);
+ MALI_DEBUG_ASSERT(NULL == timeline->waiter_head);
+ MALI_DEBUG_ASSERT(NULL == timeline->waiter_tail);
+ }
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+ });
+}
+
+void mali_timeline_system_destroy(struct mali_timeline_system *system)
+{
+ u32 i;
+ struct mali_timeline_waiter *waiter, *next;
+#if defined(CONFIG_SYNC)
+ u32 tid = _mali_osk_get_tid();
+#endif
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(system->session);
+
+ MALI_DEBUG_PRINT(4, ("Mali Timeline: destroying timeline system\n"));
+
+ if (NULL != system) {
+
+ /* There should be no waiters left on this queue. */
+ if (NULL != system->wait_queue) {
+ _mali_osk_wait_queue_term(system->wait_queue);
+ system->wait_queue = NULL;
+ }
+
+ /* Free all waiters in empty list */
+ waiter = system->waiter_empty_list;
+ while (NULL != waiter) {
+ next = waiter->tracker_next;
+ _mali_osk_free(waiter);
+ waiter = next;
+ }
+
+#if defined(CONFIG_SYNC)
+ if (NULL != system->signaled_sync_tl) {
+ sync_timeline_destroy(system->signaled_sync_tl);
+ }
+
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+ if ((NULL != system->timelines[i]) && (NULL != system->timelines[i]->spinlock)) {
+ mali_spinlock_reentrant_wait(system->timelines[i]->spinlock, tid);
+ system->timelines[i]->destroyed = MALI_TRUE;
+ mali_spinlock_reentrant_signal(system->timelines[i]->spinlock, tid);
+ if (NULL != system->timelines[i]->sync_tl) {
+ sync_timeline_destroy(system->timelines[i]->sync_tl);
+ }
+ }
+ }
+#else
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+ if (NULL != system->timelines[i]) {
+ mali_timeline_destroy(system->timelines[i]);
+ }
+ }
+
+#endif /* defined(CONFIG_SYNC) */
+
+
+ if (NULL != system->spinlock) {
+ mali_spinlock_reentrant_term(system->spinlock);
+ }
+
+ _mali_osk_free(system);
+ }
+}
+
+/**
+ * Find how many waiters are needed for a given fence.
+ *
+ * @param fence The fence to check.
+ * @return Number of waiters needed for fence.
+ */
+static u32 mali_timeline_fence_num_waiters(struct mali_timeline_fence *fence)
+{
+ u32 i, num_waiters = 0;
+
+ MALI_DEBUG_ASSERT_POINTER(fence);
+
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+ if (MALI_TIMELINE_NO_POINT != fence->points[i]) {
+ ++num_waiters;
+ }
+ }
+
+#if defined(CONFIG_SYNC)
+ if (-1 != fence->sync_fd) ++num_waiters;
+#endif /* defined(CONFIG_SYNC) */
+
+ return num_waiters;
+}
+
+static struct mali_timeline_waiter *mali_timeline_system_get_zeroed_waiter(struct mali_timeline_system *system)
+{
+ struct mali_timeline_waiter *waiter;
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+ waiter = system->waiter_empty_list;
+ if (NULL != waiter) {
+ /* Remove waiter from empty list and zero it */
+ system->waiter_empty_list = waiter->tracker_next;
+ _mali_osk_memset(waiter, 0, sizeof(*waiter));
+ }
+
+ /* Return NULL if list was empty. */
+ return waiter;
+}
+
+static void mali_timeline_system_allocate_waiters(struct mali_timeline_system *system,
+ struct mali_timeline_waiter **tail,
+ struct mali_timeline_waiter **head,
+ int max_num_waiters)
+{
+ u32 i, tid = _mali_osk_get_tid();
+ mali_bool do_alloc;
+ struct mali_timeline_waiter *waiter;
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(tail);
+ MALI_DEBUG_ASSERT_POINTER(head);
+
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
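+ /* Note on the allocation strategy below: waiters are first taken from the
+ * system's empty-waiter list while the lock is held. If that list runs dry,
+ * the spinlock is dropped so _mali_osk_calloc() can be used for the
+ * remainder, and it is re-taken before returning so the caller still sees
+ * the system locked. */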
+ *head = *tail = NULL;
+ do_alloc = MALI_FALSE;
+ i = 0;
+ while (i < max_num_waiters) {
+ if (MALI_FALSE == do_alloc) {
+ waiter = mali_timeline_system_get_zeroed_waiter(system);
+ if (NULL == waiter) {
+ do_alloc = MALI_TRUE;
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+ continue;
+ }
+ } else {
+ waiter = _mali_osk_calloc(1, sizeof(struct mali_timeline_waiter));
+ if (NULL == waiter) break;
+ }
+ ++i;
+ if (NULL == *tail) {
+ *tail = waiter;
+ *head = waiter;
+ } else {
+ (*head)->tracker_next = waiter;
+ *head = waiter;
+ }
+ }
+ if (MALI_TRUE == do_alloc) {
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+ }
+}
+
+/**
+ * Create waiters for the given tracker. The tracker is activated when all waiters have been
+ * released. The timeline system lock is held on entry and released before this function returns.
+ *
+ * @note Tracker can potentially be activated before this function returns.
+ *
+ * @param system Timeline system.
+ * @param tracker Tracker we will create waiters for.
+ * @param waiter_tail List of pre-allocated waiters.
+ * @param waiter_head List of pre-allocated waiters.
+ */
+static void mali_timeline_system_create_waiters_and_unlock(struct mali_timeline_system *system,
+ struct mali_timeline_tracker *tracker,
+ struct mali_timeline_waiter *waiter_tail,
+ struct mali_timeline_waiter *waiter_head)
+{
+ int i;
+ u32 tid = _mali_osk_get_tid();
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+#if defined(CONFIG_SYNC)
+ struct sync_fence *sync_fence = NULL;
+#endif /* defined(CONFIG_SYNC) */
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+ MALI_DEBUG_ASSERT(NULL == tracker->waiter_head);
+ MALI_DEBUG_ASSERT(NULL == tracker->waiter_tail);
+ MALI_DEBUG_ASSERT(NULL != tracker->job);
+
+ /* Creating waiter object for all the timelines the fence is put on. Inserting this waiter
+ * into the timelines sorted list of waiters */
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+ mali_timeline_point point;
+ struct mali_timeline *timeline;
+ struct mali_timeline_waiter *waiter;
+
+ /* Get point on current timeline from tracker's fence. */
+ point = tracker->fence.points[i];
+
+ if (likely(MALI_TIMELINE_NO_POINT == point)) {
+ /* Fence contains no point on this timeline so we don't need a waiter. */
+ continue;
+ }
+
+ timeline = system->timelines[i];
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ if (unlikely(!mali_timeline_is_point_valid(timeline, point))) {
+ MALI_PRINT_ERROR(("Mali Timeline: point %d is not valid (oldest=%d, next=%d)\n",
+ point, timeline->point_oldest, timeline->point_next));
+ continue;
+ }
+
+ if (likely(mali_timeline_is_point_released(timeline, point))) {
+ /* Tracker representing the point has been released so we don't need a
+ * waiter. */
+ continue;
+ }
+
+ /* The point is on timeline. */
+ MALI_DEBUG_ASSERT(mali_timeline_is_point_on(timeline, point));
+
+ /* Get a new zeroed waiter object. */
+ if (likely(NULL != waiter_tail)) {
+ waiter = waiter_tail;
+ waiter_tail = waiter_tail->tracker_next;
+ } else {
+ MALI_PRINT_ERROR(("Mali Timeline: failed to allocate memory for waiter\n"));
+ continue;
+ }
+
+ /* Increase the trigger ref count of the tracker. */
+ tracker->trigger_ref_count++;
+
+ waiter->point = point;
+ waiter->tracker = tracker;
+
+ /* Insert waiter on tracker's singly-linked waiter list. */
+ if (NULL == tracker->waiter_head) {
+ /* list is empty */
+ MALI_DEBUG_ASSERT(NULL == tracker->waiter_tail);
+ tracker->waiter_tail = waiter;
+ } else {
+ tracker->waiter_head->tracker_next = waiter;
+ }
+ tracker->waiter_head = waiter;
+
+ /* Add waiter to timeline. */
+ mali_timeline_insert_waiter(timeline, waiter);
+ }
+#if defined(CONFIG_SYNC)
+ if (-1 != tracker->fence.sync_fd) {
+ int ret;
+ struct mali_timeline_waiter *waiter;
+
+ sync_fence = sync_fence_fdget(tracker->fence.sync_fd);
+ if (unlikely(NULL == sync_fence)) {
+ MALI_PRINT_ERROR(("Mali Timeline: failed to get sync fence from fd %d\n", tracker->fence.sync_fd));
+ goto exit;
+ }
+
+ /* Check if we have a zeroed waiter object available. */
+ if (unlikely(NULL == waiter_tail)) {
+ MALI_PRINT_ERROR(("Mali Timeline: failed to allocate memory for waiter\n"));
+ goto exit;
+ }
+
+ /* Start asynchronous wait that will release waiter when the fence is signaled. */
+ sync_fence_waiter_init(&tracker->sync_fence_waiter, mali_timeline_sync_fence_callback);
+ ret = sync_fence_wait_async(sync_fence, &tracker->sync_fence_waiter);
+ if (1 == ret) {
+ /* Fence already signaled, no waiter needed. */
+ tracker->fence.sync_fd = -1;
+ goto exit;
+ } else if (0 != ret) {
+ MALI_PRINT_ERROR(("Mali Timeline: sync fence fd %d signaled with error %d\n", tracker->fence.sync_fd, ret));
+ tracker->activation_error |= MALI_TIMELINE_ACTIVATION_ERROR_SYNC_BIT;
+ goto exit;
+ }
+
+ /* Grab new zeroed waiter object. */
+ waiter = waiter_tail;
+ waiter_tail = waiter_tail->tracker_next;
+
+ /* Increase the trigger ref count of the tracker. */
+ tracker->trigger_ref_count++;
+
+ waiter->point = MALI_TIMELINE_NO_POINT;
+ waiter->tracker = tracker;
+
+ /* Insert waiter on tracker's singly-linked waiter list. */
+ if (NULL == tracker->waiter_head) {
+ /* list is empty */
+ MALI_DEBUG_ASSERT(NULL == tracker->waiter_tail);
+ tracker->waiter_tail = waiter;
+ } else {
+ tracker->waiter_head->tracker_next = waiter;
+ }
+ tracker->waiter_head = waiter;
+
+ /* Also store waiter in separate field for easy access by sync callback. */
+ tracker->waiter_sync = waiter;
+
+ /* Store the sync fence in tracker so we can retrieve in abort session, if needed. */
+ tracker->sync_fence = sync_fence;
+
+ sync_fence = NULL;
+ }
+exit:
+#endif /* defined(CONFIG_SYNC) */
+
+ if (NULL != waiter_tail) {
+ mali_timeline_system_release_waiter_list(system, waiter_tail, waiter_head);
+ }
+
+ /* Release the initial trigger ref count. */
+ tracker->trigger_ref_count--;
+
+ /* If there were no waiters added to this tracker we activate immediately. */
+ if (0 == tracker->trigger_ref_count) {
+ schedule_mask |= mali_timeline_tracker_activate(tracker);
+ }
+
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+#if defined(CONFIG_SYNC)
+ if (NULL != sync_fence) {
+ sync_fence_put(sync_fence);
+ }
+#endif /* defined(CONFIG_SYNC) */
+
+ mali_executor_schedule_from_mask(schedule_mask, MALI_FALSE);
+}
+
+mali_timeline_point mali_timeline_system_add_tracker(struct mali_timeline_system *system,
+ struct mali_timeline_tracker *tracker,
+ enum mali_timeline_id timeline_id)
+{
+ int num_waiters = 0;
+ struct mali_timeline_waiter *waiter_tail, *waiter_head;
+ u32 tid = _mali_osk_get_tid();
+ mali_timeline_point point = MALI_TIMELINE_NO_POINT;
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(system->session);
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+
+ MALI_DEBUG_ASSERT(MALI_FALSE == system->session->is_aborting);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_MAX > tracker->type);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_MAGIC == tracker->magic);
+
+ MALI_DEBUG_PRINT(4, ("Mali Timeline: adding tracker for job %p, timeline: %d\n", tracker->job, timeline_id));
+
+ MALI_DEBUG_ASSERT(0 < tracker->trigger_ref_count);
+ tracker->system = system;
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+ num_waiters = mali_timeline_fence_num_waiters(&tracker->fence);
+
+ /* Allocate waiters. */
+ mali_timeline_system_allocate_waiters(system, &waiter_tail, &waiter_head, num_waiters);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+ /* Add tracker to timeline. This will allocate a point for the tracker on the timeline. If
+ * timeline ID is MALI_TIMELINE_NONE the tracker will NOT be added to a timeline and the
+ * point will be MALI_TIMELINE_NO_POINT.
+ *
+ * NOTE: the tracker can fail to be added if the timeline is full. If this happens, the
+ * point will be MALI_TIMELINE_NO_POINT. */
+ MALI_DEBUG_ASSERT(timeline_id < MALI_TIMELINE_MAX || timeline_id == MALI_TIMELINE_NONE);
+ if (likely(timeline_id < MALI_TIMELINE_MAX)) {
+ struct mali_timeline *timeline = system->timelines[timeline_id];
+ mali_timeline_insert_tracker(timeline, tracker);
+ MALI_DEBUG_ASSERT(!mali_timeline_is_empty(timeline));
+ }
+
+ point = tracker->point;
+
+ /* Create waiters for tracker based on supplied fence. Each waiter will increase the
+ * trigger ref count. */
+ mali_timeline_system_create_waiters_and_unlock(system, tracker, waiter_tail, waiter_head);
+ tracker = NULL;
+
+ /* At this point the tracker object might have been freed so we should no longer
+ * access it. */
+
+
+ /* The tracker will always be activated after calling add_tracker, even if NO_POINT is
+ * returned. */
+ return point;
+}
+
+static mali_scheduler_mask mali_timeline_system_release_waiter(struct mali_timeline_system *system,
+ struct mali_timeline_waiter *waiter)
+{
+ struct mali_timeline_tracker *tracker;
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(waiter);
+
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+ tracker = waiter->tracker;
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+
+ /* At this point the waiter has been removed from the timeline's waiter list, but it is
+ * still on the tracker's waiter list. All of the tracker's waiters will be released when
+ * the tracker is activated. */
+
+ waiter->point = MALI_TIMELINE_NO_POINT;
+ waiter->tracker = NULL;
+
+ tracker->trigger_ref_count--;
+ if (0 == tracker->trigger_ref_count) {
+ /* This was the last waiter; activate tracker */
+ schedule_mask |= mali_timeline_tracker_activate(tracker);
+ tracker = NULL;
+ }
+
+ return schedule_mask;
+}
+
+mali_timeline_point mali_timeline_system_get_latest_point(struct mali_timeline_system *system,
+ enum mali_timeline_id timeline_id)
+{
+ mali_timeline_point point;
+ struct mali_timeline *timeline;
+ u32 tid = _mali_osk_get_tid();
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ if (MALI_TIMELINE_MAX <= timeline_id) {
+ return MALI_TIMELINE_NO_POINT;
+ }
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+ timeline = system->timelines[timeline_id];
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ point = MALI_TIMELINE_NO_POINT;
+ if (timeline->point_oldest != timeline->point_next) {
+ point = timeline->point_next - 1;
+ if (MALI_TIMELINE_NO_POINT == point) point--;
+ }
+
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+ return point;
+}
+
+void mali_timeline_initialize(void)
+{
+ _mali_osk_atomic_init(&gp_tracker_count, 0);
+ _mali_osk_atomic_init(&phy_pp_tracker_count, 0);
+ _mali_osk_atomic_init(&virt_pp_tracker_count, 0);
+}
+
+void mali_timeline_terminate(void)
+{
+ _mali_osk_atomic_term(&gp_tracker_count);
+ _mali_osk_atomic_term(&phy_pp_tracker_count);
+ _mali_osk_atomic_term(&virt_pp_tracker_count);
+}
+
+#if defined(MALI_TIMELINE_DEBUG_FUNCTIONS)
+
+static mali_bool is_waiting_on_timeline(struct mali_timeline_tracker *tracker, enum mali_timeline_id id)
+{
+ struct mali_timeline *timeline;
+ struct mali_timeline_system *system;
+
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+
+ MALI_DEBUG_ASSERT_POINTER(tracker->timeline);
+ timeline = tracker->timeline;
+
+ MALI_DEBUG_ASSERT_POINTER(timeline->system);
+ system = timeline->system;
+
+ if (MALI_TIMELINE_MAX > id) {
+ if (MALI_TIMELINE_NO_POINT != tracker->fence.points[id]) {
+ return mali_timeline_is_point_on(system->timelines[id], tracker->fence.points[id]);
+ } else {
+ return MALI_FALSE;
+ }
+ } else {
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_NONE == id);
+ return MALI_FALSE;
+ }
+}
+
+static const char *timeline_id_to_string(enum mali_timeline_id id)
+{
+ switch (id) {
+ case MALI_TIMELINE_GP:
+ return "GP";
+ case MALI_TIMELINE_PP:
+ return "PP";
+ case MALI_TIMELINE_SOFT:
+ return "SOFT";
+ default:
+ return "NONE";
+ }
+}
+
+static const char *timeline_tracker_type_to_string(enum mali_timeline_tracker_type type)
+{
+ switch (type) {
+ case MALI_TIMELINE_TRACKER_GP:
+ return "GP";
+ case MALI_TIMELINE_TRACKER_PP:
+ return "PP";
+ case MALI_TIMELINE_TRACKER_SOFT:
+ return "SOFT";
+ case MALI_TIMELINE_TRACKER_WAIT:
+ return "WAIT";
+ case MALI_TIMELINE_TRACKER_SYNC:
+ return "SYNC";
+ default:
+ return "INVALID";
+ }
+}
+
+mali_timeline_tracker_state mali_timeline_debug_get_tracker_state(struct mali_timeline_tracker *tracker)
+{
+ struct mali_timeline *timeline = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+ timeline = tracker->timeline;
+
+ if (0 != tracker->trigger_ref_count) {
+ return MALI_TIMELINE_TS_WAITING;
+ }
+
+ if (timeline && (timeline->tracker_tail == tracker || NULL != tracker->timeline_prev)) {
+ return MALI_TIMELINE_TS_ACTIVE;
+ }
+
+ if (timeline && (MALI_TIMELINE_NO_POINT == tracker->point)) {
+ return MALI_TIMELINE_TS_INIT;
+ }
+
+ return MALI_TIMELINE_TS_FINISH;
+}
+
+void mali_timeline_debug_print_tracker(struct mali_timeline_tracker *tracker, _mali_osk_print_ctx *print_ctx)
+{
+ const char *tracker_state = "IWAF";
+ char state_char = 'I';
+ char tracker_type[32] = {0};
+
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+
+ state_char = *(tracker_state + mali_timeline_debug_get_tracker_state(tracker));
+ _mali_osk_snprintf(tracker_type, sizeof(tracker_type), "%s", timeline_tracker_type_to_string(tracker->type));
+
+#if defined(CONFIG_SYNC)
+ if (0 != tracker->trigger_ref_count) {
+ _mali_osk_ctxprintf(print_ctx, "TL: %s %u %c - ref_wait:%u [%s(%u),%s(%u),%s(%u), fd:%d, fence:(0x%08X)] job:(0x%08X)\n",
+ tracker_type, tracker->point, state_char, tracker->trigger_ref_count,
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "WaitGP" : " ", tracker->fence.points[0],
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "WaitPP" : " ", tracker->fence.points[1],
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "WaitSOFT" : " ", tracker->fence.points[2],
+ tracker->fence.sync_fd, tracker->sync_fence, tracker->job);
+ } else {
+ _mali_osk_ctxprintf(print_ctx, "TL: %s %u %c fd:%d fence:(0x%08X) job:(0x%08X)\n",
+ tracker_type, tracker->point, state_char,
+ tracker->fence.sync_fd, tracker->sync_fence, tracker->job);
+ }
+#else
+ if (0 != tracker->trigger_ref_count) {
+ _mali_osk_ctxprintf(print_ctx, "TL: %s %u %c - ref_wait:%u [%s(%u),%s(%u),%s(%u)] job:(0x%08X)\n",
+ tracker_type, tracker->point, state_char, tracker->trigger_ref_count,
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "WaitGP" : " ", tracker->fence.points[0],
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "WaitPP" : " ", tracker->fence.points[1],
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "WaitSOFT" : " ", tracker->fence.points[2],
+ tracker->job);
+ } else {
+ _mali_osk_ctxprintf(print_ctx, "TL: %s %u %c job:(0x%08X)\n",
+ tracker_type, tracker->point, state_char,
+ tracker->job);
+ }
+#endif
+}
+
+void mali_timeline_debug_print_timeline(struct mali_timeline *timeline, _mali_osk_print_ctx *print_ctx)
+{
+ struct mali_timeline_tracker *tracker = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ tracker = timeline->tracker_tail;
+ while (NULL != tracker) {
+ mali_timeline_debug_print_tracker(tracker, print_ctx);
+ tracker = tracker->timeline_next;
+ }
+}
+
+#if !(LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0))
+void mali_timeline_debug_direct_print_tracker(struct mali_timeline_tracker *tracker)
+{
+ const char *tracker_state = "IWAF";
+ char state_char = 'I';
+ char tracker_type[32] = {0};
+
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+
+ state_char = *(tracker_state + mali_timeline_debug_get_tracker_state(tracker));
+ _mali_osk_snprintf(tracker_type, sizeof(tracker_type), "%s", timeline_tracker_type_to_string(tracker->type));
+
+#if defined(CONFIG_SYNC)
+ if (0 != tracker->trigger_ref_count) {
+ MALI_PRINT(("TL: %s %u %c - ref_wait:%u [%s(%u),%s(%u),%s(%u), fd:%d, fence:(0x%08X)] job:(0x%08X)\n",
+ tracker_type, tracker->point, state_char, tracker->trigger_ref_count,
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "WaitGP" : " ", tracker->fence.points[0],
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "WaitPP" : " ", tracker->fence.points[1],
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "WaitSOFT" : " ", tracker->fence.points[2],
+ tracker->fence.sync_fd, tracker->sync_fence, tracker->job));
+ } else {
+ MALI_PRINT(("TL: %s %u %c fd:%d fence:(0x%08X) job:(0x%08X)\n",
+ tracker_type, tracker->point, state_char,
+ tracker->fence.sync_fd, tracker->sync_fence, tracker->job));
+ }
+#else
+ if (0 != tracker->trigger_ref_count) {
+ MALI_PRINT(("TL: %s %u %c - ref_wait:%u [%s(%u),%s(%u),%s(%u)] job:(0x%08X)\n",
+ tracker_type, tracker->point, state_char, tracker->trigger_ref_count,
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "WaitGP" : " ", tracker->fence.points[0],
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "WaitPP" : " ", tracker->fence.points[1],
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "WaitSOFT" : " ", tracker->fence.points[2],
+ tracker->job));
+ } else {
+ MALI_PRINT(("TL: %s %u %c job:(0x%08X)\n",
+ tracker_type, tracker->point, state_char,
+ tracker->job));
+ }
+#endif
+}
+
+void mali_timeline_debug_direct_print_timeline(struct mali_timeline *timeline)
+{
+ struct mali_timeline_tracker *tracker = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ tracker = timeline->tracker_tail;
+ while (NULL != tracker) {
+ mali_timeline_debug_direct_print_tracker(tracker);
+ tracker = tracker->timeline_next;
+ }
+}
+
+#endif
+
+void mali_timeline_debug_print_system(struct mali_timeline_system *system, _mali_osk_print_ctx *print_ctx)
+{
+ int i;
+ int num_printed = 0;
+ u32 tid = _mali_osk_get_tid();
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+ /* Print all timelines */
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+ struct mali_timeline *timeline = system->timelines[i];
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ if (NULL == timeline->tracker_head) continue;
+
+ _mali_osk_ctxprintf(print_ctx, "TL: Timeline %s:\n",
+ timeline_id_to_string((enum mali_timeline_id)i));
+
+ mali_timeline_debug_print_timeline(timeline, print_ctx);
+ num_printed++;
+ }
+
+ if (0 == num_printed) {
+ _mali_osk_ctxprintf(print_ctx, "TL: All timelines empty\n");
+ }
+
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+}
+
+#endif /* defined(MALI_TIMELINE_DEBUG_FUNCTIONS) */
diff --git a/drivers/gpu/arm/utgard/common/mali_timeline.h b/drivers/gpu/arm/utgard/common/mali_timeline.h
new file mode 100644
index 000000000000..58d83839f4fe
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_timeline.h
@@ -0,0 +1,535 @@
+/*
+ * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_TIMELINE_H__
+#define __MALI_TIMELINE_H__
+
+#include "mali_osk.h"
+#include "mali_ukk.h"
+#include "mali_session.h"
+#include "mali_kernel_common.h"
+#include "mali_spinlock_reentrant.h"
+#include "mali_sync.h"
+#include "mali_scheduler_types.h"
+#include <linux/version.h>
+
+/**
+ * Soft job timeout.
+ *
+ * Soft jobs have to be signaled as complete after activation. Normally this is done by user space,
+ * but in order to guarantee that every soft job is completed, we also have a timer.
+ */
+#define MALI_TIMELINE_TIMEOUT_HZ ((unsigned long) (HZ * 3 / 2)) /* 1500 ms. */
+
+/**
+ * Timeline type.
+ */
+typedef enum mali_timeline_id {
+ MALI_TIMELINE_GP = MALI_UK_TIMELINE_GP, /**< GP job timeline. */
+ MALI_TIMELINE_PP = MALI_UK_TIMELINE_PP, /**< PP job timeline. */
+ MALI_TIMELINE_SOFT = MALI_UK_TIMELINE_SOFT, /**< Soft job timeline. */
+ MALI_TIMELINE_MAX = MALI_UK_TIMELINE_MAX
+} mali_timeline_id;
+
+/**
+ * Used by trackers that should not be added to a timeline (@ref mali_timeline_system_add_tracker).
+ */
+#define MALI_TIMELINE_NONE MALI_TIMELINE_MAX
+
+/**
+ * Tracker type.
+ */
+typedef enum mali_timeline_tracker_type {
+ MALI_TIMELINE_TRACKER_GP = 0, /**< Tracker used by GP jobs. */
+ MALI_TIMELINE_TRACKER_PP = 1, /**< Tracker used by PP jobs. */
+ MALI_TIMELINE_TRACKER_SOFT = 2, /**< Tracker used by soft jobs. */
+ MALI_TIMELINE_TRACKER_WAIT = 3, /**< Tracker used for fence wait. */
+ MALI_TIMELINE_TRACKER_SYNC = 4, /**< Tracker used for sync fence. */
+ MALI_TIMELINE_TRACKER_MAX = 5,
+} mali_timeline_tracker_type;
+
+/**
+ * Tracker activation error.
+ */
+typedef u32 mali_timeline_activation_error;
+#define MALI_TIMELINE_ACTIVATION_ERROR_NONE 0
+#define MALI_TIMELINE_ACTIVATION_ERROR_SYNC_BIT (1<<1)
+#define MALI_TIMELINE_ACTIVATION_ERROR_FATAL_BIT (1<<0)
+
+/**
+ * Type used to represent a point on a timeline.
+ */
+typedef u32 mali_timeline_point;
+
+/**
+ * Used to represent no point on a timeline.
+ */
+#define MALI_TIMELINE_NO_POINT ((mali_timeline_point) 0)
+
+/**
+ * The maximum span of points on a timeline. A timeline will be considered full if the difference
+ * between the oldest and newest points is equal to or larger than this value.
+ */
+#define MALI_TIMELINE_MAX_POINT_SPAN 65536
+
+/**
+ * Magic value used to assert on validity of trackers.
+ */
+#define MALI_TIMELINE_TRACKER_MAGIC 0xabcdabcd
+
+struct mali_timeline;
+struct mali_timeline_waiter;
+struct mali_timeline_tracker;
+
+/**
+ * Timeline fence.
+ */
+struct mali_timeline_fence {
+ mali_timeline_point points[MALI_TIMELINE_MAX]; /**< For each timeline, a point or MALI_TIMELINE_NO_POINT. */
+ s32 sync_fd; /**< A file descriptor representing a sync fence, or -1. */
+};
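+
+/* Illustrative initialization (not from the driver): a fence that depends only
+ * on PP point 42 and carries no sync fence. The index order follows the debug
+ * printing code in mali_timeline.c (GP, PP, SOFT):
+ *
+ *   struct mali_timeline_fence fence = {
+ *           { MALI_TIMELINE_NO_POINT, 42, MALI_TIMELINE_NO_POINT },
+ *           -1,
+ *   };
+ */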
+
+/**
+ * Timeline system.
+ *
+ * The Timeline system has a set of timelines associated with a session.
+ */
+struct mali_timeline_system {
+ struct mali_spinlock_reentrant *spinlock; /**< Spin lock protecting the timeline system */
+ struct mali_timeline *timelines[MALI_TIMELINE_MAX]; /**< The timelines in this system */
+
+ /* Single-linked list of unused waiter objects. Uses the tracker_next field in tracker. */
+ struct mali_timeline_waiter *waiter_empty_list;
+
+ struct mali_session_data *session; /**< Session that owns this system. */
+
+ mali_bool timer_enabled; /**< Set to MALI_TRUE if soft job timer should be enabled, MALI_FALSE if not. */
+
+ _mali_osk_wait_queue_t *wait_queue; /**< Wait queue. */
+
+#if defined(CONFIG_SYNC)
+ struct sync_timeline *signaled_sync_tl; /**< Special sync timeline used to create pre-signaled sync fences */
+#endif /* defined(CONFIG_SYNC) */
+};
+
+/**
+ * Timeline. Each Timeline system will have MALI_TIMELINE_MAX timelines.
+ */
+struct mali_timeline {
+ mali_timeline_point point_next; /**< The next available point. */
+ mali_timeline_point point_oldest; /**< The oldest point not released. */
+
+ /* Double-linked list of trackers. Sorted in ascending order by tracker->point with
+ * tail pointing to the tracker with the oldest point. */
+ struct mali_timeline_tracker *tracker_head;
+ struct mali_timeline_tracker *tracker_tail;
+
+ /* Double-linked list of waiters. Sorted in ascending order by waiter->point
+ * with tail pointing to the waiter with the oldest point. */
+ struct mali_timeline_waiter *waiter_head;
+ struct mali_timeline_waiter *waiter_tail;
+
+ struct mali_timeline_system *system; /**< Timeline system this timeline belongs to. */
+ enum mali_timeline_id id; /**< Timeline type. */
+
+#if defined(CONFIG_SYNC)
+ struct sync_timeline *sync_tl; /**< Sync timeline that corresponds to this timeline. */
+ mali_bool destroyed;
+ struct mali_spinlock_reentrant *spinlock; /**< Spin lock protecting the timeline system */
+#endif /* defined(CONFIG_SYNC) */
+
+ /* The following fields are used to time out soft job trackers. */
+ _mali_osk_wq_delayed_work_t *delayed_work;
+ mali_bool timer_active;
+};
+
+/**
+ * Timeline waiter.
+ */
+struct mali_timeline_waiter {
+ mali_timeline_point point; /**< Point on timeline we are waiting for to be released. */
+ struct mali_timeline_tracker *tracker; /**< Tracker that is waiting. */
+
+ struct mali_timeline_waiter *timeline_next; /**< Next waiter on timeline's waiter list. */
+ struct mali_timeline_waiter *timeline_prev; /**< Previous waiter on timeline's waiter list. */
+
+ struct mali_timeline_waiter *tracker_next; /**< Next waiter on tracker's waiter list. */
+};
+
+/**
+ * Timeline tracker.
+ */
+struct mali_timeline_tracker {
+ MALI_DEBUG_CODE(u32 magic); /**< Should always be MALI_TIMELINE_TRACKER_MAGIC for a valid tracker. */
+
+ mali_timeline_point point; /**< Point on timeline for this tracker */
+
+ struct mali_timeline_tracker *timeline_next; /**< Next tracker on timeline's tracker list */
+ struct mali_timeline_tracker *timeline_prev; /**< Previous tracker on timeline's tracker list */
+
+ u32 trigger_ref_count; /**< When zero tracker will be activated */
+ mali_timeline_activation_error activation_error; /**< Activation error. */
+ struct mali_timeline_fence fence; /**< Fence used to create this tracker */
+
+ /* Single-linked list of waiters. Kept in insertion order, with
+ * tail pointing to the first waiter inserted. */
+ struct mali_timeline_waiter *waiter_head;
+ struct mali_timeline_waiter *waiter_tail;
+
+#if defined(CONFIG_SYNC)
+ /* These are only used if the tracker is waiting on a sync fence. */
+ struct mali_timeline_waiter *waiter_sync; /**< A direct pointer to timeline waiter representing sync fence. */
+ struct sync_fence_waiter sync_fence_waiter; /**< Used to connect sync fence and tracker in sync fence wait callback. */
+ struct sync_fence *sync_fence; /**< The sync fence this tracker is waiting on. */
+ _mali_osk_list_t sync_fence_cancel_list; /**< List node used to cancel sync fence waiters. */
+#endif /* defined(CONFIG_SYNC) */
+
+ struct mali_timeline_system *system; /**< Timeline system. */
+ struct mali_timeline *timeline; /**< Timeline, or NULL if not on a timeline. */
+ enum mali_timeline_tracker_type type; /**< Type of tracker. */
+ void *job; /**< Owner of tracker. */
+
+ /* The following fields are used to time out soft job trackers. */
+ unsigned long os_tick_create;
+ unsigned long os_tick_activate;
+ mali_bool timer_active;
+};
+
+extern _mali_osk_atomic_t gp_tracker_count;
+extern _mali_osk_atomic_t phy_pp_tracker_count;
+extern _mali_osk_atomic_t virt_pp_tracker_count;
+
+/**
+ * What follows is a set of functions to check the state of a timeline and to determine where on a
+ * timeline a given point is. Most of these checks will translate the timeline so the oldest point
+ * on the timeline is aligned with zero. Remember that all of these calculations are done on
+ * unsigned integers.
+ *
+ * The following example illustrates the three different states a point can be in. The timeline has
+ * been translated to put the oldest point at zero:
+ *
+ *
+ *
+ *                            [ point is in forbidden zone ]
+ *                                       64k wide
+ *                             MALI_TIMELINE_MAX_POINT_SPAN
+ *
+ * [ point is on timeline    )                             ( point is released ]
+ *
+ * 0--------------------------##############################--------------------2^32 - 1
+ * ^                          ^
+ * |                          |
+ * oldest point on timeline   next point on timeline
+ */
+
+/**
+ * Compare two timeline points
+ *
+ * Returns true if a is after b, false if a is before or equal to b.
+ *
+ * This function ignores MALI_TIMELINE_MAX_POINT_SPAN. Wrapping is supported and
+ * the result will be correct if the points are less than UINT_MAX/2 apart.
+ *
+ * @param a Point on timeline
+ * @param b Point on timeline
+ * @return MALI_TRUE if a is after b
+ */
+MALI_STATIC_INLINE mali_bool mali_timeline_point_after(mali_timeline_point a, mali_timeline_point b)
+{
+ return 0 > ((s32)b) - ((s32)a);
+}
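+
+/* Illustrative check (not part of the driver API): with 32-bit wrap-around,
+ * mali_timeline_point_after(2, 0xFFFFFFFEu) computes
+ * (s32)0xFFFFFFFE - (s32)2 == -4 < 0, so point 2 is correctly considered
+ * to be after a point taken just before the counter wrapped. */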
+
+/**
+ * Check if a point is on timeline. A point is on a timeline if it is greater than, or equal to,
+ * the oldest point, and less than the next point.
+ *
+ * @param timeline Timeline.
+ * @param point Point on timeline.
+ * @return MALI_TRUE if point is on timeline, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_timeline_is_point_on(struct mali_timeline *timeline, mali_timeline_point point)
+{
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT != point);
+
+ return (point - timeline->point_oldest) < (timeline->point_next - timeline->point_oldest);
+}
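+
+/* Worked example (illustrative): with point_oldest == 10 and point_next == 13,
+ * points 10, 11 and 12 are on the timeline: (11 - 10) == 1 < (13 - 10) == 3,
+ * while point 13 gives 3 < 3, which is false. */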
+
+/**
+ * Check if a point has been released. A point is released if it is no longer on the timeline
+ * and lies beyond the forbidden zone, i.e. when normalized against the oldest point it is more
+ * than MALI_TIMELINE_MAX_POINT_SPAN past the next point.
+ *
+ * @param timeline Timeline.
+ * @param point Point on timeline.
+ * @return MALI_TRUE if point has been released, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_timeline_is_point_released(struct mali_timeline *timeline, mali_timeline_point point)
+{
+ mali_timeline_point point_normalized;
+ mali_timeline_point next_normalized;
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT != point);
+
+ point_normalized = point - timeline->point_oldest;
+ next_normalized = timeline->point_next - timeline->point_oldest;
+
+ return point_normalized > (next_normalized + MALI_TIMELINE_MAX_POINT_SPAN);
+}
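+
+/* Worked example (illustrative): with point_oldest == 100 and point_next == 105,
+ * a stale point 50 normalizes to 50 - 100 == 0xFFFFFFCE, which is greater than
+ * 5 + MALI_TIMELINE_MAX_POINT_SPAN, so the point is reported as released. */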
+
+/**
+ * Check if a point is valid. A point is valid if it is on the timeline or has been released.
+ *
+ * @param timeline Timeline.
+ * @param point Point on timeline.
+ * @return MALI_TRUE if point is valid, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_timeline_is_point_valid(struct mali_timeline *timeline, mali_timeline_point point)
+{
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ return mali_timeline_is_point_on(timeline, point) || mali_timeline_is_point_released(timeline, point);
+}
+
+/**
+ * Check if timeline is empty (has no points on it). A timeline is empty if next == oldest.
+ *
+ * @param timeline Timeline.
+ * @return MALI_TRUE if timeline is empty, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_timeline_is_empty(struct mali_timeline *timeline)
+{
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ return timeline->point_next == timeline->point_oldest;
+}
+
+/**
+ * Check if timeline is full. A valid timeline cannot span more than 64k points (@ref
+ * MALI_TIMELINE_MAX_POINT_SPAN).
+ *
+ * @param timeline Timeline.
+ * @return MALI_TRUE if timeline is full, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_timeline_is_full(struct mali_timeline *timeline)
+{
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ return MALI_TIMELINE_MAX_POINT_SPAN <= (timeline->point_next - timeline->point_oldest);
+}
+
+/**
+ * Create a new timeline system.
+ *
+ * @param session The session this timeline system will belong to.
+ * @return New timeline system.
+ */
+struct mali_timeline_system *mali_timeline_system_create(struct mali_session_data *session);
+
+/**
+ * Abort timeline system.
+ *
+ * This will release all pending waiters in the timeline system causing all trackers to be
+ * activated.
+ *
+ * @param system Timeline system to abort all jobs from.
+ */
+void mali_timeline_system_abort(struct mali_timeline_system *system);
+
+/**
+ * Destroy an empty timeline system.
+ *
+ * @note @ref mali_timeline_system_abort() should be called prior to this function.
+ *
+ * @param system Timeline system to destroy.
+ */
+void mali_timeline_system_destroy(struct mali_timeline_system *system);
+
+/**
+ * Stop the soft job timer.
+ *
+ * @param system Timeline system
+ */
+void mali_timeline_system_stop_timer(struct mali_timeline_system *system);
+
+/**
+ * Add a tracker to a timeline system and optionally also on a timeline.
+ *
+ * Once added to the timeline system, the tracker is guaranteed to be activated. The tracker can be
+ * activated before this function returns. Thus, it is also possible that the tracker is released
+ * before this function returns, depending on the tracker type.
+ *
+ * @note Tracker must be initialized (@ref mali_timeline_tracker_init) before being added to the
+ * timeline system.
+ *
+ * @param system Timeline system the tracker will be added to.
+ * @param tracker The tracker to be added.
+ * @param timeline_id Id of the timeline the tracker will be added to, or
+ * MALI_TIMELINE_NONE if it should not be added on a timeline.
+ * @return Point on timeline identifying this tracker, or MALI_TIMELINE_NO_POINT if not on timeline.
+ */
+mali_timeline_point mali_timeline_system_add_tracker(struct mali_timeline_system *system,
+ struct mali_timeline_tracker *tracker,
+ enum mali_timeline_id timeline_id);
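+
+/* Illustrative call sequence (error handling omitted; see
+ * mali_timeline_fence_wait.c in this patch for a real in-tree user):
+ *
+ *   mali_timeline_tracker_init(&tracker, MALI_TIMELINE_TRACKER_PP, &fence, job);
+ *   point = mali_timeline_system_add_tracker(system, &tracker, MALI_TIMELINE_PP);
+ */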
+
+/**
+ * Get latest point on timeline.
+ *
+ * @param system Timeline system.
+ * @param timeline_id Id of timeline to get latest point from.
+ * @return Latest point on timeline, or MALI_TIMELINE_NO_POINT if the timeline is empty.
+ */
+mali_timeline_point mali_timeline_system_get_latest_point(struct mali_timeline_system *system,
+ enum mali_timeline_id timeline_id);
+
+/**
+ * Initialize tracker.
+ *
+ * Must be called before tracker is added to timeline system (@ref mali_timeline_system_add_tracker).
+ *
+ * @param tracker Tracker to initialize.
+ * @param type Type of tracker.
+ * @param fence Fence used to set up dependencies for tracker.
+ * @param job Pointer to job struct this tracker is associated with.
+ */
+void mali_timeline_tracker_init(struct mali_timeline_tracker *tracker,
+ mali_timeline_tracker_type type,
+ struct mali_timeline_fence *fence,
+ void *job);
+
+/**
+ * Grab trigger ref count on tracker.
+ *
+ * This will prevent tracker from being activated until the trigger ref count reaches zero.
+ *
+ * @note Tracker must have been initialized (@ref mali_timeline_tracker_init).
+ *
+ * @param system Timeline system.
+ * @param tracker Tracker.
+ */
+void mali_timeline_system_tracker_get(struct mali_timeline_system *system, struct mali_timeline_tracker *tracker);
+
+/**
+ * Release trigger ref count on tracker.
+ *
+ * If the trigger ref count reaches zero, the tracker will be activated.
+ *
+ * @param system Timeline system.
+ * @param tracker Tracker.
+ * @param activation_error Error bitmask if activated with error, or MALI_TIMELINE_ACTIVATION_ERROR_NONE if no error.
+ * @return Scheduling bitmask.
+ */
+mali_scheduler_mask mali_timeline_system_tracker_put(struct mali_timeline_system *system, struct mali_timeline_tracker *tracker, mali_timeline_activation_error activation_error);
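+
+/* Usage note: every mali_timeline_system_tracker_get() must be balanced by a
+ * mali_timeline_system_tracker_put(); the tracker activates when the trigger
+ * ref count (initialized to 1 by mali_timeline_tracker_init()) reaches zero. */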
+
+/**
+ * Release a tracker from the timeline system.
+ *
+ * This is used to signal that the job being tracked is finished, either due to normal circumstances
+ * (job complete/abort) or due to a timeout.
+ *
+ * We may need to schedule some subsystems after a tracker has been released and the returned
+ * bitmask will tell us if it is necessary. If the return value is non-zero, this value needs to be
+ * sent as an input parameter to @ref mali_scheduler_schedule_from_mask() to do the scheduling.
+ *
+ * @note Tracker must have been activated before being released.
+ * @warning Not calling @ref mali_scheduler_schedule_from_mask() after releasing a tracker can lead
+ * to a deadlock.
+ *
+ * @param tracker Tracker being released.
+ * @return Scheduling bitmask.
+ */
+mali_scheduler_mask mali_timeline_tracker_release(struct mali_timeline_tracker *tracker);
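+
+/* Illustrative release pattern, mirroring how the scheduling mask is consumed
+ * elsewhere in this driver (the exact scheduling entry point may differ):
+ *
+ *   mali_scheduler_mask mask = mali_timeline_tracker_release(tracker);
+ *   if (MALI_SCHEDULER_MASK_EMPTY != mask)
+ *           mali_executor_schedule_from_mask(mask, MALI_FALSE);
+ */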
+
+MALI_STATIC_INLINE mali_bool mali_timeline_tracker_activation_error(
+ struct mali_timeline_tracker *tracker)
+{
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+ return (MALI_TIMELINE_ACTIVATION_ERROR_FATAL_BIT &
+ tracker->activation_error) ? MALI_TRUE : MALI_FALSE;
+}
+
+/**
+ * Copy data from a UK fence to a Timeline fence.
+ *
+ * @param fence Timeline fence.
+ * @param uk_fence UK fence.
+ */
+void mali_timeline_fence_copy_uk_fence(struct mali_timeline_fence *fence, _mali_uk_fence_t *uk_fence);
+
+void mali_timeline_initialize(void);
+
+void mali_timeline_terminate(void);
+
+MALI_STATIC_INLINE mali_bool mali_timeline_has_gp_job(void)
+{
+ return 0 < _mali_osk_atomic_read(&gp_tracker_count);
+}
+
+MALI_STATIC_INLINE mali_bool mali_timeline_has_physical_pp_job(void)
+{
+ return 0 < _mali_osk_atomic_read(&phy_pp_tracker_count);
+}
+
+MALI_STATIC_INLINE mali_bool mali_timeline_has_virtual_pp_job(void)
+{
+ return 0 < _mali_osk_atomic_read(&virt_pp_tracker_count);
+}
+
+#if defined(DEBUG)
+#define MALI_TIMELINE_DEBUG_FUNCTIONS
+#endif /* DEBUG */
+#if defined(MALI_TIMELINE_DEBUG_FUNCTIONS)
+
+/**
+ * Tracker state. Used for debug printing.
+ */
+typedef enum mali_timeline_tracker_state {
+ MALI_TIMELINE_TS_INIT = 0,
+ MALI_TIMELINE_TS_WAITING = 1,
+ MALI_TIMELINE_TS_ACTIVE = 2,
+ MALI_TIMELINE_TS_FINISH = 3,
+} mali_timeline_tracker_state;
+
+/**
+ * Get tracker state.
+ *
+ * @param tracker Tracker to check.
+ * @return State of tracker.
+ */
+mali_timeline_tracker_state mali_timeline_debug_get_tracker_state(struct mali_timeline_tracker *tracker);
+
+/**
+ * Print debug information about tracker.
+ *
+ * @param tracker Tracker to print.
+ */
+void mali_timeline_debug_print_tracker(struct mali_timeline_tracker *tracker, _mali_osk_print_ctx *print_ctx);
+
+/**
+ * Print debug information about timeline.
+ *
+ * @param timeline Timeline to print.
+ */
+void mali_timeline_debug_print_timeline(struct mali_timeline *timeline, _mali_osk_print_ctx *print_ctx);
+
+#if !(LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0))
+void mali_timeline_debug_direct_print_tracker(struct mali_timeline_tracker *tracker);
+void mali_timeline_debug_direct_print_timeline(struct mali_timeline *timeline);
+#endif
+
+/**
+ * Print debug information about timeline system.
+ *
+ * @param system Timeline system to print.
+ */
+void mali_timeline_debug_print_system(struct mali_timeline_system *system, _mali_osk_print_ctx *print_ctx);
+
+#endif /* defined(MALI_TIMELINE_DEBUG_FUNCTIONS) */
+
+#endif /* __MALI_TIMELINE_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_timeline_fence_wait.c b/drivers/gpu/arm/utgard/common/mali_timeline_fence_wait.c
new file mode 100644
index 000000000000..3c58928dd3a2
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_timeline_fence_wait.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_timeline_fence_wait.h"
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_spinlock_reentrant.h"
+
+/**
+ * Allocate a fence waiter tracker.
+ *
+ * @return New fence waiter if successful, NULL if not.
+ */
+static struct mali_timeline_fence_wait_tracker *mali_timeline_fence_wait_tracker_alloc(void)
+{
+ return (struct mali_timeline_fence_wait_tracker *) _mali_osk_calloc(1, sizeof(struct mali_timeline_fence_wait_tracker));
+}
+
+/**
+ * Free fence waiter tracker.
+ *
+ * @param wait Fence wait tracker to free.
+ */
+static void mali_timeline_fence_wait_tracker_free(struct mali_timeline_fence_wait_tracker *wait)
+{
+ MALI_DEBUG_ASSERT_POINTER(wait);
+ _mali_osk_atomic_term(&wait->refcount);
+ _mali_osk_free(wait);
+}
+
+/**
+ * Check if fence wait tracker has been activated. Used as a wait queue condition.
+ *
+ * @param data Fence waiter.
+ * @return MALI_TRUE if tracker has been activated, MALI_FALSE if not.
+ */
+static mali_bool mali_timeline_fence_wait_tracker_is_activated(void *data)
+{
+ struct mali_timeline_fence_wait_tracker *wait;
+
+ wait = (struct mali_timeline_fence_wait_tracker *) data;
+ MALI_DEBUG_ASSERT_POINTER(wait);
+
+ return wait->activated;
+}
+
+/**
+ * Check if fence has been signaled.
+ *
+ * @param system Timeline system.
+ * @param fence Timeline fence.
+ * @return MALI_TRUE if fence is signaled, MALI_FALSE if not.
+ */
+static mali_bool mali_timeline_fence_wait_check_status(struct mali_timeline_system *system, struct mali_timeline_fence *fence)
+{
+ int i;
+ u32 tid = _mali_osk_get_tid();
+ mali_bool ret = MALI_TRUE;
+#if defined(CONFIG_SYNC)
+ struct sync_fence *sync_fence = NULL;
+#endif
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(fence);
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+ struct mali_timeline *timeline;
+ mali_timeline_point point;
+
+ point = fence->points[i];
+
+ if (likely(MALI_TIMELINE_NO_POINT == point)) {
+ /* Fence contains no point on this timeline. */
+ continue;
+ }
+
+ timeline = system->timelines[i];
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ if (unlikely(!mali_timeline_is_point_valid(timeline, point))) {
+ MALI_PRINT_ERROR(("Mali Timeline: point %d is not valid (oldest=%d, next=%d)\n", point, timeline->point_oldest, timeline->point_next));
+ }
+
+ if (!mali_timeline_is_point_released(timeline, point)) {
+ ret = MALI_FALSE;
+ goto exit;
+ }
+ }
+
+#if defined(CONFIG_SYNC)
+ if (-1 != fence->sync_fd) {
+ sync_fence = sync_fence_fdget(fence->sync_fd);
+ if (likely(NULL != sync_fence)) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+ if (0 == sync_fence->status) {
+#else
+ if (0 == atomic_read(&sync_fence->status)) {
+#endif
+ ret = MALI_FALSE;
+ }
+ } else {
+ MALI_PRINT_ERROR(("Mali Timeline: failed to get sync fence from fd %d\n", fence->sync_fd));
+ }
+ }
+#endif /* defined(CONFIG_SYNC) */
+
+exit:
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+#if defined(CONFIG_SYNC)
+ if (NULL != sync_fence) {
+ sync_fence_put(sync_fence);
+ }
+#endif /* defined(CONFIG_SYNC) */
+
+ return ret;
+}
+
+mali_bool mali_timeline_fence_wait(struct mali_timeline_system *system, struct mali_timeline_fence *fence, u32 timeout)
+{
+ struct mali_timeline_fence_wait_tracker *wait;
+ mali_timeline_point point;
+ mali_bool ret;
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(fence);
+
+ MALI_DEBUG_PRINT(4, ("Mali Timeline: wait on fence\n"));
+
+ if (MALI_TIMELINE_FENCE_WAIT_TIMEOUT_IMMEDIATELY == timeout) {
+ return mali_timeline_fence_wait_check_status(system, fence);
+ }
+
+ wait = mali_timeline_fence_wait_tracker_alloc();
+ if (unlikely(NULL == wait)) {
+ MALI_PRINT_ERROR(("Mali Timeline: failed to allocate data for fence wait\n"));
+ return MALI_FALSE;
+ }
+
+ wait->activated = MALI_FALSE;
+ wait->system = system;
+
+ /* Initialize refcount to two references. The first reference will be released by this
+ * function after the wait is over. The second reference will be released when the tracker
+ * is activated. */
+ _mali_osk_atomic_init(&wait->refcount, 2);
+
+ /* Add tracker to timeline system, but not to a timeline. */
+ mali_timeline_tracker_init(&wait->tracker, MALI_TIMELINE_TRACKER_WAIT, fence, wait);
+ point = mali_timeline_system_add_tracker(system, &wait->tracker, MALI_TIMELINE_NONE);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT == point);
+ MALI_IGNORE(point);
+
+ /* Wait for the tracker to be activated or time out. */
+ if (MALI_TIMELINE_FENCE_WAIT_TIMEOUT_NEVER == timeout) {
+ _mali_osk_wait_queue_wait_event(system->wait_queue, mali_timeline_fence_wait_tracker_is_activated, (void *) wait);
+ } else {
+ _mali_osk_wait_queue_wait_event_timeout(system->wait_queue, mali_timeline_fence_wait_tracker_is_activated, (void *) wait, timeout);
+ }
+
+ ret = wait->activated;
+
+ if (0 == _mali_osk_atomic_dec_return(&wait->refcount)) {
+ mali_timeline_fence_wait_tracker_free(wait);
+ }
+
+ return ret;
+}
+
+void mali_timeline_fence_wait_activate(struct mali_timeline_fence_wait_tracker *wait)
+{
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_DEBUG_ASSERT_POINTER(wait);
+ MALI_DEBUG_ASSERT_POINTER(wait->system);
+
+ MALI_DEBUG_PRINT(4, ("Mali Timeline: activation for fence wait tracker\n"));
+
+ MALI_DEBUG_ASSERT(MALI_FALSE == wait->activated);
+ wait->activated = MALI_TRUE;
+
+ _mali_osk_wait_queue_wake_up(wait->system->wait_queue);
+
+ /* Nothing can wait on this tracker, so nothing to schedule after release. */
+ schedule_mask = mali_timeline_tracker_release(&wait->tracker);
+ MALI_DEBUG_ASSERT(MALI_SCHEDULER_MASK_EMPTY == schedule_mask);
+ MALI_IGNORE(schedule_mask);
+
+ if (0 == _mali_osk_atomic_dec_return(&wait->refcount)) {
+ mali_timeline_fence_wait_tracker_free(wait);
+ }
+}
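+
+/*
+ * Usage sketch (illustrative only, not part of this file's interface):
+ * blocking on a fence for at most 500 ms. "system" and "fence" are assumed
+ * to have been set up by the caller.
+ *
+ *   if (MALI_TRUE != mali_timeline_fence_wait(system, &fence, 500)) {
+ *           MALI_DEBUG_PRINT(2, ("Mali Timeline: fence wait timed out or failed\n"));
+ *   }
+ */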
diff --git a/drivers/gpu/arm/utgard/common/mali_timeline_fence_wait.h b/drivers/gpu/arm/utgard/common/mali_timeline_fence_wait.h
new file mode 100644
index 000000000000..f5440ab6fc6d
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_timeline_fence_wait.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2013, 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_timeline_fence_wait.h
+ *
+ * This file contains functions used to wait until a Timeline fence is signaled.
+ */
+
+#ifndef __MALI_TIMELINE_FENCE_WAIT_H__
+#define __MALI_TIMELINE_FENCE_WAIT_H__
+
+#include "mali_osk.h"
+#include "mali_timeline.h"
+
+/**
+ * If used as the timeout argument in @ref mali_timeline_fence_wait, a timer is not used and the
+ * function only returns when the fence is signaled.
+ */
+#define MALI_TIMELINE_FENCE_WAIT_TIMEOUT_NEVER ((u32) -1)
+
+/**
+ * If used as the timeout argument in @ref mali_timeline_fence_wait, the function will return
+ * immediately with the current state of the fence.
+ */
+#define MALI_TIMELINE_FENCE_WAIT_TIMEOUT_IMMEDIATELY 0
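+
+/*
+ * Illustrative sketch of the three timeout modes (call sites are
+ * hypothetical; "system" and "fence" are assumed to exist):
+ *
+ *   Poll:  mali_timeline_fence_wait(system, &fence, MALI_TIMELINE_FENCE_WAIT_TIMEOUT_IMMEDIATELY);
+ *   Bound: mali_timeline_fence_wait(system, &fence, 100);
+ *   Block: mali_timeline_fence_wait(system, &fence, MALI_TIMELINE_FENCE_WAIT_TIMEOUT_NEVER);
+ */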
+
+/**
+ * Fence wait tracker.
+ *
+ * The fence wait tracker is added to the Timeline system with the fence we are waiting on as a
+ * dependency. We will then perform a blocking wait, possibly with a timeout, until the tracker is
+ * activated, which happens when the fence is signaled.
+ */
+struct mali_timeline_fence_wait_tracker {
+ mali_bool activated; /**< MALI_TRUE if the tracker has been activated, MALI_FALSE if not. */
+ _mali_osk_atomic_t refcount; /**< Reference count. */
+ struct mali_timeline_system *system; /**< Timeline system. */
+ struct mali_timeline_tracker tracker; /**< Timeline tracker. */
+};
+
+/**
+ * Wait for a fence to be signaled, or until the timeout is reached.
+ *
+ * @param system Timeline system.
+ * @param fence Fence to wait on.
+ * @param timeout Timeout in ms, or MALI_TIMELINE_FENCE_WAIT_TIMEOUT_NEVER or
+ * MALI_TIMELINE_FENCE_WAIT_TIMEOUT_IMMEDIATELY.
+ * @return MALI_TRUE if signaled, MALI_FALSE if timed out.
+ */
+mali_bool mali_timeline_fence_wait(struct mali_timeline_system *system, struct mali_timeline_fence *fence, u32 timeout);
+
+/**
+ * Used by the Timeline system to activate a fence wait tracker.
+ *
+ * @param fence_wait_tracker Fence wait tracker.
+ */
+void mali_timeline_fence_wait_activate(struct mali_timeline_fence_wait_tracker *fence_wait_tracker);
+
+#endif /* __MALI_TIMELINE_FENCE_WAIT_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_timeline_sync_fence.c b/drivers/gpu/arm/utgard/common/mali_timeline_sync_fence.c
new file mode 100644
index 000000000000..73843f07c990
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_timeline_sync_fence.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2013, 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_timeline_sync_fence.h"
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_sync.h"
+
+#if defined(CONFIG_SYNC)
+
+/**
+ * Creates a sync fence tracker and a sync fence. Adds sync fence tracker to Timeline system and
+ * returns sync fence. The sync fence will be signaled when the sync fence tracker is activated.
+ *
+ * @param timeline Timeline.
+ * @param point Point on timeline.
+ * @return Sync fence that will be signaled when tracker is activated.
+ */
+static struct sync_fence *mali_timeline_sync_fence_create_and_add_tracker(struct mali_timeline *timeline, mali_timeline_point point)
+{
+ struct mali_timeline_sync_fence_tracker *sync_fence_tracker;
+ struct sync_fence *sync_fence;
+ struct mali_timeline_fence fence;
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT != point);
+
+ /* Allocate sync fence tracker. */
+ sync_fence_tracker = _mali_osk_calloc(1, sizeof(struct mali_timeline_sync_fence_tracker));
+ if (NULL == sync_fence_tracker) {
+ MALI_PRINT_ERROR(("Mali Timeline: sync_fence_tracker allocation failed\n"));
+ return NULL;
+ }
+
+ /* Create sync flag. */
+ MALI_DEBUG_ASSERT_POINTER(timeline->sync_tl);
+ sync_fence_tracker->flag = mali_sync_flag_create(timeline->sync_tl, point);
+ if (NULL == sync_fence_tracker->flag) {
+ MALI_PRINT_ERROR(("Mali Timeline: sync_flag creation failed\n"));
+ _mali_osk_free(sync_fence_tracker);
+ return NULL;
+ }
+
+ /* Create sync fence from sync flag. */
+ sync_fence = mali_sync_flag_create_fence(sync_fence_tracker->flag);
+ if (NULL == sync_fence) {
+ MALI_PRINT_ERROR(("Mali Timeline: sync_fence creation failed\n"));
+ mali_sync_flag_put(sync_fence_tracker->flag);
+ _mali_osk_free(sync_fence_tracker);
+ return NULL;
+ }
+
+ /* Setup fence for tracker. */
+ _mali_osk_memset(&fence, 0, sizeof(struct mali_timeline_fence));
+ fence.sync_fd = -1;
+ fence.points[timeline->id] = point;
+
+ /* Finally, add the tracker to Timeline system. */
+ mali_timeline_tracker_init(&sync_fence_tracker->tracker, MALI_TIMELINE_TRACKER_SYNC, &fence, sync_fence_tracker);
+ point = mali_timeline_system_add_tracker(timeline->system, &sync_fence_tracker->tracker, MALI_TIMELINE_NONE);
+ MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT == point);
+ MALI_IGNORE(point);
+
+ return sync_fence;
+}
+
+s32 mali_timeline_sync_fence_create(struct mali_timeline_system *system, struct mali_timeline_fence *fence)
+{
+ u32 i;
+ struct sync_fence *sync_fence_acc = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(fence);
+
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+ struct mali_timeline *timeline;
+ struct sync_fence *sync_fence;
+
+ if (MALI_TIMELINE_NO_POINT == fence->points[i]) continue;
+
+ timeline = system->timelines[i];
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ sync_fence = mali_timeline_sync_fence_create_and_add_tracker(timeline, fence->points[i]);
+ if (NULL == sync_fence) goto error;
+
+ if (NULL != sync_fence_acc) {
+ /* Merge sync fences. */
+ sync_fence_acc = mali_sync_fence_merge(sync_fence_acc, sync_fence);
+ if (NULL == sync_fence_acc) goto error;
+ } else {
+ /* This was the first sync fence created. */
+ sync_fence_acc = sync_fence;
+ }
+ }
+
+ if (-1 != fence->sync_fd) {
+ struct sync_fence *sync_fence;
+
+ sync_fence = sync_fence_fdget(fence->sync_fd);
+ if (NULL == sync_fence) goto error;
+
+ if (NULL != sync_fence_acc) {
+ sync_fence_acc = mali_sync_fence_merge(sync_fence_acc, sync_fence);
+ if (NULL == sync_fence_acc) goto error;
+ } else {
+ sync_fence_acc = sync_fence;
+ }
+ }
+
+ if (NULL == sync_fence_acc) {
+ MALI_DEBUG_ASSERT_POINTER(system->signaled_sync_tl);
+
+ /* There was nothing to wait on, so return an already signaled fence. */
+
+ sync_fence_acc = mali_sync_timeline_create_signaled_fence(system->signaled_sync_tl);
+ if (NULL == sync_fence_acc) goto error;
+ }
+
+ /* Return file descriptor for the accumulated sync fence. */
+ return mali_sync_fence_fd_alloc(sync_fence_acc);
+
+error:
+ if (NULL != sync_fence_acc) {
+ sync_fence_put(sync_fence_acc);
+ }
+
+ return -1;
+}
+
+void mali_timeline_sync_fence_activate(struct mali_timeline_sync_fence_tracker *sync_fence_tracker)
+{
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_DEBUG_ASSERT_POINTER(sync_fence_tracker);
+ MALI_DEBUG_ASSERT_POINTER(sync_fence_tracker->flag);
+
+ MALI_DEBUG_PRINT(4, ("Mali Timeline: activation for sync fence tracker\n"));
+
+ /* Signal flag and release reference. */
+ mali_sync_flag_signal(sync_fence_tracker->flag, 0);
+ mali_sync_flag_put(sync_fence_tracker->flag);
+
+ /* Nothing can wait on this tracker, so nothing to schedule after release. */
+ schedule_mask = mali_timeline_tracker_release(&sync_fence_tracker->tracker);
+ MALI_DEBUG_ASSERT(MALI_SCHEDULER_MASK_EMPTY == schedule_mask);
+
+ _mali_osk_free(sync_fence_tracker);
+}
+
+#endif /* defined(CONFIG_SYNC) */
diff --git a/drivers/gpu/arm/utgard/common/mali_timeline_sync_fence.h b/drivers/gpu/arm/utgard/common/mali_timeline_sync_fence.h
new file mode 100644
index 000000000000..29a3822457e9
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_timeline_sync_fence.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2013, 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_timeline_sync_fence.h
+ *
+ * This file contains code related to creating sync fences from timeline fences.
+ */
+
+#ifndef __MALI_TIMELINE_SYNC_FENCE_H__
+#define __MALI_TIMELINE_SYNC_FENCE_H__
+
+#include "mali_timeline.h"
+
+#if defined(CONFIG_SYNC)
+
+/**
+ * Sync fence tracker.
+ */
+struct mali_timeline_sync_fence_tracker {
+ struct mali_sync_flag *flag; /**< Sync flag used to connect tracker and sync fence. */
+ struct mali_timeline_tracker tracker; /**< Timeline tracker. */
+};
+
+/**
+ * Create a sync fence that will be signaled when @ref fence is signaled.
+ *
+ * @param system Timeline system.
+ * @param fence Fence to create sync fence from.
+ * @return File descriptor for new sync fence, or -1 on error.
+ */
+s32 mali_timeline_sync_fence_create(struct mali_timeline_system *system, struct mali_timeline_fence *fence);
+
+/**
+ * Used by the Timeline system to activate a sync fence tracker.
+ *
+ * @param sync_fence_tracker Sync fence tracker.
+ */
+void mali_timeline_sync_fence_activate(struct mali_timeline_sync_fence_tracker *sync_fence_tracker);
+
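+/*
+ * Usage sketch (hedged, not a declaration from this header): turning a
+ * populated timeline fence into a sync fd that can be handed back to user
+ * space; "system" and "fence" are assumed to be valid.
+ *
+ *   s32 fd = mali_timeline_sync_fence_create(system, &fence);
+ *   if (0 > fd) {
+ *           ... handle creation failure ...
+ *   }
+ */
+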
+#endif /* defined(CONFIG_SYNC) */
+
+#endif /* __MALI_TIMELINE_SYNC_FENCE_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_ukk.h b/drivers/gpu/arm/utgard/common/mali_ukk.h
new file mode 100644
index 000000000000..597685a53f3b
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_ukk.h
@@ -0,0 +1,551 @@
+/*
+ * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_ukk.h
+ * Defines the kernel-side interface of the user-kernel interface
+ */
+
+#ifndef __MALI_UKK_H__
+#define __MALI_UKK_H__
+
+#include "mali_osk.h"
+#include "mali_uk_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup u_k_api UDD User/Kernel Interface (U/K) APIs
+ *
+ * - The _mali_uk functions are an abstraction of the interface to the device
+ * driver. On certain OSs, this would be implemented via the IOCTL interface.
+ * On other OSs, it could be via extension of some Device Driver Class, or
+ * direct function call for bare-metal/RTOS systems.
+ * - It is important to note that:
+ * - The Device Driver has implemented the _mali_ukk set of functions
+ * - The Base Driver calls the corresponding set of _mali_uku functions.
+ * - What requires porting is solely the calling mechanism from User-side to
+ * Kernel-side, and propagating back the results.
+ * - Each U/K function is associated with a (group, number) pair from
+ * \ref _mali_uk_functions to make it possible for a common function in the
+ * Base Driver and Device Driver to route User/Kernel calls from/to the
+ * correct _mali_uk function. For example, in an IOCTL system, the IOCTL number
+ * would be formed based on the group and number assigned to the _mali_uk
+ * function, as listed in \ref _mali_uk_functions. On the user-side, each
+ * _mali_uku function would just make an IOCTL with the IOCTL-code being an
+ * encoded form of the (group, number) pair. On the kernel-side, the Device
+ * Driver's IOCTL handler decodes the IOCTL-code back into a (group, number)
+ * pair, and uses this to determine which corresponding _mali_ukk should be
+ * called.
+ * - Refer to \ref _mali_uk_functions for more information about this
+ * (group, number) pairing.
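+ * - As an illustrative sketch only (the macro below is hypothetical, not
+ * part of this interface), such an IOCTL encoding could look like:@code
+ * #define MALI_IOC(group, number) _IOWR('m', ((group) << 6) | (number), void *)
+ * @endcode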
+ * - In a system where there is no distinction between user and kernel-side,
+ * the U/K interface may be implemented as:@code
+ * MALI_STATIC_INLINE _mali_osk_errcode_t _mali_uku_examplefunction( _mali_uk_examplefunction_s *args )
+ * {
+ * return mali_ukk_examplefunction( args );
+ * }
+ * @endcode
+ * - Therefore, all U/K calls behave \em as \em though they were direct
+ * function calls (but the \b implementation \em need \em not be direct
+ * function calls)
+ *
+ * @note Naming the _mali_uk functions the same on both User and Kernel sides
+ * on non-RTOS systems causes debugging issues when setting breakpoints. In
+ * this case, it is not clear which function the breakpoint is put on.
+ * Therefore the _mali_uk functions in user space are prefixed with \c _mali_uku
+ * and in kernel space with \c _mali_ukk. The naming for the argument
+ * structures is unaffected.
+ *
+ * - The _mali_uk functions are synchronous.
+ * - Arguments to the _mali_uk functions are passed in a structure. The only
+ * parameter passed to the _mali_uk functions is a pointer to this structure.
+ * The first member of this structure, ctx, is a pointer to a context returned
+ * by _mali_uku_open(). For example:@code
+ * typedef struct
+ * {
+ * void *ctx;
+ * u32 number_of_cores;
+ * } _mali_uk_get_gp_number_of_cores_s;
+ * @endcode
+ *
+ * - Each _mali_uk function has its own argument structure named after the
+ * function. The argument structure type is distinguished by the _s suffix.
+ * - The argument types are defined by the base driver and user-kernel
+ * interface.
+ * - All _mali_uk functions return a standard \ref _mali_osk_errcode_t.
+ * - Only arguments of type input or input/output need be initialized before
+ * calling a _mali_uk function.
+ * - Arguments of type output and input/output are only valid when the
+ * _mali_uk function returns \ref _MALI_OSK_ERR_OK.
+ * - The \c ctx member is always invalid after it has been used by a
+ * _mali_uk function, except for the context management functions
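+ * - For example, the stored context is typically copied into a fresh,
+ * stack-allocated argument structure before each call. A sketch (error
+ * handling omitted; the handle comes from _mali_uku_open()):@code
+ * _mali_uk_get_gp_number_of_cores_s args = {0};
+ * args.ctx = stored_context;
+ * err = _mali_uku_get_gp_number_of_cores(&args);
+ * @endcode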
+ *
+ *
+ * \b Interface \b restrictions
+ *
+ * The requirements of the interface mean that an implementation of the
+ * User-kernel interface may do no 'real' work. For example, the following are
+ * illegal in the User-kernel implementation:
+ * - Calling functions necessary for operation on all systems, because such
+ * functions would not get called on RTOS systems, where the U/K interface is
+ * a direct function call.
+ * - For example, a U/K interface that calls multiple _mali_ukk functions
+ * during one particular U/K call. This could not be achieved by the same code
+ * which uses direct function calls for the U/K interface.
+ * - Writing in values to the args members, when otherwise these members would
+ * not hold a useful value for a direct function call U/K interface.
+ * - For example, U/K interface implementations that take NULL members in
+ * their arguments structure from the user side, but those members are
+ * replaced with non-NULL values in the kernel-side of the U/K interface
+ * implementation. A scratch area for writing data is one such example. In this
+ * case, a direct function call U/K interface would segfault, because no code
+ * would be present to replace the NULL pointer with a meaningful pointer.
+ * - Note that we discourage the case where the U/K implementation changes
+ * a NULL argument member to non-NULL, and then the Device Driver code (outside
+ * of the U/K layer) re-checks this member for NULL, and corrects it when
+ * necessary. Whilst such code works even on direct function call U/K
+ * interfaces, it reduces the testing coverage of the Device Driver code. This
+ * is because we have no way of testing the NULL == value path on an OS
+ * implementation.
+ *
+ * A number of allowable examples exist where U/K interfaces do 'real' work:
+ * - The 'pointer switching' technique for \ref _mali_ukk_get_system_info
+ * - In this case, without the pointer switching on direct function call
+ * U/K interface, the Device Driver code still sees the same thing: a pointer
+ * to which it can write memory. This is because such a system has no
+ * distinction between a user and kernel pointer.
+ * - Writing an OS-specific value into the ukk_private member for
+ * _mali_ukk_mem_mmap().
+ * - In this case, this value is passed around by Device Driver code, but
+ * its actual value is never checked. Device Driver code simply passes it from
+ * the U/K layer to the OSK layer, where it can be acted upon. In this case,
+ * \em some OS implementations of the U/K (_mali_ukk_mem_mmap()) and OSK
+ * (_mali_osk_mem_mapregion_init()) functions will collaborate on the
+ * meaning of ukk_private member. On other OSs, it may be unused by both
+ * U/K and OSK layers
+ * - Therefore, on error inside the U/K interface implementation itself,
+ * it will be as though the _mali_ukk function itself had failed, and cleaned
+ * up after itself.
+ * - Compare this to a direct function call U/K implementation, where all
+ * error cleanup is handled by the _mali_ukk function itself. The direct
+ * function call U/K interface implementation is automatically atomic.
+ *
+ * The last example highlights a consequence of all U/K interface
+ * implementations: they must be atomic with respect to the Device Driver code.
+ * And therefore, should Device Driver code succeed but the U/K implementation
+ * fail afterwards (but before return to user-space), then the U/K
+ * implementation must cause appropriate cleanup actions to preserve the
+ * atomicity of the interface.
+ *
+ * @{
+ */
+
+
+/** @defgroup _mali_uk_context U/K Context management
+ *
+ * These functions allow for initialisation of the user-kernel interface once per process.
+ *
+ * Generally the context will store the OS specific object to communicate with the kernel device driver and further
+ * state information required by the specific implementation. The context is shareable among all threads in the caller process.
+ *
+ * On IOCTL systems, this is likely to be a file descriptor as a result of opening the kernel device driver.
+ *
+ * On a bare-metal/RTOS system with no distinction between kernel and
+ * user-space, the U/K interface simply calls the _mali_ukk variant of the
+ * function by direct function call. In this case, the context returned is the
+ * mali_session_data from _mali_ukk_open().
+ *
+ * The kernel side implementations of the U/K interface expect the first member of the argument structure to
+ * be the context created by _mali_uku_open(). On some OS implementations, the meaning of this context
+ * will be different between user-side and kernel-side, in which case the kernel side will need to replace this context
+ * with the kernel-side equivalent, because user-side will not have access to kernel-side data. The context parameter
+ * in the argument structure therefore has to be of type input/output.
+ *
+ * It should be noted that the caller cannot reuse the \c ctx member of U/K
+ * argument structure after a U/K call, because it may be overwritten. Instead,
+ * the context handle must always be stored elsewhere, and copied into
+ * the appropriate U/K argument structure for each user-side call to
+ * the U/K interface. This is not usually a problem, since U/K argument
+ * structures are usually placed on the stack.
+ *
+ * @{ */
+
+/** @brief Begin a new Mali Device Driver session
+ *
+ * This is used to obtain a per-process context handle for all future U/K calls.
+ *
+ * @param context pointer to storage to return a (void*)context handle.
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_open(void **context);
+
+/** @brief End a Mali Device Driver session
+ *
+ * This should be called when the process no longer requires use of the Mali Device Driver.
+ *
+ * The context handle must not be used after it has been closed.
+ *
+ * @param context pointer to a stored (void*)context handle.
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_close(void **context);
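+
+/*
+ * Session lifetime sketch (direct-call variant; error handling abbreviated
+ * and the session body elided):
+ *
+ *   void *context = NULL;
+ *   if (_MALI_OSK_ERR_OK == _mali_ukk_open(&context)) {
+ *           ... issue U/K calls, copying context into each argument
+ *               structure's ctx member ...
+ *           _mali_ukk_close(&context);
+ *   }
+ */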
+
+/** @} */ /* end group _mali_uk_context */
+
+
+/** @addtogroup _mali_uk_core U/K Core
+ *
+ * The core functions provide the following functionality:
+ * - verify that the user and kernel API are compatible
+ * - retrieve information about the cores and memory banks in the system
+ * - wait for the result of jobs started on a core
+ *
+ * @{ */
+
+/** @brief Waits for a job notification.
+ *
+ * Sleeps until notified or a timeout occurs. Returns information about the notification.
+ *
+ * @param args see _mali_uk_wait_for_notification_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_wait_for_notification(_mali_uk_wait_for_notification_s *args);
+
+/** @brief Post a notification to the notification queue of this application.
+ *
+ * @param args see _mali_uk_post_notification_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_post_notification(_mali_uk_post_notification_s *args);
+
+/** @brief Verifies if the user and kernel side of this API are compatible.
+ *
+ * This function is obsolete, but kept to allow old, incompatible user space
+ * clients to robustly detect the incompatibility.
+ *
+ * @param args see _mali_uk_get_api_version_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_api_version(_mali_uk_get_api_version_s *args);
+
+/** @brief Verifies if the user and kernel side of this API are compatible.
+ *
+ * @param args see _mali_uk_get_api_version_v2_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_api_version_v2(_mali_uk_get_api_version_v2_s *args);
+
+/** @brief Get the user space settings applicable for calling process.
+ *
+ * @param args see _mali_uk_get_user_settings_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_user_settings(_mali_uk_get_user_settings_s *args);
+
+/** @brief Get a user space setting applicable for calling process.
+ *
+ * @param args see _mali_uk_get_user_setting_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_user_setting(_mali_uk_get_user_setting_s *args);
+
+/** @brief Grant or deny high priority scheduling for this session.
+ *
+ * @param args see _mali_uk_request_high_priority_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_request_high_priority(_mali_uk_request_high_priority_s *args);
+
+/** @brief Make the calling process sleep if the number of pending big jobs in the kernel is >= MALI_MAX_PENDING_BIG_JOB.
+ *
+ * @param args see _mali_uk_pending_submit_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_pending_submit(_mali_uk_pending_submit_s *args);
+
+/** @} */ /* end group _mali_uk_core */
+
+
+/** @addtogroup _mali_uk_memory U/K Memory
+ *
+ * The memory functions provide functionality with and without a Mali-MMU present.
+ *
+ * For Mali-MMU based systems, the following functionality is provided:
+ * - Initialize and terminate MALI virtual address space
+ * - Allocate/deallocate physical memory to a MALI virtual address range and map into/unmap from the
+ * current process address space
+ * - Map/unmap external physical memory into the MALI virtual address range
+ *
+ * For Mali-nonMMU based systems:
+ * - Allocate/deallocate MALI memory
+ *
+ * @{ */
+
+/** @brief Map Mali Memory into the current user process
+ *
+ * Maps Mali memory into the current user process in a generic way.
+ *
+ * This function is to be used for Mali-MMU mode. The function is available in both Mali-MMU and Mali-nonMMU modes,
+ * but should not be called by a user process in Mali-nonMMU mode.
+ *
+ * The implementation and operation of _mali_ukk_mem_mmap() is dependent on whether the driver is built for Mali-MMU
+ * or Mali-nonMMU:
+ * - In the nonMMU case, _mali_ukk_mem_mmap() requires a physical address to be specified. For this reason, an OS U/K
+ * implementation should not allow this to be called from user-space. In any case, nonMMU implementations are
+ * inherently insecure, and so the overall impact is minimal. Mali-MMU mode should be used if security is desired.
+ * - In the MMU case, the _mali_uk_mem_mmap_s::phys_addr
+ * member is used for the \em Mali-virtual address desired for the mapping. The
+ * implementation of _mali_ukk_mem_mmap() will allocate both the CPU-virtual
+ * and CPU-physical addresses, and can cope with mapping a contiguous virtual
+ * address range to a sequence of non-contiguous physical pages. In this case,
+ * the CPU-physical addresses are not communicated back to the user-side, as
+ * they are unnecessary; the \em Mali-virtual address range must be used for
+ * programming Mali structures.
+ *
+ * In the second (MMU) case, _mali_ukk_mem_mmap() handles management of
+ * CPU-virtual and CPU-physical ranges, but the \em caller must manage the
+ * \em Mali-virtual address range from the user-side.
+ *
+ * @note Mali-virtual address ranges are entirely separate between processes.
+ * It is not possible for a process to accidentally corrupt another process'
+ * \em Mali-virtual address space.
+ *
+ * @param args see _mali_uk_mem_mmap_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_mmap(_mali_uk_mem_mmap_s *args);
+
+/** @brief Unmap Mali Memory from the current user process
+ *
+ * Unmaps Mali memory from the current user process in a generic way. This only operates on Mali memory supplied
+ * from _mali_ukk_mem_mmap().
+ *
+ * @param args see _mali_uk_mem_munmap_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_munmap(_mali_uk_mem_munmap_s *args);
+
+/** @brief Determine the buffer size necessary for an MMU page table dump.
+ * @param args see _mali_uk_query_mmu_page_table_dump_size_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size(_mali_uk_query_mmu_page_table_dump_size_s *args);
+/** @brief Dump MMU Page tables.
+ * @param args see _mali_uk_dump_mmu_page_table_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_dump_mmu_page_table(_mali_uk_dump_mmu_page_table_s *args);
+
+/** @brief Write user data to specified Mali memory without causing segfaults.
+ * @param args see _mali_uk_mem_write_safe_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_write_safe(_mali_uk_mem_write_safe_s *args);
+
+/** @} */ /* end group _mali_uk_memory */
+
+
+/** @addtogroup _mali_uk_pp U/K Fragment Processor
+ *
+ * The Fragment Processor (aka PP (Pixel Processor)) functions provide the following functionality:
+ * - retrieving version of the fragment processors
+ * - determine number of fragment processors
+ * - starting a job on a fragment processor
+ *
+ * @{ */
+
+/** @brief Issue a request to start a new job on a Fragment Processor.
+ *
+ * If the request fails, args->status is set to _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE and you can
+ * try to start the job again.
+ *
+ * An existing job could be returned for requeueing if the new job has a higher priority than a previously started job
+ * which the hardware hasn't actually started processing yet. In this case the new job will be started instead and the
+ * existing one returned, otherwise the new job is started and the status field args->status is set to
+ * _MALI_UK_START_JOB_STARTED.
+ *
+ * Job completion can be awaited with _mali_ukk_wait_for_notification().
+ *
+ * @param ctx user-kernel context (mali_session)
+ * @param uargs see _mali_uk_pp_start_job_s in "mali_utgard_uk_types.h". Use _mali_osk_copy_from_user to retrieve data!
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_pp_start_job(void *ctx, _mali_uk_pp_start_job_s *uargs);
+
+/**
+ * @brief Issue a request to start new jobs on both Vertex Processor and Fragment Processor.
+ *
+ * @note Will call into @ref _mali_ukk_pp_start_job and @ref _mali_ukk_gp_start_job.
+ *
+ * @param ctx user-kernel context (mali_session)
+ * @param uargs see _mali_uk_pp_and_gp_start_job_s in "mali_utgard_uk_types.h". Use _mali_osk_copy_from_user to retrieve data!
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_pp_and_gp_start_job(void *ctx, _mali_uk_pp_and_gp_start_job_s *uargs);
+
+/** @brief Returns the number of Fragment Processors in the system
+ *
+ * @param args see _mali_uk_get_pp_number_of_cores_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores(_mali_uk_get_pp_number_of_cores_s *args);
+
+/** @brief Returns the version that all Fragment Processor cores are compatible with.
+ *
+ * This function may only be called when _mali_ukk_get_pp_number_of_cores() has indicated that at least one Fragment
+ * Processor core is available.
+ *
+ * @param args see _mali_uk_get_pp_core_version_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_pp_core_version(_mali_uk_get_pp_core_version_s *args);
+
+/** @brief Disable Write-back unit(s) on specified job
+ *
+ * @param args see _mali_uk_pp_disable_wb_s in "mali_utgard_uk_types.h"
+ */
+void _mali_ukk_pp_job_disable_wb(_mali_uk_pp_disable_wb_s *args);
+
+
+/** @} */ /* end group _mali_uk_pp */
+
+
+/** @addtogroup _mali_uk_gp U/K Vertex Processor
+ *
+ * The Vertex Processor (aka GP (Geometry Processor)) functions provide the following functionality:
+ * - retrieving version of the Vertex Processors
+ * - determine number of Vertex Processors available
+ * - starting a job on a Vertex Processor
+ *
+ * @{ */
+
+/** @brief Issue a request to start a new job on a Vertex Processor.
+ *
+ * If the request fails, args->status is set to _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE and you can
+ * try to start the job again.
+ *
+ * An existing job could be returned for requeueing if the new job has a higher priority than a previously started job
+ * which the hardware hasn't actually started processing yet. In this case the new job will be started and the
+ * existing one returned, otherwise the new job is started and the status field args->status is set to
+ * _MALI_UK_START_JOB_STARTED.
+ *
+ * Job completion can be awaited with _mali_ukk_wait_for_notification().
+ *
+ * @param ctx user-kernel context (mali_session)
+ * @param uargs see _mali_uk_gp_start_job_s in "mali_utgard_uk_types.h". Use _mali_osk_copy_from_user to retrieve data!
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_gp_start_job(void *ctx, _mali_uk_gp_start_job_s *uargs);
+
+/** @brief Returns the number of Vertex Processors in the system.
+ *
+ * @param args see _mali_uk_get_gp_number_of_cores_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores(_mali_uk_get_gp_number_of_cores_s *args);
+
+/** @brief Returns the version that all Vertex Processor cores are compatible with.
+ *
+ * This function may only be called when _mali_ukk_get_gp_number_of_cores() has indicated that at least one Vertex
+ * Processor core is available.
+ *
+ * @param args see _mali_uk_get_gp_core_version_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_gp_core_version(_mali_uk_get_gp_core_version_s *args);
+
+/** @brief Resume or abort suspended Vertex Processor jobs.
+ *
+ * After receiving notification that a Vertex Processor job was suspended from
+ * _mali_ukk_wait_for_notification() you can use this function to resume or abort the job.
+ *
+ * @param args see _mali_uk_gp_suspend_response_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_gp_suspend_response(_mali_uk_gp_suspend_response_s *args);
+
+/** @} */ /* end group _mali_uk_gp */
+
+#if defined(CONFIG_MALI400_PROFILING)
+/** @addtogroup _mali_uk_profiling U/K Timeline profiling module
+ * @{ */
+
+/** @brief Add event to profiling buffer.
+ *
+ * @param args see _mali_uk_profiling_add_event_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args);
+
+/** @brief Get profiling stream fd.
+ *
+ * @param args see _mali_uk_profiling_stream_fd_get_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_stream_fd_get(_mali_uk_profiling_stream_fd_get_s *args);
+
+/** @brief Profiling control set.
+ *
+ * @param args see _mali_uk_profiling_control_set_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_control_set(_mali_uk_profiling_control_set_s *args);
+
+/** @} */ /* end group _mali_uk_profiling */
+#endif
+
+/** @addtogroup _mali_uk_vsync U/K VSYNC reporting module
+ * @{ */
+
+/** @brief Report events related to vsync.
+ *
+ * @note Events should be reported when starting to wait for vsync and when the
+ * waiting is finished. This information can then be used in kernel space to
+ * complement the GPU utilization metric.
+ *
+ * @param args see _mali_uk_vsync_event_report_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_vsync_event_report(_mali_uk_vsync_event_report_s *args);
+
+/** @} */ /* end group _mali_uk_vsync */
+
+/** @addtogroup _mali_sw_counters_report U/K Software counter reporting
+ * @{ */
+
+/** @brief Report software counters.
+ *
+ * @param args see _mali_uk_sw_counters_report_s in "mali_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_sw_counters_report(_mali_uk_sw_counters_report_s *args);
+
+/** @} */ /* end group _mali_sw_counters_report */
+
+/** @} */ /* end group u_k_api */
+
+/** @} */ /* end group uddapi */
+
+u32 _mali_ukk_report_memory_usage(void);
+
+u32 _mali_ukk_report_total_memory_size(void);
+
+u32 _mali_ukk_utilization_gp_pp(void);
+
+u32 _mali_ukk_utilization_gp(void);
+
+u32 _mali_ukk_utilization_pp(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UKK_H__ */
diff --git a/drivers/gpu/arm/utgard/common/mali_user_settings_db.c b/drivers/gpu/arm/utgard/common/mali_user_settings_db.c
new file mode 100644
index 000000000000..54e1580fad1a
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_user_settings_db.c
@@ -0,0 +1,147 @@
+/**
+ * Copyright (C) 2012-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_ukk.h"
+#include "mali_uk_types.h"
+#include "mali_user_settings_db.h"
+#include "mali_session.h"
+
+static u32 mali_user_settings[_MALI_UK_USER_SETTING_MAX];
+const char *_mali_uk_user_setting_descriptions[] = _MALI_UK_USER_SETTING_DESCRIPTIONS;
+
+static void mali_user_settings_notify(_mali_uk_user_setting_t setting, u32 value)
+{
+ mali_bool done = MALI_FALSE;
+
+ /*
+ * This function gets a bit complicated because we can't hold the session lock while
+ * allocating notification objects.
+ */
+
+ while (!done) {
+ u32 i;
+ u32 num_sessions_alloc;
+ u32 num_sessions_with_lock;
+ u32 used_notification_objects = 0;
+ _mali_osk_notification_t **notobjs;
+
+ /* Pre-allocate the number of notification objects we need right now (might change after the lock has been taken) */
+ num_sessions_alloc = mali_session_get_count();
+ if (0 == num_sessions_alloc) {
+ /* No sessions to report to */
+ return;
+ }
+
+ notobjs = (_mali_osk_notification_t **)_mali_osk_malloc(sizeof(_mali_osk_notification_t *) * num_sessions_alloc);
+ if (NULL == notobjs) {
+ MALI_PRINT_ERROR(("Failed to notify user space session about num PP core change (alloc failure)\n"));
+ return;
+ }
+
+ for (i = 0; i < num_sessions_alloc; i++) {
+ notobjs[i] = _mali_osk_notification_create(_MALI_NOTIFICATION_SETTINGS_CHANGED,
+ sizeof(_mali_uk_settings_changed_s));
+ if (NULL != notobjs[i]) {
+ _mali_uk_settings_changed_s *data;
+ data = notobjs[i]->result_buffer;
+
+ data->setting = setting;
+ data->value = value;
+ } else {
+ MALI_PRINT_ERROR(("Failed to notify user space session about setting change (alloc failure %u)\n", i));
+ }
+ }
+
+ mali_session_lock();
+
+ /* number of sessions will not change while we hold the lock */
+ num_sessions_with_lock = mali_session_get_count();
+
+ if (num_sessions_alloc >= num_sessions_with_lock) {
+ /* We have allocated enough notification objects for all the sessions at the moment */
+ struct mali_session_data *session, *tmp;
+ MALI_SESSION_FOREACH(session, tmp, link) {
+ MALI_DEBUG_ASSERT(used_notification_objects < num_sessions_alloc);
+ if (NULL != notobjs[used_notification_objects]) {
+ mali_session_send_notification(session, notobjs[used_notification_objects]);
+ notobjs[used_notification_objects] = NULL; /* Don't track this notification object any more */
+ }
+ used_notification_objects++;
+ }
+ done = MALI_TRUE;
+ }
+
+ mali_session_unlock();
+
+ /* Delete any remaining/unused notification objects */
+ for (; used_notification_objects < num_sessions_alloc; used_notification_objects++) {
+ if (NULL != notobjs[used_notification_objects]) {
+ _mali_osk_notification_delete(notobjs[used_notification_objects]);
+ }
+ }
+
+ _mali_osk_free(notobjs);
+ }
+}
+
+void mali_set_user_setting(_mali_uk_user_setting_t setting, u32 value)
+{
+ mali_bool notify = MALI_FALSE;
+
+ if (setting >= _MALI_UK_USER_SETTING_MAX) {
+ MALI_DEBUG_PRINT_ERROR(("Invalid user setting %u\n", setting));
+ return;
+ }
+
+ if (mali_user_settings[setting] != value) {
+ notify = MALI_TRUE;
+ }
+
+ mali_user_settings[setting] = value;
+
+ if (notify) {
+ mali_user_settings_notify(setting, value);
+ }
+}
+
+u32 mali_get_user_setting(_mali_uk_user_setting_t setting)
+{
+ if (setting >= _MALI_UK_USER_SETTING_MAX) {
+ return 0;
+ }
+
+ return mali_user_settings[setting];
+}
+
+_mali_osk_errcode_t _mali_ukk_get_user_setting(_mali_uk_get_user_setting_s *args)
+{
+ _mali_uk_user_setting_t setting;
+ MALI_DEBUG_ASSERT_POINTER(args);
+
+ setting = args->setting;
+
+ if (_MALI_UK_USER_SETTING_MAX > setting) {
+ args->value = mali_user_settings[setting];
+ return _MALI_OSK_ERR_OK;
+ } else {
+ return _MALI_OSK_ERR_INVALID_ARGS;
+ }
+}
+
+_mali_osk_errcode_t _mali_ukk_get_user_settings(_mali_uk_get_user_settings_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+
+ _mali_osk_memcpy(args->settings, mali_user_settings, sizeof(mali_user_settings));
+
+ return _MALI_OSK_ERR_OK;
+}
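+
+/*
+ * Usage sketch: updating a setting and reading it back. "setting" stands for
+ * any valid _mali_uk_user_setting_t value below _MALI_UK_USER_SETTING_MAX.
+ *
+ *   mali_set_user_setting(setting, 1);
+ *   if (1 == mali_get_user_setting(setting)) {
+ *           ... if the value changed, every running session was sent a
+ *               _MALI_NOTIFICATION_SETTINGS_CHANGED notification ...
+ *   }
+ */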
diff --git a/drivers/gpu/arm/utgard/common/mali_user_settings_db.h b/drivers/gpu/arm/utgard/common/mali_user_settings_db.h
new file mode 100644
index 000000000000..0732c3e56e2a
--- /dev/null
+++ b/drivers/gpu/arm/utgard/common/mali_user_settings_db.h
@@ -0,0 +1,39 @@
+/**
+ * Copyright (C) 2012-2013, 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_USER_SETTINGS_DB_H__
+#define __MALI_USER_SETTINGS_DB_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "mali_uk_types.h"
+
+/** @brief Set Mali user setting in DB
+ *
+ * Update the DB with a new value for \a setting. If the value differs from the previously set value, running sessions will be notified of the change.
+ *
+ * @param setting the setting to be changed
+ * @param value the new value to set
+ */
+void mali_set_user_setting(_mali_uk_user_setting_t setting, u32 value);
+
+/** @brief Get current Mali user setting value from DB
+ *
+ * @param setting the setting to extract
+ * @return the value of the selected setting
+ */
+u32 mali_get_user_setting(_mali_uk_user_setting_t setting);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* __MALI_USER_SETTINGS_DB_H__ */
diff --git a/drivers/gpu/arm/utgard/include/linux/mali/mali_utgard.h b/drivers/gpu/arm/utgard/include/linux/mali/mali_utgard.h
new file mode 100644
index 000000000000..4ea02fe65cb5
--- /dev/null
+++ b/drivers/gpu/arm/utgard/include/linux/mali/mali_utgard.h
@@ -0,0 +1,507 @@
+/*
+ * Copyright (C) 2012-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_utgard.h
+ * Defines types and interface exposed by the Mali Utgard device driver
+ */
+
+#ifndef __MALI_UTGARD_H__
+#define __MALI_UTGARD_H__
+
+#include "mali_osk_types.h"
+
+#define MALI_GPU_NAME_UTGARD "mali-utgard"
+
+
+#define MALI_OFFSET_GP 0x00000
+#define MALI_OFFSET_GP_MMU 0x03000
+
+#define MALI_OFFSET_PP0 0x08000
+#define MALI_OFFSET_PP0_MMU 0x04000
+#define MALI_OFFSET_PP1 0x0A000
+#define MALI_OFFSET_PP1_MMU 0x05000
+#define MALI_OFFSET_PP2 0x0C000
+#define MALI_OFFSET_PP2_MMU 0x06000
+#define MALI_OFFSET_PP3 0x0E000
+#define MALI_OFFSET_PP3_MMU 0x07000
+
+#define MALI_OFFSET_PP4 0x28000
+#define MALI_OFFSET_PP4_MMU 0x1C000
+#define MALI_OFFSET_PP5 0x2A000
+#define MALI_OFFSET_PP5_MMU 0x1D000
+#define MALI_OFFSET_PP6 0x2C000
+#define MALI_OFFSET_PP6_MMU 0x1E000
+#define MALI_OFFSET_PP7 0x2E000
+#define MALI_OFFSET_PP7_MMU 0x1F000
+
+#define MALI_OFFSET_L2_RESOURCE0 0x01000
+#define MALI_OFFSET_L2_RESOURCE1 0x10000
+#define MALI_OFFSET_L2_RESOURCE2 0x11000
+
+#define MALI400_OFFSET_L2_CACHE0 MALI_OFFSET_L2_RESOURCE0
+#define MALI450_OFFSET_L2_CACHE0 MALI_OFFSET_L2_RESOURCE1
+#define MALI450_OFFSET_L2_CACHE1 MALI_OFFSET_L2_RESOURCE0
+#define MALI450_OFFSET_L2_CACHE2 MALI_OFFSET_L2_RESOURCE2
+#define MALI470_OFFSET_L2_CACHE1 MALI_OFFSET_L2_RESOURCE0
+
+#define MALI_OFFSET_BCAST 0x13000
+#define MALI_OFFSET_DLBU 0x14000
+
+#define MALI_OFFSET_PP_BCAST 0x16000
+#define MALI_OFFSET_PP_BCAST_MMU 0x15000
+
+#define MALI_OFFSET_PMU 0x02000
+#define MALI_OFFSET_DMA 0x12000
+
+/* Mali-300 */
+
+#define MALI_GPU_RESOURCES_MALI300(base_addr, gp_irq, gp_mmu_irq, pp_irq, pp_mmu_irq) \
+ MALI_GPU_RESOURCES_MALI400_MP1(base_addr, gp_irq, gp_mmu_irq, pp_irq, pp_mmu_irq)
+
+#define MALI_GPU_RESOURCES_MALI300_PMU(base_addr, gp_irq, gp_mmu_irq, pp_irq, pp_mmu_irq) \
+ MALI_GPU_RESOURCES_MALI400_MP1_PMU(base_addr, gp_irq, gp_mmu_irq, pp_irq, pp_mmu_irq)
+
+/* Mali-400 */
+
+#define MALI_GPU_RESOURCES_MALI400_MP1(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI400_OFFSET_L2_CACHE0) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq)
+
+#define MALI_GPU_RESOURCES_MALI400_MP1_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq) \
+ MALI_GPU_RESOURCES_MALI400_MP1(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU)
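+
+/*
+ * Usage sketch: a platform integration typically expands one of these
+ * resource macros inside a static resource table; the base address and IRQ
+ * numbers below are placeholders, not real values.
+ *
+ *   static struct resource mali_gpu_resources[] = {
+ *           MALI_GPU_RESOURCES_MALI400_MP1_PMU(0xfc040000, 70, 71, 72, 73)
+ *   };
+ */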
+
+#define MALI_GPU_RESOURCES_MALI400_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI400_OFFSET_L2_CACHE0) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq)
+
+#define MALI_GPU_RESOURCES_MALI400_MP2_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq) \
+ MALI_GPU_RESOURCES_MALI400_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU)
+
+#define MALI_GPU_RESOURCES_MALI400_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI400_OFFSET_L2_CACHE0) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq)
+
+#define MALI_GPU_RESOURCES_MALI400_MP3_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq) \
+ MALI_GPU_RESOURCES_MALI400_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU)
+
+#define MALI_GPU_RESOURCES_MALI400_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI400_OFFSET_L2_CACHE0) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + MALI_OFFSET_PP3, pp3_irq, base_addr + MALI_OFFSET_PP3_MMU, pp3_mmu_irq)
+
+#define MALI_GPU_RESOURCES_MALI400_MP4_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq) \
+ MALI_GPU_RESOURCES_MALI400_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \
+
+/* Mali-450 */
+#define MALI_GPU_RESOURCES_MALI450_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE0) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE1) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+ MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+ MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU) \
+ MALI_GPU_RESOURCE_DMA(base_addr + MALI_OFFSET_DMA)
+
+#define MALI_GPU_RESOURCES_MALI450_MP2_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCES_MALI450_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \
+
+#define MALI_GPU_RESOURCES_MALI450_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE0) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE1) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+ MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+ MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU)
+
+#define MALI_GPU_RESOURCES_MALI450_MP3_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCES_MALI450_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \
+
+#define MALI_GPU_RESOURCES_MALI450_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE0) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE1) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + MALI_OFFSET_PP3, pp3_irq, base_addr + MALI_OFFSET_PP3_MMU, pp3_mmu_irq) \
+ MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+ MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+ MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU) \
+ MALI_GPU_RESOURCE_DMA(base_addr + MALI_OFFSET_DMA)
+
+#define MALI_GPU_RESOURCES_MALI450_MP4_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCES_MALI450_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \
+
+#define MALI_GPU_RESOURCES_MALI450_MP6(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE0) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE1) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE2) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + MALI_OFFSET_PP4, pp3_irq, base_addr + MALI_OFFSET_PP4_MMU, pp3_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(4, base_addr + MALI_OFFSET_PP5, pp4_irq, base_addr + MALI_OFFSET_PP5_MMU, pp4_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(5, base_addr + MALI_OFFSET_PP6, pp5_irq, base_addr + MALI_OFFSET_PP6_MMU, pp5_mmu_irq) \
+ MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+ MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+ MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU) \
+ MALI_GPU_RESOURCE_DMA(base_addr + MALI_OFFSET_DMA)
+
+#define MALI_GPU_RESOURCES_MALI450_MP6_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCES_MALI450_MP6(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \
+
+#define MALI_GPU_RESOURCES_MALI450_MP8(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp6_irq, pp6_mmu_irq, pp7_irq, pp7_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE0) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE1) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + MALI_OFFSET_PP3, pp3_irq, base_addr + MALI_OFFSET_PP3_MMU, pp3_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE2) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(4, base_addr + MALI_OFFSET_PP4, pp4_irq, base_addr + MALI_OFFSET_PP4_MMU, pp4_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(5, base_addr + MALI_OFFSET_PP5, pp5_irq, base_addr + MALI_OFFSET_PP5_MMU, pp5_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(6, base_addr + MALI_OFFSET_PP6, pp6_irq, base_addr + MALI_OFFSET_PP6_MMU, pp6_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(7, base_addr + MALI_OFFSET_PP7, pp7_irq, base_addr + MALI_OFFSET_PP7_MMU, pp7_mmu_irq) \
+ MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+ MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+ MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU) \
+ MALI_GPU_RESOURCE_DMA(base_addr + MALI_OFFSET_DMA)
+
+#define MALI_GPU_RESOURCES_MALI450_MP8_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp6_irq, pp6_mmu_irq, pp7_irq, pp7_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCES_MALI450_MP8(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp6_irq, pp6_mmu_irq, pp7_irq, pp7_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \
+
+/* Mali-470 */
+#define MALI_GPU_RESOURCES_MALI470_MP1(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI470_OFFSET_L2_CACHE1) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+ MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+ MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU)
+
+#define MALI_GPU_RESOURCES_MALI470_MP1_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCES_MALI470_MP1(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \
+
+#define MALI_GPU_RESOURCES_MALI470_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI470_OFFSET_L2_CACHE1) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+ MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+ MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU)
+
+#define MALI_GPU_RESOURCES_MALI470_MP2_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCES_MALI470_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \
+
+#define MALI_GPU_RESOURCES_MALI470_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI470_OFFSET_L2_CACHE1) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+ MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+ MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU)
+
+#define MALI_GPU_RESOURCES_MALI470_MP3_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCES_MALI470_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \
+
+#define MALI_GPU_RESOURCES_MALI470_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI470_OFFSET_L2_CACHE1) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + MALI_OFFSET_PP3, pp3_irq, base_addr + MALI_OFFSET_PP3_MMU, pp3_mmu_irq) \
+ MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+ MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+ MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU)
+
+#define MALI_GPU_RESOURCES_MALI470_MP4_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCES_MALI470_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \
+
+#define MALI_GPU_RESOURCE_L2(addr) \
+ { \
+ .name = "Mali_L2", \
+ .flags = IORESOURCE_MEM, \
+ .start = addr, \
+ .end = addr + 0x200, \
+ },
+
+#define MALI_GPU_RESOURCE_GP(gp_addr, gp_irq) \
+ { \
+ .name = "Mali_GP", \
+ .flags = IORESOURCE_MEM, \
+ .start = gp_addr, \
+ .end = gp_addr + 0x100, \
+ }, \
+ { \
+ .name = "Mali_GP_IRQ", \
+ .flags = IORESOURCE_IRQ, \
+ .start = gp_irq, \
+ .end = gp_irq, \
+ }, \
+
+#define MALI_GPU_RESOURCE_GP_WITH_MMU(gp_addr, gp_irq, gp_mmu_addr, gp_mmu_irq) \
+ { \
+ .name = "Mali_GP", \
+ .flags = IORESOURCE_MEM, \
+ .start = gp_addr, \
+ .end = gp_addr + 0x100, \
+ }, \
+ { \
+ .name = "Mali_GP_IRQ", \
+ .flags = IORESOURCE_IRQ, \
+ .start = gp_irq, \
+ .end = gp_irq, \
+ }, \
+ { \
+ .name = "Mali_GP_MMU", \
+ .flags = IORESOURCE_MEM, \
+ .start = gp_mmu_addr, \
+ .end = gp_mmu_addr + 0x100, \
+ }, \
+ { \
+ .name = "Mali_GP_MMU_IRQ", \
+ .flags = IORESOURCE_IRQ, \
+ .start = gp_mmu_irq, \
+ .end = gp_mmu_irq, \
+ },
+
+#define MALI_GPU_RESOURCE_PP(pp_addr, pp_irq) \
+ { \
+ .name = "Mali_PP", \
+ .flags = IORESOURCE_MEM, \
+ .start = pp_addr, \
+ .end = pp_addr + 0x1100, \
+ }, \
+ { \
+ .name = "Mali_PP_IRQ", \
+ .flags = IORESOURCE_IRQ, \
+ .start = pp_irq, \
+ .end = pp_irq, \
+ }, \
+
+#define MALI_GPU_RESOURCE_PP_WITH_MMU(id, pp_addr, pp_irq, pp_mmu_addr, pp_mmu_irq) \
+ { \
+ .name = "Mali_PP" #id, \
+ .flags = IORESOURCE_MEM, \
+ .start = pp_addr, \
+ .end = pp_addr + 0x1100, \
+ }, \
+ { \
+ .name = "Mali_PP" #id "_IRQ", \
+ .flags = IORESOURCE_IRQ, \
+ .start = pp_irq, \
+ .end = pp_irq, \
+ }, \
+ { \
+ .name = "Mali_PP" #id "_MMU", \
+ .flags = IORESOURCE_MEM, \
+ .start = pp_mmu_addr, \
+ .end = pp_mmu_addr + 0x100, \
+ }, \
+ { \
+ .name = "Mali_PP" #id "_MMU_IRQ", \
+ .flags = IORESOURCE_IRQ, \
+ .start = pp_mmu_irq, \
+ .end = pp_mmu_irq, \
+ },
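For orientation, the #id stringification in the macro above gives every per-core resource a distinct name. As an illustrative (assumed) expansion, MALI_GPU_RESOURCE_PP_WITH_MMU(0, pp0_addr, pp0_irq, pp0_mmu_addr, pp0_mmu_irq) produces:

    { .name = "Mali_PP0",         .flags = IORESOURCE_MEM, .start = pp0_addr,     .end = pp0_addr + 0x1100, },
    { .name = "Mali_PP0_IRQ",     .flags = IORESOURCE_IRQ, .start = pp0_irq,      .end = pp0_irq, },
    { .name = "Mali_PP0_MMU",     .flags = IORESOURCE_MEM, .start = pp0_mmu_addr, .end = pp0_mmu_addr + 0x100, },
    { .name = "Mali_PP0_MMU_IRQ", .flags = IORESOURCE_IRQ, .start = pp0_mmu_irq,  .end = pp0_mmu_irq, },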
+
+#define MALI_GPU_RESOURCE_MMU(mmu_addr, mmu_irq) \
+ { \
+ .name = "Mali_MMU", \
+ .flags = IORESOURCE_MEM, \
+ .start = mmu_addr, \
+ .end = mmu_addr + 0x100, \
+ }, \
+ { \
+ .name = "Mali_MMU_IRQ", \
+ .flags = IORESOURCE_IRQ, \
+ .start = mmu_irq, \
+ .end = mmu_irq, \
+ },
+
+#define MALI_GPU_RESOURCE_PMU(pmu_addr) \
+ { \
+ .name = "Mali_PMU", \
+ .flags = IORESOURCE_MEM, \
+ .start = pmu_addr, \
+ .end = pmu_addr + 0x100, \
+ },
+
+#define MALI_GPU_RESOURCE_DMA(dma_addr) \
+ { \
+ .name = "Mali_DMA", \
+ .flags = IORESOURCE_MEM, \
+ .start = dma_addr, \
+ .end = dma_addr + 0x100, \
+ },
+
+#define MALI_GPU_RESOURCE_DLBU(dlbu_addr) \
+ { \
+ .name = "Mali_DLBU", \
+ .flags = IORESOURCE_MEM, \
+ .start = dlbu_addr, \
+ .end = dlbu_addr + 0x100, \
+ },
+
+#define MALI_GPU_RESOURCE_BCAST(bcast_addr) \
+ { \
+ .name = "Mali_Broadcast", \
+ .flags = IORESOURCE_MEM, \
+ .start = bcast_addr, \
+ .end = bcast_addr + 0x100, \
+ },
+
+#define MALI_GPU_RESOURCE_PP_BCAST(pp_addr, pp_irq) \
+ { \
+ .name = "Mali_PP_Broadcast", \
+ .flags = IORESOURCE_MEM, \
+ .start = pp_addr, \
+ .end = pp_addr + 0x1100, \
+ }, \
+ { \
+ .name = "Mali_PP_Broadcast_IRQ", \
+ .flags = IORESOURCE_IRQ, \
+ .start = pp_irq, \
+ .end = pp_irq, \
+ }, \
+
+#define MALI_GPU_RESOURCE_PP_MMU_BCAST(pp_mmu_bcast_addr) \
+ { \
+ .name = "Mali_PP_MMU_Broadcast", \
+ .flags = IORESOURCE_MEM, \
+ .start = pp_mmu_bcast_addr, \
+ .end = pp_mmu_bcast_addr + 0x100, \
+ },
+
+ struct mali_gpu_utilization_data {
+ unsigned int utilization_gpu; /* Utilization for GP and all PP cores combined, 0 = no utilization, 256 = full utilization */
+ unsigned int utilization_gp; /* Utilization for GP core only, 0 = no utilization, 256 = full utilization */
+ unsigned int utilization_pp; /* Utilization for all PP cores combined, 0 = no utilization, 256 = full utilization */
+ };
+
+ struct mali_gpu_clk_item {
+ unsigned int clock; /* unit(MHz) */
+ unsigned int vol;
+ };
+
+ struct mali_gpu_clock {
+ struct mali_gpu_clk_item *item;
+ unsigned int num_of_steps;
+ };
+
+ struct mali_gpu_device_data {
+ /* Shared GPU memory */
+ unsigned long shared_mem_size;
+
+ /*
+ * Mali PMU switch delay.
+ * Only needed if the power gates are connected to the PMU in a high fanout
+ * network. This value is the number of Mali clock cycles it takes to
+ * enable the power gates and turn on the power mesh.
+ * This value will have no effect if a daisy chain implementation is used.
+ */
+ u32 pmu_switch_delay;
+
+ /* Mali dynamic power domain configuration, in sequence from 0-11:
+  * GP, PP0-PP7, L2$0, L2$1, L2$2
+  */
+ u16 pmu_domain_config[12];
+
+ /* Dedicated GPU memory range (physical). */
+ unsigned long dedicated_mem_start;
+ unsigned long dedicated_mem_size;
+
+ /* Frame buffer memory to be accessible by Mali GPU (physical) */
+ unsigned long fb_start;
+ unsigned long fb_size;
+
+ /* Max runtime [ms] for jobs */
+ int max_job_runtime;
+
+ /* Interval (in ms) at which GPU utilization is reported and related control is applied */
+ unsigned long control_interval;
+
+ /* Function that will receive periodic GPU utilization numbers */
+ void (*utilization_callback)(struct mali_gpu_utilization_data *data);
+
+ /* Platform callback for setting the clock step; needed when CONFIG_MALI_DVFS is enabled */
+ int (*set_freq)(int setting_clock_step);
+ /* Platform callback reporting the clock steps the driver may set; needed when CONFIG_MALI_DVFS is enabled */
+ void (*get_clock_info)(struct mali_gpu_clock **data);
+ /* Platform callback returning the current clock step; needed when CONFIG_MALI_DVFS is enabled */
+ int (*get_freq)(void);
+ };
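To make these hooks concrete, here is a minimal sketch, assuming an invented base address, invented IRQ numbers and a made-up callback name, of how a board file might combine the resource macros above with mali_gpu_device_data:

    static void sample_gpu_utilization(struct mali_gpu_utilization_data *data)
    {
            /* All three fields are scaled 0..256, where 256 means fully busy. */
            pr_debug("mali util: gpu %u gp %u pp %u (of 256)\n",
                     data->utilization_gpu, data->utilization_gp,
                     data->utilization_pp);
    }

    static struct mali_gpu_device_data mali_gpu_data = {
            .shared_mem_size      = 256 * 1024 * 1024,
            .max_job_runtime      = 2000,   /* ms */
            .control_interval     = 200,    /* ms */
            .utilization_callback = sample_gpu_utilization,
    };

    static struct resource mali_gpu_resources[] = {
            /* Placeholder base address and IRQ numbers. */
            MALI_GPU_RESOURCES_MALI470_MP1_PMU(0xf4100000, 34, 35, 36, 37, 38)
    };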
+
+ /**
+ * Pause the scheduling and power state changes of Mali device driver.
+ * mali_dev_resume() must always be called as soon as possible after this function
+ * in order to resume normal operation of the Mali driver.
+ */
+ void mali_dev_pause(void);
+
+ /**
+ * Resume scheduling and allow power changes in Mali device driver.
+ * This must always be called after mali_dev_pause().
+ */
+ void mali_dev_resume(void);
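A brief usage sketch (the bracketed operation is a placeholder):

    mali_dev_pause();
    /* ...reconfigure an external clock or power rail feeding the GPU... */
    mali_dev_resume();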
+
+ /** @brief Set the desired number of PP cores to use.
+ *
+ * The internal Mali PMU will be used, if present, to physically power off the PP cores.
+ *
+ * @param num_cores The number of desired cores
+ * @return 0 on success, otherwise error. -EINVAL means an invalid number of cores was specified.
+ */
+ int mali_perf_set_num_pp_cores(unsigned int num_cores);
+
+#endif
diff --git a/drivers/gpu/arm/utgard/include/linux/mali/mali_utgard_ioctl.h b/drivers/gpu/arm/utgard/include/linux/mali/mali_utgard_ioctl.h
new file mode 100644
index 000000000000..6a6e69ab6978
--- /dev/null
+++ b/drivers/gpu/arm/utgard/include/linux/mali/mali_utgard_ioctl.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+
+ * Class Path Exception
+ * Linking this library statically or dynamically with other modules is making a combined work based on this library.
+ * Thus, the terms and conditions of the GNU General Public License cover the whole combination.
+ * As a special exception, the copyright holders of this library give you permission to link this library with independent modules
+ * to produce an executable, regardless of the license terms of these independent modules, and to copy and distribute the resulting
+ * executable under terms of your choice, provided that you also meet, for each linked independent module, the terms and conditions
+ * of the license of that module. An independent module is a module which is not derived from or based on this library. If you modify
+ * this library, you may extend this exception to your version of the library, but you are not obligated to do so.
+ * If you do not wish to do so, delete this exception statement from your version.
+ */
+
+#ifndef __MALI_UTGARD_IOCTL_H__
+#define __MALI_UTGARD_IOCTL_H__
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <linux/fs.h> /* file system operations */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @file mali_utgard_ioctl.h
+ * Interface to the Linux device driver.
+ * This file describes the interface needed to use the Linux device driver.
+ * Its interface is designed to be used by the HAL implementation through a thin arch layer.
+ */
+
+/**
+ * ioctl commands
+ */
+
+#define MALI_IOC_BASE 0x82
+#define MALI_IOC_CORE_BASE (_MALI_UK_CORE_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_MEMORY_BASE (_MALI_UK_MEMORY_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_PP_BASE (_MALI_UK_PP_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_GP_BASE (_MALI_UK_GP_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_PROFILING_BASE (_MALI_UK_PROFILING_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_VSYNC_BASE (_MALI_UK_VSYNC_SUBSYSTEM + MALI_IOC_BASE)
+
+#define MALI_IOC_WAIT_FOR_NOTIFICATION _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_WAIT_FOR_NOTIFICATION, _mali_uk_wait_for_notification_s)
+#define MALI_IOC_GET_API_VERSION _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_API_VERSION, u32)
+#define MALI_IOC_GET_API_VERSION_V2 _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_API_VERSION, _mali_uk_get_api_version_v2_s)
+#define MALI_IOC_POST_NOTIFICATION _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_POST_NOTIFICATION, _mali_uk_post_notification_s)
+#define MALI_IOC_GET_USER_SETTING _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_USER_SETTING, _mali_uk_get_user_setting_s)
+#define MALI_IOC_GET_USER_SETTINGS _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_USER_SETTINGS, _mali_uk_get_user_settings_s)
+#define MALI_IOC_REQUEST_HIGH_PRIORITY _IOW (MALI_IOC_CORE_BASE, _MALI_UK_REQUEST_HIGH_PRIORITY, _mali_uk_request_high_priority_s)
+#define MALI_IOC_TIMELINE_GET_LATEST_POINT _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_TIMELINE_GET_LATEST_POINT, _mali_uk_timeline_get_latest_point_s)
+#define MALI_IOC_TIMELINE_WAIT _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_TIMELINE_WAIT, _mali_uk_timeline_wait_s)
+#define MALI_IOC_TIMELINE_CREATE_SYNC_FENCE _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_TIMELINE_CREATE_SYNC_FENCE, _mali_uk_timeline_create_sync_fence_s)
+#define MALI_IOC_SOFT_JOB_START _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_SOFT_JOB_START, _mali_uk_soft_job_start_s)
+#define MALI_IOC_SOFT_JOB_SIGNAL _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_SOFT_JOB_SIGNAL, _mali_uk_soft_job_signal_s)
+#define MALI_IOC_PENDING_SUBMIT _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_PENDING_SUBMIT, _mali_uk_pending_submit_s)
+
+#define MALI_IOC_MEM_ALLOC _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_ALLOC_MEM, _mali_uk_alloc_mem_s)
+#define MALI_IOC_MEM_FREE _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_FREE_MEM, _mali_uk_free_mem_s)
+#define MALI_IOC_MEM_BIND _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_BIND_MEM, _mali_uk_bind_mem_s)
+#define MALI_IOC_MEM_UNBIND _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_UNBIND_MEM, _mali_uk_unbind_mem_s)
+#define MALI_IOC_MEM_COW _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_COW_MEM, _mali_uk_cow_mem_s)
+#define MALI_IOC_MEM_COW_MODIFY_RANGE _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_COW_MODIFY_RANGE, _mali_uk_cow_modify_range_s)
+#define MALI_IOC_MEM_RESIZE _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_RESIZE_MEM, _mali_uk_mem_resize_s)
+#define MALI_IOC_MEM_DMA_BUF_GET_SIZE _IOR(MALI_IOC_MEMORY_BASE, _MALI_UK_DMA_BUF_GET_SIZE, _mali_uk_dma_buf_get_size_s)
+#define MALI_IOC_MEM_QUERY_MMU_PAGE_TABLE_DUMP_SIZE _IOR (MALI_IOC_MEMORY_BASE, _MALI_UK_QUERY_MMU_PAGE_TABLE_DUMP_SIZE, _mali_uk_query_mmu_page_table_dump_size_s)
+#define MALI_IOC_MEM_DUMP_MMU_PAGE_TABLE _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_DUMP_MMU_PAGE_TABLE, _mali_uk_dump_mmu_page_table_s)
+#define MALI_IOC_MEM_WRITE_SAFE _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_MEM_WRITE_SAFE, _mali_uk_mem_write_safe_s)
+
+#define MALI_IOC_PP_START_JOB _IOWR(MALI_IOC_PP_BASE, _MALI_UK_PP_START_JOB, _mali_uk_pp_start_job_s)
+#define MALI_IOC_PP_AND_GP_START_JOB _IOWR(MALI_IOC_PP_BASE, _MALI_UK_PP_AND_GP_START_JOB, _mali_uk_pp_and_gp_start_job_s)
+#define MALI_IOC_PP_NUMBER_OF_CORES_GET _IOR (MALI_IOC_PP_BASE, _MALI_UK_GET_PP_NUMBER_OF_CORES, _mali_uk_get_pp_number_of_cores_s)
+#define MALI_IOC_PP_CORE_VERSION_GET _IOR (MALI_IOC_PP_BASE, _MALI_UK_GET_PP_CORE_VERSION, _mali_uk_get_pp_core_version_s)
+#define MALI_IOC_PP_DISABLE_WB _IOW (MALI_IOC_PP_BASE, _MALI_UK_PP_DISABLE_WB, _mali_uk_pp_disable_wb_s)
+
+#define MALI_IOC_GP2_START_JOB _IOWR(MALI_IOC_GP_BASE, _MALI_UK_GP_START_JOB, _mali_uk_gp_start_job_s)
+#define MALI_IOC_GP2_NUMBER_OF_CORES_GET _IOR (MALI_IOC_GP_BASE, _MALI_UK_GET_GP_NUMBER_OF_CORES, _mali_uk_get_gp_number_of_cores_s)
+#define MALI_IOC_GP2_CORE_VERSION_GET _IOR (MALI_IOC_GP_BASE, _MALI_UK_GET_GP_CORE_VERSION, _mali_uk_get_gp_core_version_s)
+#define MALI_IOC_GP2_SUSPEND_RESPONSE _IOW (MALI_IOC_GP_BASE, _MALI_UK_GP_SUSPEND_RESPONSE,_mali_uk_gp_suspend_response_s)
+
+#define MALI_IOC_PROFILING_ADD_EVENT _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_ADD_EVENT, _mali_uk_profiling_add_event_s)
+#define MALI_IOC_PROFILING_REPORT_SW_COUNTERS _IOW (MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_REPORT_SW_COUNTERS, _mali_uk_sw_counters_report_s)
+#define MALI_IOC_PROFILING_MEMORY_USAGE_GET _IOR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_MEMORY_USAGE_GET, _mali_uk_profiling_memory_usage_get_s)
+#define MALI_IOC_PROFILING_STREAM_FD_GET _IOR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_STREAM_FD_GET, _mali_uk_profiling_stream_fd_get_s)
+#define MALI_IOC_PROILING_CONTROL_SET _IOR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_CONTROL_SET, _mali_uk_profiling_control_set_s)
+
+#define MALI_IOC_VSYNC_EVENT_REPORT _IOW (MALI_IOC_VSYNC_BASE, _MALI_UK_VSYNC_EVENT_REPORT, _mali_uk_vsync_event_report_s)
+
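For orientation, user space drives these commands through ordinary ioctl() calls. A hedged sketch, assuming a /dev/mali device node and the usual open/ioctl headers, of querying the API version, whose argument is a bare u32 per the define above:

    int fd = open("/dev/mali", O_RDWR);     /* device node name assumed */
    __u32 version = 0;
    if (fd >= 0 && ioctl(fd, MALI_IOC_GET_API_VERSION, &version) == 0)
            printf("Mali UK API version word: 0x%08x\n", version);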
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UTGARD_IOCTL_H__ */
diff --git a/drivers/gpu/arm/utgard/include/linux/mali/mali_utgard_profiling_events.h b/drivers/gpu/arm/utgard/include/linux/mali/mali_utgard_profiling_events.h
new file mode 100644
index 000000000000..279bf8ee38a4
--- /dev/null
+++ b/drivers/gpu/arm/utgard/include/linux/mali/mali_utgard_profiling_events.h
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+
+ * Class Path Exception
+ * Linking this library statically or dynamically with other modules is making a combined work based on this library.
+ * Thus, the terms and conditions of the GNU General Public License cover the whole combination.
+ * As a special exception, the copyright holders of this library give you permission to link this library with independent modules
+ * to produce an executable, regardless of the license terms of these independent modules, and to copy and distribute the resulting
+ * executable under terms of your choice, provided that you also meet, for each linked independent module, the terms and conditions
+ * of the license of that module. An independent module is a module which is not derived from or based on this library. If you modify
+ * this library, you may extend this exception to your version of the library, but you are not obligated to do so.
+ * If you do not wish to do so, delete this exception statement from your version.
+ */
+
+#ifndef _MALI_UTGARD_PROFILING_EVENTS_H_
+#define _MALI_UTGARD_PROFILING_EVENTS_H_
+
+/*
+ * The event ID is a 32-bit value consisting of the following fields:
+ *  - reserved:      4 bits, for future use
+ *  - event type:    4 bits, cinstr_profiling_event_type_t
+ *  - event channel: 8 bits, the source of the event
+ *  - event data:   16 bits, data depending on the event type
+ */
+
+/**
+ * Specifies what kind of event this is
+ */
+typedef enum {
+ MALI_PROFILING_EVENT_TYPE_SINGLE = 0 << 24,
+ MALI_PROFILING_EVENT_TYPE_START = 1 << 24,
+ MALI_PROFILING_EVENT_TYPE_STOP = 2 << 24,
+ MALI_PROFILING_EVENT_TYPE_SUSPEND = 3 << 24,
+ MALI_PROFILING_EVENT_TYPE_RESUME = 4 << 24,
+} cinstr_profiling_event_type_t;
+
+
+/**
+ * Specifies the channel/source of the event
+ */
+typedef enum {
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE = 0 << 16,
+ MALI_PROFILING_EVENT_CHANNEL_GP0 = 1 << 16,
+ MALI_PROFILING_EVENT_CHANNEL_PP0 = 5 << 16,
+ MALI_PROFILING_EVENT_CHANNEL_PP1 = 6 << 16,
+ MALI_PROFILING_EVENT_CHANNEL_PP2 = 7 << 16,
+ MALI_PROFILING_EVENT_CHANNEL_PP3 = 8 << 16,
+ MALI_PROFILING_EVENT_CHANNEL_PP4 = 9 << 16,
+ MALI_PROFILING_EVENT_CHANNEL_PP5 = 10 << 16,
+ MALI_PROFILING_EVENT_CHANNEL_PP6 = 11 << 16,
+ MALI_PROFILING_EVENT_CHANNEL_PP7 = 12 << 16,
+ MALI_PROFILING_EVENT_CHANNEL_GPU = 21 << 16,
+} cinstr_profiling_event_channel_t;
+
+
+#define MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(num) (((MALI_PROFILING_EVENT_CHANNEL_GP0 >> 16) + (num)) << 16)
+#define MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(num) (((MALI_PROFILING_EVENT_CHANNEL_PP0 >> 16) + (num)) << 16)
+
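Putting the layout together, a complete event ID is the bitwise OR of a type, a channel and up to 16 bits of event data. An illustrative composition (the reason value is defined further down in this header):

    /* START event on fragment processor 2, flagged as a virtual core */
    u32 event_id = MALI_PROFILING_EVENT_TYPE_START |
                   MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(2) |
                   MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL;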
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_SINGLE is used from the software channel
+ */
+typedef enum {
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_NONE = 0,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_NEW_FRAME = 1,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_FLUSH = 2,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_SWAP_BUFFERS = 3,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_FB_EVENT = 4,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_GP_ENQUEUE = 5,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_PP_ENQUEUE = 6,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_READBACK = 7,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_WRITEBACK = 8,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_ENTER_API_FUNC = 10,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_LEAVE_API_FUNC = 11,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_DISCARD_ATTACHMENTS = 13,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_UMP_TRY_LOCK = 53,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_UMP_LOCK = 54,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_UMP_UNLOCK = 55,
+ MALI_PROFILING_EVENT_REASON_SINGLE_LOCK_CONTENDED = 56,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_MALI_FENCE_DUP = 57,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_SET_PP_JOB_FENCE = 58,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_WAIT_SYNC = 59,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_CREATE_FENCE_SYNC = 60,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_CREATE_NATIVE_FENCE_SYNC = 61,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_FENCE_FLUSH = 62,
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_FLUSH_SERVER_WAITS = 63,
+} cinstr_profiling_event_reason_single_sw_t;
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_START/STOP is used from the software channel
+ * to indicate whether the core is physical or virtual
+ */
+typedef enum {
+ MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL = 0,
+ MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL = 1,
+} cinstr_profiling_event_reason_start_stop_hw_t;
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_START/STOP is used from the software channel
+ */
+typedef enum {
+ /*MALI_PROFILING_EVENT_REASON_START_STOP_SW_NONE = 0,*/
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_MALI = 1,
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_CALLBACK_THREAD = 2,
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_WORKER_THREAD = 3,
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF = 4,
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF = 5,
+} cinstr_profiling_event_reason_start_stop_sw_t;
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_SUSPEND/RESUME is used from the software channel
+ */
+typedef enum {
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_NONE = 0, /* used */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_PIPELINE_FULL = 1, /* NOT used */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC = 26, /* used in some build configurations */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_IFRAME_WAIT = 27, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_IFRAME_SYNC = 28, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VG_WAIT_FILTER_CLEANUP = 29, /* used */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VG_WAIT_TEXTURE = 30, /* used */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_GLES_WAIT_MIPLEVEL = 31, /* used */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_GLES_WAIT_READPIXELS = 32, /* used */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_WAIT_SWAP_IMMEDIATE = 33, /* NOT used */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_ICS_QUEUE_BUFFER = 34, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_ICS_DEQUEUE_BUFFER = 35, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_UMP_LOCK = 36, /* Not currently used */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_X11_GLOBAL_LOCK = 37, /* Not currently used */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_X11_SWAP = 38, /* Not currently used */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_MALI_EGL_IMAGE_SYNC_WAIT = 39, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_GP_JOB_HANDLING = 40, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_PP_JOB_HANDLING = 41, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_MALI_FENCE_MERGE = 42, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_MALI_FENCE_DUP = 43,
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_FLUSH_SERVER_WAITS = 44,
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_WAIT_SYNC = 45, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_JOBS_WAIT = 46, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_NOFRAMES_WAIT = 47, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_NOJOBS_WAIT = 48, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_SUBMIT_LIMITER_WAIT = 49, /* USED */
+} cinstr_profiling_event_reason_suspend_resume_sw_t;
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_SINGLE is used from a HW channel (GPx+PPx)
+ */
+typedef enum {
+ MALI_PROFILING_EVENT_REASON_SINGLE_HW_NONE = 0,
+ MALI_PROFILING_EVENT_REASON_SINGLE_HW_INTERRUPT = 1,
+ MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH = 2,
+} cinstr_profiling_event_reason_single_hw_t;
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_SINGLE is used from the GPU channel
+ */
+typedef enum {
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_NONE = 0,
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE = 1,
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS = 2,
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L21_COUNTERS = 3,
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L22_COUNTERS = 4,
+} cinstr_profiling_event_reason_single_gpu_t;
+
+/**
+ * These values are applicable for the 3rd data parameter when
+ * the type MALI_PROFILING_EVENT_TYPE_START is used from the software channel
+ * with the MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF reason.
+ */
+typedef enum {
+ MALI_PROFILING_EVENT_DATA_CORE_GP0 = 1,
+ MALI_PROFILING_EVENT_DATA_CORE_PP0 = 5,
+ MALI_PROFILING_EVENT_DATA_CORE_PP1 = 6,
+ MALI_PROFILING_EVENT_DATA_CORE_PP2 = 7,
+ MALI_PROFILING_EVENT_DATA_CORE_PP3 = 8,
+ MALI_PROFILING_EVENT_DATA_CORE_PP4 = 9,
+ MALI_PROFILING_EVENT_DATA_CORE_PP5 = 10,
+ MALI_PROFILING_EVENT_DATA_CORE_PP6 = 11,
+ MALI_PROFILING_EVENT_DATA_CORE_PP7 = 12,
+ MALI_PROFILING_EVENT_DATA_CORE_GP0_MMU = 22, /* GP0 + 21 */
+ MALI_PROFILING_EVENT_DATA_CORE_PP0_MMU = 26, /* PP0 + 21 */
+ MALI_PROFILING_EVENT_DATA_CORE_PP1_MMU = 27, /* PP1 + 21 */
+ MALI_PROFILING_EVENT_DATA_CORE_PP2_MMU = 28, /* PP2 + 21 */
+ MALI_PROFILING_EVENT_DATA_CORE_PP3_MMU = 29, /* PP3 + 21 */
+ MALI_PROFILING_EVENT_DATA_CORE_PP4_MMU = 30, /* PP4 + 21 */
+ MALI_PROFILING_EVENT_DATA_CORE_PP5_MMU = 31, /* PP5 + 21 */
+ MALI_PROFILING_EVENT_DATA_CORE_PP6_MMU = 32, /* PP6 + 21 */
+ MALI_PROFILING_EVENT_DATA_CORE_PP7_MMU = 33, /* PP7 + 21 */
+
+} cinstr_profiling_event_data_core_t;
+
+#define MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(num) (MALI_PROFILING_EVENT_DATA_CORE_GP0 + (num))
+#define MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(num) (MALI_PROFILING_EVENT_DATA_CORE_GP0_MMU + (num))
+#define MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(num) (MALI_PROFILING_EVENT_DATA_CORE_PP0 + (num))
+#define MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(num) (MALI_PROFILING_EVENT_DATA_CORE_PP0_MMU + (num))
+
+
+#endif /*_MALI_UTGARD_PROFILING_EVENTS_H_*/
diff --git a/drivers/gpu/arm/utgard/include/linux/mali/mali_utgard_profiling_gator_api.h b/drivers/gpu/arm/utgard/include/linux/mali/mali_utgard_profiling_gator_api.h
new file mode 100644
index 000000000000..b922187a0b88
--- /dev/null
+++ b/drivers/gpu/arm/utgard/include/linux/mali/mali_utgard_profiling_gator_api.h
@@ -0,0 +1,315 @@
+/*
+ * Copyright (C) 2013, 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+
+ * Class Path Exception
+ * Linking this library statically or dynamically with other modules is making a combined work based on this library.
+ * Thus, the terms and conditions of the GNU General Public License cover the whole combination.
+ * As a special exception, the copyright holders of this library give you permission to link this library with independent modules
+ * to produce an executable, regardless of the license terms of these independent modules, and to copy and distribute the resulting
+ * executable under terms of your choice, provided that you also meet, for each linked independent module, the terms and conditions
+ * of the license of that module. An independent module is a module which is not derived from or based on this library. If you modify
+ * this library, you may extend this exception to your version of the library, but you are not obligated to do so.
+ * If you do not wish to do so, delete this exception statement from your version.
+ */
+
+#ifndef __MALI_UTGARD_PROFILING_GATOR_API_H__
+#define __MALI_UTGARD_PROFILING_GATOR_API_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MALI_PROFILING_API_VERSION 4
+
+#define MAX_NUM_L2_CACHE_CORES 3
+#define MAX_NUM_FP_CORES 8
+#define MAX_NUM_VP_CORES 1
+
+#define _MALI_SPCIAL_COUNTER_DESCRIPTIONS \
+ { \
+ "Filmstrip_cnt0", \
+ "Frequency", \
+ "Voltage", \
+ "vertex", \
+ "fragment", \
+ "Total_alloc_pages", \
+ };
+
+#define _MALI_MEM_COUTNER_DESCRIPTIONS \
+ { \
+ "untyped_memory", \
+ "vertex_index_buffer", \
+ "texture_buffer", \
+ "varying_buffer", \
+ "render_target", \
+ "pbuffer_buffer", \
+ "plbu_heap", \
+ "pointer_array_buffer", \
+ "slave_tilelist", \
+ "untyped_gp_cmdlist", \
+ "polygon_cmdlist", \
+ "texture_descriptor", \
+ "render_state_word", \
+ "shader", \
+ "stream_buffer", \
+ "fragment_stack", \
+ "uniform", \
+ "untyped_frame_pool", \
+ "untyped_surface", \
+ };
+
+/** The list of events supported by the Mali DDK. */
+typedef enum {
+ /* Vertex processor activity */
+ ACTIVITY_VP_0 = 0,
+
+ /* Fragment processor activity */
+ ACTIVITY_FP_0,
+ ACTIVITY_FP_1,
+ ACTIVITY_FP_2,
+ ACTIVITY_FP_3,
+ ACTIVITY_FP_4,
+ ACTIVITY_FP_5,
+ ACTIVITY_FP_6,
+ ACTIVITY_FP_7,
+
+ /* L2 cache counters */
+ COUNTER_L2_0_C0,
+ COUNTER_L2_0_C1,
+ COUNTER_L2_1_C0,
+ COUNTER_L2_1_C1,
+ COUNTER_L2_2_C0,
+ COUNTER_L2_2_C1,
+
+ /* Vertex processor counters */
+ COUNTER_VP_0_C0,
+ COUNTER_VP_0_C1,
+
+ /* Fragment processor counters */
+ COUNTER_FP_0_C0,
+ COUNTER_FP_0_C1,
+ COUNTER_FP_1_C0,
+ COUNTER_FP_1_C1,
+ COUNTER_FP_2_C0,
+ COUNTER_FP_2_C1,
+ COUNTER_FP_3_C0,
+ COUNTER_FP_3_C1,
+ COUNTER_FP_4_C0,
+ COUNTER_FP_4_C1,
+ COUNTER_FP_5_C0,
+ COUNTER_FP_5_C1,
+ COUNTER_FP_6_C0,
+ COUNTER_FP_6_C1,
+ COUNTER_FP_7_C0,
+ COUNTER_FP_7_C1,
+
+ /*
+ * If more hardware counters are added, the _mali_osk_hw_counter_table
+ * below should also be updated.
+ */
+
+ /* EGL software counters */
+ COUNTER_EGL_BLIT_TIME,
+
+ /* GLES software counters */
+ COUNTER_GLES_DRAW_ELEMENTS_CALLS,
+ COUNTER_GLES_DRAW_ELEMENTS_NUM_INDICES,
+ COUNTER_GLES_DRAW_ELEMENTS_NUM_TRANSFORMED,
+ COUNTER_GLES_DRAW_ARRAYS_CALLS,
+ COUNTER_GLES_DRAW_ARRAYS_NUM_TRANSFORMED,
+ COUNTER_GLES_DRAW_POINTS,
+ COUNTER_GLES_DRAW_LINES,
+ COUNTER_GLES_DRAW_LINE_LOOP,
+ COUNTER_GLES_DRAW_LINE_STRIP,
+ COUNTER_GLES_DRAW_TRIANGLES,
+ COUNTER_GLES_DRAW_TRIANGLE_STRIP,
+ COUNTER_GLES_DRAW_TRIANGLE_FAN,
+ COUNTER_GLES_NON_VBO_DATA_COPY_TIME,
+ COUNTER_GLES_UNIFORM_BYTES_COPIED_TO_MALI,
+ COUNTER_GLES_UPLOAD_TEXTURE_TIME,
+ COUNTER_GLES_UPLOAD_VBO_TIME,
+ COUNTER_GLES_NUM_FLUSHES,
+ COUNTER_GLES_NUM_VSHADERS_GENERATED,
+ COUNTER_GLES_NUM_FSHADERS_GENERATED,
+ COUNTER_GLES_VSHADER_GEN_TIME,
+ COUNTER_GLES_FSHADER_GEN_TIME,
+ COUNTER_GLES_INPUT_TRIANGLES,
+ COUNTER_GLES_VXCACHE_HIT,
+ COUNTER_GLES_VXCACHE_MISS,
+ COUNTER_GLES_VXCACHE_COLLISION,
+ COUNTER_GLES_CULLED_TRIANGLES,
+ COUNTER_GLES_CULLED_LINES,
+ COUNTER_GLES_BACKFACE_TRIANGLES,
+ COUNTER_GLES_GBCLIP_TRIANGLES,
+ COUNTER_GLES_GBCLIP_LINES,
+ COUNTER_GLES_TRIANGLES_DRAWN,
+ COUNTER_GLES_DRAWCALL_TIME,
+ COUNTER_GLES_TRIANGLES_COUNT,
+ COUNTER_GLES_INDEPENDENT_TRIANGLES_COUNT,
+ COUNTER_GLES_STRIP_TRIANGLES_COUNT,
+ COUNTER_GLES_FAN_TRIANGLES_COUNT,
+ COUNTER_GLES_LINES_COUNT,
+ COUNTER_GLES_INDEPENDENT_LINES_COUNT,
+ COUNTER_GLES_STRIP_LINES_COUNT,
+ COUNTER_GLES_LOOP_LINES_COUNT,
+
+ /* Special counter */
+
+ /* Framebuffer capture pseudo-counter */
+ COUNTER_FILMSTRIP,
+ COUNTER_FREQUENCY,
+ COUNTER_VOLTAGE,
+ COUNTER_VP_ACTIVITY,
+ COUNTER_FP_ACTIVITY,
+ COUNTER_TOTAL_ALLOC_PAGES,
+
+ /* Memory usage counter */
+ COUNTER_MEM_UNTYPED,
+ COUNTER_MEM_VB_IB,
+ COUNTER_MEM_TEXTURE,
+ COUNTER_MEM_VARYING,
+ COUNTER_MEM_RT,
+ COUNTER_MEM_PBUFFER,
+ /* memory usage for GP commands */
+ COUNTER_MEM_PLBU_HEAP,
+ COUNTER_MEM_POINTER_ARRAY,
+ COUNTER_MEM_SLAVE_TILELIST,
+ COUNTER_MEM_UNTYPE_GP_CMDLIST,
+ /* memory usage for polygon list commands */
+ COUNTER_MEM_POLYGON_CMDLIST,
+ /* memory usage for PP commands */
+ COUNTER_MEM_TD,
+ COUNTER_MEM_RSW,
+ /* other memory usage */
+ COUNTER_MEM_SHADER,
+ COUNTER_MEM_STREAMS,
+ COUNTER_MEM_FRAGMENT_STACK,
+ COUNTER_MEM_UNIFORM,
+ /* Special memory usage, used for memory pool allocation */
+ COUNTER_MEM_UNTYPE_MEM_POOL,
+ COUNTER_MEM_UNTYPE_SURFACE,
+
+ NUMBER_OF_EVENTS
+} _mali_osk_counter_id;
+
+#define FIRST_ACTIVITY_EVENT ACTIVITY_VP_0
+#define LAST_ACTIVITY_EVENT ACTIVITY_FP_7
+
+#define FIRST_HW_COUNTER COUNTER_L2_0_C0
+#define LAST_HW_COUNTER COUNTER_FP_7_C1
+
+#define FIRST_SW_COUNTER COUNTER_EGL_BLIT_TIME
+#define LAST_SW_COUNTER COUNTER_GLES_LOOP_LINES_COUNT
+
+#define FIRST_SPECIAL_COUNTER COUNTER_FILMSTRIP
+#define LAST_SPECIAL_COUNTER COUNTER_TOTAL_ALLOC_PAGES
+
+#define FIRST_MEM_COUNTER COUNTER_MEM_UNTYPED
+#define LAST_MEM_COUNTER COUNTER_MEM_UNTYPE_SURFACE
+
+#define MALI_PROFILING_MEM_COUNTERS_NUM (LAST_MEM_COUNTER - FIRST_MEM_COUNTER + 1)
+#define MALI_PROFILING_SPECIAL_COUNTERS_NUM (LAST_SPECIAL_COUNTER - FIRST_SPECIAL_COUNTER + 1)
+#define MALI_PROFILING_SW_COUNTERS_NUM (LAST_SW_COUNTER - FIRST_SW_COUNTER + 1)
+
+/**
+ * Define the stream header types for the profiling stream.
+ */
+#define STREAM_HEADER_FRAMEBUFFER 0x05 /* The stream packet header type for framebuffer dumping. */
+#define STREAM_HEADER_COUNTER_VALUE 0x09 /* The stream packet header type for hw/sw/memory counter sampling. */
+#define STREAM_HEADER_CORE_ACTIVITY 0x0a /* The stream packet header type for activity counter sampling. */
+#define STREAM_HEADER_SIZE 5
+
+/**
+ * Define the packet header types of the profiling control packets.
+ */
+#define PACKET_HEADER_ERROR 0x80 /* The response packet header type if error. */
+#define PACKET_HEADER_ACK 0x81 /* The response packet header type if OK. */
+#define PACKET_HEADER_COUNTERS_REQUEST 0x82 /* The control packet header type to request counter information from ddk. */
+#define PACKET_HEADER_COUNTERS_ACK 0x83 /* The response packet header type to send out counter information. */
+#define PACKET_HEADER_COUNTERS_ENABLE 0x84 /* The control packet header type to enable counters. */
+#define PACKET_HEADER_START_CAPTURE_VALUE 0x85 /* The control packet header type to start capture values. */
+
+#define PACKET_HEADER_SIZE 5
+
+/**
+ * Structure to pass performance counter data of a Mali core
+ */
+typedef struct _mali_profiling_core_counters {
+ u32 source0;
+ u32 value0;
+ u32 source1;
+ u32 value1;
+} _mali_profiling_core_counters;
+
+/**
+ * Structure to pass performance counter data of Mali L2 cache cores
+ */
+typedef struct _mali_profiling_l2_counter_values {
+ struct _mali_profiling_core_counters cores[MAX_NUM_L2_CACHE_CORES];
+} _mali_profiling_l2_counter_values;
+
+/**
+ * Structure to pass data defining Mali instance in use:
+ *
+ * mali_product_id - Mali product id
+ * mali_version_major - Mali version major number
+ * mali_version_minor - Mali version minor number
+ * num_of_l2_cores - number of L2 cache cores
+ * num_of_fp_cores - number of fragment processor cores
+ * num_of_vp_cores - number of vertex processor cores
+ */
+typedef struct _mali_profiling_mali_version {
+ u32 mali_product_id;
+ u32 mali_version_major;
+ u32 mali_version_minor;
+ u32 num_of_l2_cores;
+ u32 num_of_fp_cores;
+ u32 num_of_vp_cores;
+} _mali_profiling_mali_version;
+
+/**
+ * Structure to define the mali profiling counter struct.
+ */
+typedef struct mali_profiling_counter {
+ char counter_name[40];
+ u32 counter_id;
+ u32 counter_event;
+ u32 prev_counter_value;
+ u32 current_counter_value;
+ u32 key;
+ int enabled;
+} mali_profiling_counter;
+
+/*
+ * List of possible actions to be controlled by Streamline.
+ * The following numbers are used by gator to control the frame buffer dumping and s/w counter reporting.
+ * We cannot use the enums in mali_uk_types.h because they are unknown inside gator.
+ */
+#define FBDUMP_CONTROL_ENABLE (1)
+#define FBDUMP_CONTROL_RATE (2)
+#define SW_COUNTER_ENABLE (3)
+#define FBDUMP_CONTROL_RESIZE_FACTOR (4)
+#define MEM_COUNTER_ENABLE (5)
+#define ANNOTATE_PROFILING_ENABLE (6)
+
+void _mali_profiling_control(u32 action, u32 value);
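For example, gator could switch on software counter reporting and periodic framebuffer dumps like this (a sketch; the rate unit is an assumption inferred from the action name):

    _mali_profiling_control(SW_COUNTER_ENABLE, 1);
    _mali_profiling_control(FBDUMP_CONTROL_ENABLE, 1);
    _mali_profiling_control(FBDUMP_CONTROL_RATE, 30);   /* assumed: every 30th frame */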
+
+u32 _mali_profiling_get_l2_counters(_mali_profiling_l2_counter_values *values);
+
+int _mali_profiling_set_event(u32 counter_id, s32 event_id);
+
+u32 _mali_profiling_get_api_version(void);
+
+void _mali_profiling_get_mali_version(struct _mali_profiling_mali_version *values);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UTGARD_PROFILING_GATOR_API_H__ */
diff --git a/drivers/gpu/arm/utgard/include/linux/mali/mali_utgard_uk_types.h b/drivers/gpu/arm/utgard/include/linux/mali/mali_utgard_uk_types.h
new file mode 100644
index 000000000000..a291dfba2c08
--- /dev/null
+++ b/drivers/gpu/arm/utgard/include/linux/mali/mali_utgard_uk_types.h
@@ -0,0 +1,1106 @@
+/*
+ * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+
+ * Class Path Exception
+ * Linking this library statically or dynamically with other modules is making a combined work based on this library.
+ * Thus, the terms and conditions of the GNU General Public License cover the whole combination.
+ * As a special exception, the copyright holders of this library give you permission to link this library with independent modules
+ * to produce an executable, regardless of the license terms of these independent modules, and to copy and distribute the resulting
+ * executable under terms of your choice, provided that you also meet, for each linked independent module, the terms and conditions
+ * of the license of that module. An independent module is a module which is not derived from or based on this library. If you modify
+ * this library, you may extend this exception to your version of the library, but you are not obligated to do so.
+ * If you do not wish to do so, delete this exception statement from your version.
+ */
+
+/**
+ * @file mali_utgard_uk_types.h
+ * Defines the types and constants used in the user-kernel interface
+ */
+
+#ifndef __MALI_UTGARD_UK_TYPES_H__
+#define __MALI_UTGARD_UK_TYPES_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Iteration functions depend on these values being consecutive. */
+#define MALI_UK_TIMELINE_GP 0
+#define MALI_UK_TIMELINE_PP 1
+#define MALI_UK_TIMELINE_SOFT 2
+#define MALI_UK_TIMELINE_MAX 3
+
+#define MALI_UK_BIG_VARYING_SIZE (1024*1024*2)
+
+typedef struct {
+ u32 points[MALI_UK_TIMELINE_MAX];
+ s32 sync_fd;
+} _mali_uk_fence_t;
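Because the timeline IDs above are consecutive, a fence spanning every timeline can be filled in a loop. A sketch, assuming that a point of 0 means "no wait point" and that a sync_fd of -1 means no sync fd is attached:

    _mali_uk_fence_t fence = { .sync_fd = -1 };
    int i;

    for (i = 0; i < MALI_UK_TIMELINE_MAX; i++)
            fence.points[i] = 0;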
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup u_k_api UDD User/Kernel Interface (U/K) APIs
+ *
+ * @{
+ */
+
+/** @defgroup _mali_uk_core U/K Core
+ * @{ */
+
+/** Definition of subsystem numbers, to assist in creating a unique identifier
+ * for each U/K call.
+ *
+ * @see _mali_uk_functions */
+typedef enum {
+ _MALI_UK_CORE_SUBSYSTEM, /**< Core Group of U/K calls */
+ _MALI_UK_MEMORY_SUBSYSTEM, /**< Memory Group of U/K calls */
+ _MALI_UK_PP_SUBSYSTEM, /**< Fragment Processor Group of U/K calls */
+ _MALI_UK_GP_SUBSYSTEM, /**< Vertex Processor Group of U/K calls */
+ _MALI_UK_PROFILING_SUBSYSTEM, /**< Profiling Group of U/K calls */
+ _MALI_UK_VSYNC_SUBSYSTEM, /**< VSYNC Group of U/K calls */
+} _mali_uk_subsystem_t;
+
+/** Within a function group each function has its unique sequence number
+ * to assist in creating a unique identifier for each U/K call.
+ *
+ * An ordered pair of numbers selected from
+ * ( \ref _mali_uk_subsystem_t,\ref _mali_uk_functions) will uniquely identify the
+ * U/K call across all groups of functions, and all functions. */
+typedef enum {
+ /** Core functions */
+
+ _MALI_UK_OPEN = 0, /**< _mali_ukk_open() */
+ _MALI_UK_CLOSE, /**< _mali_ukk_close() */
+ _MALI_UK_WAIT_FOR_NOTIFICATION, /**< _mali_ukk_wait_for_notification() */
+ _MALI_UK_GET_API_VERSION, /**< _mali_ukk_get_api_version() */
+ _MALI_UK_POST_NOTIFICATION, /**< _mali_ukk_post_notification() */
+ _MALI_UK_GET_USER_SETTING, /**< _mali_ukk_get_user_setting() *//**< [out] */
+ _MALI_UK_GET_USER_SETTINGS, /**< _mali_ukk_get_user_settings() *//**< [out] */
+ _MALI_UK_REQUEST_HIGH_PRIORITY, /**< _mali_ukk_request_high_priority() */
+ _MALI_UK_TIMELINE_GET_LATEST_POINT, /**< _mali_ukk_timeline_get_latest_point() */
+ _MALI_UK_TIMELINE_WAIT, /**< _mali_ukk_timeline_wait() */
+ _MALI_UK_TIMELINE_CREATE_SYNC_FENCE, /**< _mali_ukk_timeline_create_sync_fence() */
+ _MALI_UK_SOFT_JOB_START, /**< _mali_ukk_soft_job_start() */
+ _MALI_UK_SOFT_JOB_SIGNAL, /**< _mali_ukk_soft_job_signal() */
+ _MALI_UK_PENDING_SUBMIT, /**< _mali_ukk_pending_submit() */
+
+ /** Memory functions */
+
+ _MALI_UK_ALLOC_MEM = 0, /**< _mali_ukk_alloc_mem() */
+ _MALI_UK_FREE_MEM, /**< _mali_ukk_free_mem() */
+ _MALI_UK_BIND_MEM, /**< _mali_ukk_mem_bind() */
+ _MALI_UK_UNBIND_MEM, /**< _mali_ukk_mem_unbind() */
+ _MALI_UK_COW_MEM, /**< _mali_ukk_mem_cow() */
+ _MALI_UK_COW_MODIFY_RANGE, /**< _mali_ukk_mem_cow_modify_range() */
+ _MALI_UK_RESIZE_MEM, /**< _mali_ukk_mem_resize() */
+ _MALI_UK_QUERY_MMU_PAGE_TABLE_DUMP_SIZE, /**< _mali_ukk_mem_get_mmu_page_table_dump_size() */
+ _MALI_UK_DUMP_MMU_PAGE_TABLE, /**< _mali_ukk_mem_dump_mmu_page_table() */
+ _MALI_UK_DMA_BUF_GET_SIZE, /**< _mali_ukk_dma_buf_get_size() */
+ _MALI_UK_MEM_WRITE_SAFE, /**< _mali_uku_mem_write_safe() */
+
+ /** Common functions for each core */
+
+ _MALI_UK_START_JOB = 0, /**< Start a Fragment/Vertex Processor Job on a core */
+ _MALI_UK_GET_NUMBER_OF_CORES, /**< Get the number of Fragment/Vertex Processor cores */
+ _MALI_UK_GET_CORE_VERSION, /**< Get the Fragment/Vertex Processor version compatible with all cores */
+
+ /** Fragment Processor Functions */
+
+ _MALI_UK_PP_START_JOB = _MALI_UK_START_JOB, /**< _mali_ukk_pp_start_job() */
+ _MALI_UK_GET_PP_NUMBER_OF_CORES = _MALI_UK_GET_NUMBER_OF_CORES, /**< _mali_ukk_get_pp_number_of_cores() */
+ _MALI_UK_GET_PP_CORE_VERSION = _MALI_UK_GET_CORE_VERSION, /**< _mali_ukk_get_pp_core_version() */
+ _MALI_UK_PP_DISABLE_WB, /**< _mali_ukk_pp_job_disable_wb() */
+ _MALI_UK_PP_AND_GP_START_JOB, /**< _mali_ukk_pp_and_gp_start_job() */
+
+ /** Vertex Processor Functions */
+
+ _MALI_UK_GP_START_JOB = _MALI_UK_START_JOB, /**< _mali_ukk_gp_start_job() */
+ _MALI_UK_GET_GP_NUMBER_OF_CORES = _MALI_UK_GET_NUMBER_OF_CORES, /**< _mali_ukk_get_gp_number_of_cores() */
+ _MALI_UK_GET_GP_CORE_VERSION = _MALI_UK_GET_CORE_VERSION, /**< _mali_ukk_get_gp_core_version() */
+ _MALI_UK_GP_SUSPEND_RESPONSE, /**< _mali_ukk_gp_suspend_response() */
+
+ /** Profiling functions */
+
+ _MALI_UK_PROFILING_ADD_EVENT = 0, /**< __mali_uku_profiling_add_event() */
+ _MALI_UK_PROFILING_REPORT_SW_COUNTERS,/**< __mali_uku_profiling_report_sw_counters() */
+ _MALI_UK_PROFILING_MEMORY_USAGE_GET, /**< __mali_uku_profiling_memory_usage_get() */
+ _MALI_UK_PROFILING_STREAM_FD_GET, /**< __mali_uku_profiling_stream_fd_get() */
+ _MALI_UK_PROFILING_CONTROL_SET, /**< __mali_uku_profiling_control_set() */
+
+ /** VSYNC reporting functions */
+ _MALI_UK_VSYNC_EVENT_REPORT = 0, /**< _mali_ukk_vsync_event_report() */
+} _mali_uk_functions;
+
+/** @defgroup _mali_uk_getsysteminfo U/K Get System Info
+ * @{ */
+
+/**
+ * Type definition for the core version number.
+ * Used when returning the version number read from a core
+ *
+ * Its format is that of the 32-bit Version register for a particular core.
+ * Refer to the "Mali200 and MaliGP2 3D Graphics Processor Technical Reference
+ * Manual", ARM DDI 0415C, for more information.
+ */
+typedef u32 _mali_core_version;
+
+/** @} */ /* end group _mali_uk_core */
+
+
+/** @defgroup _mali_uk_gp U/K Vertex Processor
+ * @{ */
+
+/** @defgroup _mali_uk_gp_suspend_response_s Vertex Processor Suspend Response
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_gp_suspend_response()
+ *
+ * When _mali_wait_for_notification() receives notification that a
+ * Vertex Processor job was suspended, you need to send a response to indicate
+ * what needs to happen with this job. You can either abort or resume the job.
+ *
+ * - set @c code to indicate response code. This is either @c _MALIGP_JOB_ABORT or
+ * @c _MALIGP_JOB_RESUME_WITH_NEW_HEAP to indicate you will provide a new heap
+ * for the job that will resolve the out of memory condition for the job.
+ * - copy the @c cookie value from the @c _mali_uk_gp_job_suspended_s notification;
+ * this is an identifier for the suspended job
+ * - set @c arguments[0] and @c arguments[1] to zero if you abort the job. If
+ * you resume it, @c argument[0] should specify the Mali start address for the new
+ * heap and @c argument[1] the Mali end address of the heap.
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ *
+ */
+typedef enum _maligp_job_suspended_response_code {
+ _MALIGP_JOB_ABORT, /**< Abort the Vertex Processor job */
+ _MALIGP_JOB_RESUME_WITH_NEW_HEAP /**< Resume the Vertex Processor job with a new heap */
+} _maligp_job_suspended_response_code;
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 cookie; /**< [in] cookie from the _mali_uk_gp_job_suspended_s notification */
+ _maligp_job_suspended_response_code code; /**< [in] abort or resume response code, see \ref _maligp_job_suspended_response_code */
+ u32 arguments[2]; /**< [in] 0 when aborting a job. When resuming a job, the Mali start and end address for a new heap to resume the job with */
+} _mali_uk_gp_suspend_response_s;
+
+/** @} */ /* end group _mali_uk_gp_suspend_response_s */
+
+/** @defgroup _mali_uk_gpstartjob_s Vertex Processor Start Job
+ * @{ */
+
+/** @brief Status indicating the result of the execution of a Vertex or Fragment processor job */
+typedef enum {
+ _MALI_UK_JOB_STATUS_END_SUCCESS = 1 << (16 + 0),
+ _MALI_UK_JOB_STATUS_END_OOM = 1 << (16 + 1),
+ _MALI_UK_JOB_STATUS_END_ABORT = 1 << (16 + 2),
+ _MALI_UK_JOB_STATUS_END_TIMEOUT_SW = 1 << (16 + 3),
+ _MALI_UK_JOB_STATUS_END_HANG = 1 << (16 + 4),
+ _MALI_UK_JOB_STATUS_END_SEG_FAULT = 1 << (16 + 5),
+ _MALI_UK_JOB_STATUS_END_ILLEGAL_JOB = 1 << (16 + 6),
+ _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR = 1 << (16 + 7),
+ _MALI_UK_JOB_STATUS_END_SHUTDOWN = 1 << (16 + 8),
+ _MALI_UK_JOB_STATUS_END_SYSTEM_UNUSABLE = 1 << (16 + 9)
+} _mali_uk_job_status;
+
+#define MALIGP2_NUM_REGS_FRAME (6)
+
+/** @brief Arguments for _mali_ukk_gp_start_job()
+ *
+ * To start a Vertex Processor job
+ * - associate the request with a reference to a @c mali_gp_job_info by setting
+ * user_job_ptr to the address of the @c mali_gp_job_info of the job.
+ * - set @c priority to the priority of the @c mali_gp_job_info
+ * - specify a timeout for the job by setting @c watchdog_msecs to the number of
+ * milliseconds the job is allowed to run. Specifying a value of 0 selects the
+ * default timeout in use by the device driver.
+ * - copy the frame registers from the @c mali_gp_job_info into @c frame_registers.
+ * - set the @c perf_counter_flag, @c perf_counter_src0 and @c perf_counter_src1 to zero
+ * for a non-instrumented build. For an instrumented build you can use up
+ * to two performance counters. Set the corresponding bit in @c perf_counter_flag
+ * to enable them. @c perf_counter_src0 and @c perf_counter_src1 specify
+ * the source of what needs to get counted (e.g. number of vertex loader
+ * cache hits). For source id values, see ARM DDI0415A, Table 3-60.
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ *
+ * When @c _mali_ukk_gp_start_job() returns @c _MALI_OSK_ERR_OK, status contains the
+ * result of the request (see \ref _mali_uk_start_job_status). If the job could
+ * not get started (@c _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE) it should be
+ * tried again.
+ *
+ * After the job has started, @c _mali_wait_for_notification() will be notified
+ * that the job finished or got suspended. It may get suspended due to
+ * resource shortage. If it finished (see _mali_ukk_wait_for_notification())
+ * the notification will contain a @c _mali_uk_gp_job_finished_s result. If
+ * it got suspended the notification will contain a @c _mali_uk_gp_job_suspended_s
+ * result.
+ *
+ * The @c _mali_uk_gp_job_finished_s contains the job status (see \ref _mali_uk_job_status),
+ * the number of milliseconds the job took to render, and values of core registers
+ * when the job finished (irq status, performance counters, renderer list
+ * address). A job has finished successfully when its status is
+ * @c _MALI_UK_JOB_STATUS_FINISHED. If the hardware detected a timeout while rendering
+ * the job, or software detected the job is taking more than watchdog_msecs to
+ * complete, the status will indicate @c _MALI_UK_JOB_STATUS_HANG.
+ * If the hardware detected a bus error while accessing memory associated with the
+ * job, status will indicate @c _MALI_UK_JOB_STATUS_SEG_FAULT.
+ * status will indicate @c _MALI_UK_JOB_STATUS_NOT_STARTED if the driver had to
+ * stop the job but the job didn't start on the hardware yet, e.g. when the
+ * driver shut down.
+ *
+ * In case the job got suspended, @c _mali_uk_gp_job_suspended_s contains
+ * the @c user_job_ptr identifier used to start the job with, the @c reason
+ * why the job stalled (see \ref _maligp_job_suspended_reason) and a @c cookie
+ * to identify the core on which the job stalled. This @c cookie will be needed
+ * when responding to this notification by means of _mali_ukk_gp_suspend_response().
+ * (see _mali_ukk_gp_suspend_response()). The response is either to abort or
+ * resume the job. If the job got suspended due to an out of memory condition
+ * you may be able to resolve this by providing more memory and resuming the job.
+ *
+ */
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 user_job_ptr; /**< [in] identifier for the job in user space, a @c mali_gp_job_info* */
+ u32 priority; /**< [in] job priority. A lower number means higher priority */
+ u32 frame_registers[MALIGP2_NUM_REGS_FRAME]; /**< [in] core specific registers associated with this job */
+ u32 heap_grow_size; /**< [in] grow size of the PLBU heap when it runs out of memory */
+ u32 perf_counter_flag; /**< [in] bitmask indicating which performance counters to enable, see \ref _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE and related macro definitions */
+ u32 perf_counter_src0; /**< [in] source id for performance counter 0 (see ARM DDI0415A, Table 3-60) */
+ u32 perf_counter_src1; /**< [in] source id for performance counter 1 (see ARM DDI0415A, Table 3-60) */
+ u32 frame_builder_id; /**< [in] id of the originating frame builder */
+ u32 flush_id; /**< [in] flush id within the originating frame builder */
+ _mali_uk_fence_t fence; /**< [in] fence this job must wait on */
+ u64 timeline_point_ptr; /**< [in,out] pointer to u32: location where point on gp timeline for this job will be written */
+ u32 varying_memsize; /**< [in] size of varying memory used for deferred bind */
+ u32 varying_alloc_num;
+ u64 varying_alloc_list; /**< [in] list of memory handles for varying buffers used for deferred bind */
+} _mali_uk_gp_start_job_s;
+
+#define _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE (1<<0) /**< Enable performance counter SRC0 for a job */
+#define _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE (1<<1) /**< Enable performance counter SRC1 for a job */
+#define _MALI_PERFORMANCE_COUNTER_FLAG_HEATMAP_ENABLE (1<<2) /**< Enable per tile (aka heatmap) generation for a job (using the enabled counter sources) */
+
+/** @} */ /* end group _mali_uk_gpstartjob_s */
+
+typedef struct {
+ u64 user_job_ptr; /**< [out] identifier for the job in user space */
+ _mali_uk_job_status status; /**< [out] status of finished job */
+ u32 heap_current_addr; /**< [out] value of the GP PLB PL heap start address register */
+ u32 perf_counter0; /**< [out] value of performance counter 0 (see ARM DDI0415A) */
+ u32 perf_counter1; /**< [out] value of performance counter 1 (see ARM DDI0415A) */
+ u32 pending_big_job_num;
+} _mali_uk_gp_job_finished_s;
+
+typedef struct {
+ u64 user_job_ptr; /**< [out] identifier for the job in user space */
+ u32 cookie; /**< [out] identifier for the core in kernel space on which the job stalled */
+ u32 heap_added_size;
+} _mali_uk_gp_job_suspended_s;
+
+/** @} */ /* end group _mali_uk_gp */
+
+
+/** @defgroup _mali_uk_pp U/K Fragment Processor
+ * @{ */
+
+#define _MALI_PP_MAX_SUB_JOBS 8
+
+#define _MALI_PP_MAX_FRAME_REGISTERS ((0x058/4)+1)
+
+#define _MALI_PP_MAX_WB_REGISTERS ((0x02C/4)+1)
+
+#define _MALI_DLBU_MAX_REGISTERS 4
+
+/** Flag for _mali_uk_pp_start_job_s */
+#define _MALI_PP_JOB_FLAG_NO_NOTIFICATION (1<<0)
+#define _MALI_PP_JOB_FLAG_IS_WINDOW_SURFACE (1<<1)
+
+/** @defgroup _mali_uk_ppstartjob_s Fragment Processor Start Job
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_pp_start_job()
+ *
+ * To start a Fragment Processor job
+ * - associate the request with a reference to a mali_pp_job by setting
+ * @c user_job_ptr to the address of the @c mali_pp_job of the job.
+ * - set @c priority to the priority of the mali_pp_job
+ * - specify a timeout for the job by setting @c watchdog_msecs to the number of
+ * milliseconds the job is allowed to run. Specifying a value of 0 selects the
+ * default timeout in use by the device driver.
+ * - copy the frame registers from the @c mali_pp_job into @c frame_registers.
+ * For MALI200 you also need to copy the write-back 0, 1 and 2 registers.
+ * - set the @c perf_counter_flag, @c perf_counter_src0 and @c perf_counter_src1 to zero
+ * for a non-instrumented build. For an instrumented build you can use up
+ * to two performance counters. Set the corresponding bit in @c perf_counter_flag
+ * to enable them. @c perf_counter_src0 and @c perf_counter_src1 specify
+ * the source of what needs to get counted (e.g. number of vertex loader
+ * cache hits). For source id values, see ARM DDI0415A, Table 3-60.
+ * - pass in the user-kernel context in @c ctx that was returned from _mali_ukk_open()
+ *
+ * When _mali_ukk_pp_start_job() returns @c _MALI_OSK_ERR_OK, @c status contains the
+ * result of the request (see \ref _mali_uk_start_job_status). If the job could
+ * not get started (@c _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE) it should be
+ * tried again.
+ *
+ * After the job has started, _mali_wait_for_notification() will be notified
+ * when the job finished. The notification will contain a
+ * @c _mali_uk_pp_job_finished_s result. It contains the @c user_job_ptr
+ * identifier used to start the job with, the job @c status (see \ref _mali_uk_job_status),
+ * the number of milliseconds the job took to render, and values of core registers
+ * when the job finished (irq status, performance counters, renderer list
+ * address). A job has finished successfully when its status is
+ * @c _MALI_UK_JOB_STATUS_FINISHED. If the hardware detected a timeout while rendering
+ * the job, or software detected the job is taking more than @c watchdog_msecs to
+ * complete, the status will indicate @c _MALI_UK_JOB_STATUS_HANG.
+ * If the hardware detected a bus error while accessing memory associated with the
+ * job, status will indicate @c _MALI_UK_JOB_STATUS_SEG_FAULT.
+ * The status will indicate @c _MALI_UK_JOB_STATUS_NOT_STARTED if the driver
+ * had to stop the job before it started on the hardware, e.g. during driver
+ * shutdown.
+ *
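+ * A minimal sketch of filling in the argument struct for a single,
+ * non-instrumented sub job (field values and the my_pp_job/my_frame_regs
+ * objects are illustrative only):
+ * @code
+ * _mali_uk_pp_start_job_s job = { 0 };
+ * job.ctx = ctx;                                // context from _mali_ukk_open()
+ * job.user_job_ptr = (u64)(uintptr_t)my_pp_job; // hypothetical user-space job object
+ * job.priority = 0;                             // highest priority
+ * memcpy(job.frame_registers, my_frame_regs, sizeof(job.frame_registers));
+ * job.num_cores = 1;                            // single sub job
+ * job.perf_counter_flag = 0;                    // no counters (non-instrumented build)
+ * // ...then submit via _mali_ukk_pp_start_job()
+ * @endcode
+ *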
+ */
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 user_job_ptr; /**< [in] identifier for the job in user space */
+ u32 priority; /**< [in] job priority. A lower number means higher priority */
+ u32 frame_registers[_MALI_PP_MAX_FRAME_REGISTERS]; /**< [in] core specific registers associated with first sub job, see ARM DDI0415A */
+ u32 frame_registers_addr_frame[_MALI_PP_MAX_SUB_JOBS - 1]; /**< [in] ADDR_FRAME registers for sub job 1-7 */
+ u32 frame_registers_addr_stack[_MALI_PP_MAX_SUB_JOBS - 1]; /**< [in] ADDR_STACK registers for sub job 1-7 */
+ u32 wb0_registers[_MALI_PP_MAX_WB_REGISTERS];
+ u32 wb1_registers[_MALI_PP_MAX_WB_REGISTERS];
+ u32 wb2_registers[_MALI_PP_MAX_WB_REGISTERS];
+ u32 dlbu_registers[_MALI_DLBU_MAX_REGISTERS]; /**< [in] Dynamic load balancing unit registers */
+ u32 num_cores; /**< [in] Number of cores to set up (valid range: 1-8 (Mali-450) or 1-4 (Mali-400)) */
+ u32 perf_counter_flag; /**< [in] bitmask indicating which performance counters to enable, see \ref _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE and related macro definitions */
+ u32 perf_counter_src0; /**< [in] source id for performance counter 0 (see ARM DDI0415A, Table 3-60) */
+ u32 perf_counter_src1; /**< [in] source id for performance counter 1 (see ARM DDI0415A, Table 3-60) */
+ u32 frame_builder_id; /**< [in] id of the originating frame builder */
+ u32 flush_id; /**< [in] flush id within the originating frame builder */
+ u32 flags; /**< [in] See _MALI_PP_JOB_FLAG_* for a list of available flags */
+ u32 tilesx; /**< [in] number of tiles in the x direction (needed for heatmap generation) */
+ u32 tilesy; /**< [in] number of tiles in the y direction (needed for reading the heatmap memory) */
+ u32 heatmap_mem; /**< [in] memory address to store counter values per tile (aka heatmap) */
+ u32 num_memory_cookies; /**< [in] number of memory cookies attached to job */
+ u64 memory_cookies; /**< [in] pointer to array of u32 memory cookies attached to job */
+ _mali_uk_fence_t fence; /**< [in] fence this job must wait on */
+ u64 timeline_point_ptr; /**< [in,out] pointer to location of u32 where point on pp timeline for this job will be written */
+} _mali_uk_pp_start_job_s;
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 gp_args; /**< [in,out] GP uk arguments (see _mali_uk_gp_start_job_s) */
+ u64 pp_args; /**< [in,out] PP uk arguments (see _mali_uk_pp_start_job_s) */
+} _mali_uk_pp_and_gp_start_job_s;
+
+/** @} */ /* end group _mali_uk_ppstartjob_s */
+
+typedef struct {
+ u64 user_job_ptr; /**< [out] identifier for the job in user space */
+ _mali_uk_job_status status; /**< [out] status of finished job */
+ u32 perf_counter0[_MALI_PP_MAX_SUB_JOBS]; /**< [out] value of performance counter 0 (see ARM DDI0415A), one for each sub job */
+ u32 perf_counter1[_MALI_PP_MAX_SUB_JOBS]; /**< [out] value of performance counter 1 (see ARM DDI0415A), one for each sub job */
+ u32 perf_counter_src0; /**< [out] source id for performance counter 0 */
+ u32 perf_counter_src1; /**< [out] source id for performance counter 1 */
+} _mali_uk_pp_job_finished_s;
+
+typedef struct {
+ u32 number_of_enabled_cores; /**< [out] the new number of enabled cores */
+} _mali_uk_pp_num_cores_changed_s;
+
+
+
+/**
+ * Flags to indicate write-back units
+ */
+typedef enum {
+ _MALI_UK_PP_JOB_WB0 = 1,
+ _MALI_UK_PP_JOB_WB1 = 2,
+ _MALI_UK_PP_JOB_WB2 = 4,
+} _mali_uk_pp_job_wbx_flag;
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 fb_id; /**< [in] Frame builder ID of job to disable WB units for */
+ u32 wb0_memory;
+ u32 wb1_memory;
+ u32 wb2_memory;
+} _mali_uk_pp_disable_wb_s;
+
+
+/** @} */ /* end group _mali_uk_pp */
+
+/** @defgroup _mali_uk_soft_job U/K Soft Job
+ * @{ */
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 user_job; /**< [in] identifier for the job in user space */
+ u64 job_id_ptr; /**< [in,out] pointer to location of u32 where job id will be written */
+ _mali_uk_fence_t fence; /**< [in] fence this job must wait on */
+ u32 point; /**< [out] point on soft timeline for this job */
+ u32 type; /**< [in] type of soft job */
+} _mali_uk_soft_job_start_s;
+
+typedef struct {
+ u64 user_job; /**< [out] identifier for the job in user space */
+} _mali_uk_soft_job_activated_s;
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 job_id; /**< [in] id for soft job */
+} _mali_uk_soft_job_signal_s;
+
+/** @} */ /* end group _mali_uk_soft_job */
+
+typedef struct {
+ u32 counter_id;
+ u32 key;
+ int enable;
+} _mali_uk_annotate_profiling_mem_counter_s;
+
+typedef struct {
+ u32 sampling_rate;
+ int enable;
+} _mali_uk_annotate_profiling_enable_s;
+
+
+/** @addtogroup _mali_uk_core U/K Core
+ * @{ */
+
+/** @defgroup _mali_uk_waitfornotification_s Wait For Notification
+ * @{ */
+
+/** @brief Notification type encodings
+ *
+ * Each Notification type is an ordered pair of (subsystem,id), and is unique.
+ *
+ * The encoding of subsystem,id into a 32-bit word is:
+ * encoding = (( subsystem << _MALI_NOTIFICATION_SUBSYSTEM_SHIFT ) & _MALI_NOTIFICATION_SUBSYSTEM_MASK)
+ * | (( id << _MALI_NOTIFICATION_ID_SHIFT ) & _MALI_NOTIFICATION_ID_MASK)
+ *
+ * @see _mali_uk_wait_for_notification_s
+ */
+typedef enum {
+ /** core notifications */
+
+ _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS = (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x20,
+ _MALI_NOTIFICATION_APPLICATION_QUIT = (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x40,
+ _MALI_NOTIFICATION_SETTINGS_CHANGED = (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x80,
+ _MALI_NOTIFICATION_SOFT_ACTIVATED = (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x100,
+
+ /** Fragment Processor notifications */
+
+ _MALI_NOTIFICATION_PP_FINISHED = (_MALI_UK_PP_SUBSYSTEM << 16) | 0x10,
+ _MALI_NOTIFICATION_PP_NUM_CORE_CHANGE = (_MALI_UK_PP_SUBSYSTEM << 16) | 0x20,
+
+ /** Vertex Processor notifications */
+
+ _MALI_NOTIFICATION_GP_FINISHED = (_MALI_UK_GP_SUBSYSTEM << 16) | 0x10,
+ _MALI_NOTIFICATION_GP_STALLED = (_MALI_UK_GP_SUBSYSTEM << 16) | 0x20,
+
+ /** Profiling notifications */
+ _MALI_NOTIFICATION_ANNOTATE_PROFILING_MEM_COUNTER = (_MALI_UK_PROFILING_SUBSYSTEM << 16) | 0x10,
+ _MALI_NOTIFICATION_ANNOTATE_PROFILING_ENABLE = (_MALI_UK_PROFILING_SUBSYSTEM << 16) | 0x20,
+} _mali_uk_notification_type;
+
+/** Helpers for splitting a 32-bit notification value into its subsystem and id parts */
+#define _MALI_NOTIFICATION_SUBSYSTEM_MASK 0xFFFF0000
+#define _MALI_NOTIFICATION_SUBSYSTEM_SHIFT 16
+#define _MALI_NOTIFICATION_ID_MASK 0x0000FFFF
+#define _MALI_NOTIFICATION_ID_SHIFT 0
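+
+/** For illustration, a notification value splits back into its (subsystem, id)
+ * pair with the masks and shifts above; for example,
+ * _MALI_NOTIFICATION_GP_FINISHED decodes to (_MALI_UK_GP_SUBSYSTEM, 0x10).
+ * @code
+ * u32 subsystem = (value & _MALI_NOTIFICATION_SUBSYSTEM_MASK) >> _MALI_NOTIFICATION_SUBSYSTEM_SHIFT;
+ * u32 id        = (value & _MALI_NOTIFICATION_ID_MASK) >> _MALI_NOTIFICATION_ID_SHIFT;
+ * @endcode
+ */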
+
+
+/** @brief Enumeration of possible settings which match mali_setting_t in user space
+ */
+typedef enum {
+ _MALI_UK_USER_SETTING_SW_EVENTS_ENABLE = 0,
+ _MALI_UK_USER_SETTING_COLORBUFFER_CAPTURE_ENABLED,
+ _MALI_UK_USER_SETTING_DEPTHBUFFER_CAPTURE_ENABLED,
+ _MALI_UK_USER_SETTING_STENCILBUFFER_CAPTURE_ENABLED,
+ _MALI_UK_USER_SETTING_PER_TILE_COUNTERS_CAPTURE_ENABLED,
+ _MALI_UK_USER_SETTING_BUFFER_CAPTURE_COMPOSITOR,
+ _MALI_UK_USER_SETTING_BUFFER_CAPTURE_WINDOW,
+ _MALI_UK_USER_SETTING_BUFFER_CAPTURE_OTHER,
+ _MALI_UK_USER_SETTING_BUFFER_CAPTURE_N_FRAMES,
+ _MALI_UK_USER_SETTING_BUFFER_CAPTURE_RESIZE_FACTOR,
+ _MALI_UK_USER_SETTING_SW_COUNTER_ENABLED,
+ _MALI_UK_USER_SETTING_MAX,
+} _mali_uk_user_setting_t;
+
+/* See mali_user_settings_db.c */
+extern const char *_mali_uk_user_setting_descriptions[];
+#define _MALI_UK_USER_SETTING_DESCRIPTIONS \
+ { \
+ "sw_events_enable", \
+ "colorbuffer_capture_enable", \
+ "depthbuffer_capture_enable", \
+ "stencilbuffer_capture_enable", \
+ "per_tile_counters_enable", \
+ "buffer_capture_compositor", \
+ "buffer_capture_window", \
+ "buffer_capture_other", \
+ "buffer_capture_n_frames", \
+ "buffer_capture_resize_factor", \
+ "sw_counters_enable", \
+ };
+
+/** @brief struct to hold the value to a particular setting as seen in the kernel space
+ */
+typedef struct {
+ _mali_uk_user_setting_t setting;
+ u32 value;
+} _mali_uk_settings_changed_s;
+
+/** @brief Arguments for _mali_ukk_wait_for_notification()
+ *
+ * On successful return from _mali_ukk_wait_for_notification(), the members of
+ * this structure will indicate the reason for notification.
+ *
+ * Specifically, the source of the notification can be identified by the
+ * subsystem and id fields of the _mali_uk_notification_type in the type
+ * member. The type member is encoded in a way that divides the types into a
+ * subsystem field and a per-subsystem ID field. See
+ * _mali_uk_notification_type for more information.
+ *
+ * Interpreting the data union member depends on the notification type:
+ *
+ * - type == _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS
+ * - The kernel side is shutting down. No further
+ * _mali_uk_wait_for_notification() calls should be made.
+ * - In this case, the value of the data union member is undefined.
+ * - This is used to indicate to the user space client that it should close
+ * the connection to the Mali Device Driver.
+ * - type == _MALI_NOTIFICATION_PP_FINISHED
+ * - The notification data is of type _mali_uk_pp_job_finished_s. It contains the user_job_ptr
+ * identifier used to start the job with, the job status, the number of milliseconds the job took to render,
+ * and values of core registers when the job finished (irq status, performance counters, renderer list
+ * address).
+ * - A job has finished successfully when its status member is _MALI_UK_JOB_STATUS_FINISHED.
+ * - If the hardware detected a timeout while rendering the job, or software detected the job is
+ * taking more than watchdog_msecs (see _mali_ukk_pp_start_job()) to complete, the status member will
+ * indicate _MALI_UK_JOB_STATUS_HANG.
+ * - If the hardware detected a bus error while accessing memory associated with the job, status will
+ * indicate _MALI_UK_JOB_STATUS_SEG_FAULT.
+ * - Status will indicate _MALI_UK_JOB_STATUS_NOT_STARTED if the driver had to stop the job before it
+ * started on the hardware, e.g. when the driver shuts down.
+ * - type == _MALI_NOTIFICATION_GP_FINISHED
+ * - The notification data is of type _mali_uk_gp_job_finished_s. The notification is similar to that of
+ * type == _MALI_NOTIFICATION_PP_FINISHED, except that several other GP core register values are returned.
+ * The status values have the same meaning as for type == _MALI_NOTIFICATION_PP_FINISHED.
+ * - type == _MALI_NOTIFICATION_GP_STALLED
+ * - The notification data is of type _mali_uk_gp_job_suspended_s. It contains the user_job_ptr
+ * identifier used to start the job with, the reason why the job stalled and a cookie to identify the core on
+ * which the job stalled.
+ * - The reason member of gp_job_suspended is set to _MALIGP_JOB_SUSPENDED_OUT_OF_MEMORY
+ * when the polygon list builder unit has run out of memory.
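+ *
+ * A minimal user-side sketch of a wait loop dispatching on the notification
+ * type (error handling omitted, the handle_*() functions are hypothetical):
+ * @code
+ * _mali_uk_wait_for_notification_s args = { 0 };
+ * args.ctx = ctx; // context from _mali_ukk_open()
+ * for (;;) {
+ *     if (_MALI_OSK_ERR_OK != _mali_ukk_wait_for_notification(&args)) break;
+ *     switch (args.type) {
+ *     case _MALI_NOTIFICATION_PP_FINISHED:
+ *         handle_pp_finished(&args.data.pp_job_finished);
+ *         break;
+ *     case _MALI_NOTIFICATION_GP_STALLED:
+ *         handle_gp_stalled(&args.data.gp_job_suspended);
+ *         break;
+ *     case _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS:
+ *         return; // make no further wait calls
+ *     default:
+ *         break;
+ *     }
+ * }
+ * @endcode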
+ */
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_notification_type type; /**< [out] Type of notification available */
+ union {
+ _mali_uk_gp_job_suspended_s gp_job_suspended;/**< [out] Notification data for _MALI_NOTIFICATION_GP_STALLED notification type */
+ _mali_uk_gp_job_finished_s gp_job_finished; /**< [out] Notification data for _MALI_NOTIFICATION_GP_FINISHED notification type */
+ _mali_uk_pp_job_finished_s pp_job_finished; /**< [out] Notification data for _MALI_NOTIFICATION_PP_FINISHED notification type */
+ _mali_uk_settings_changed_s setting_changed;/**< [out] Notification data for _MALI_NOTIFICATION_SETTINGS_CHANGED notification type */
+ _mali_uk_soft_job_activated_s soft_job_activated; /**< [out] Notification data for _MALI_NOTIFICATION_SOFT_ACTIVATED notification type */
+ _mali_uk_annotate_profiling_mem_counter_s profiling_mem_counter;
+ _mali_uk_annotate_profiling_enable_s profiling_enable;
+ } data;
+} _mali_uk_wait_for_notification_s;
+
+/** @brief Arguments for _mali_ukk_post_notification()
+ *
+ * Posts the specified notification to the notification queue for this application.
+ * This is used to send a quit message to the callback thread.
+ */
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_notification_type type; /**< [in] Type of notification to post */
+} _mali_uk_post_notification_s;
+
+/** @} */ /* end group _mali_uk_waitfornotification_s */
+
+/** @defgroup _mali_uk_getapiversion_s Get API Version
+ * @{ */
+
+/** helpers for Device Driver API version handling */
+
+/** @brief Encode a version ID from a 16-bit input
+ *
+ * @note The input must fit in 16 bits; it must not exceed that range. */
+#define _MAKE_VERSION_ID(x) (((x) << 16UL) | (x))
+
+/** @brief Check whether a 32-bit value is likely to be a Device Driver API
+ * version ID. */
+#define _IS_VERSION_ID(x) (((x) & 0xFFFF) == (((x) >> 16UL) & 0xFFFF))
+
+/** @brief Decode a 16-bit version number from a 32-bit Device Driver API version
+ * ID */
+#define _GET_VERSION(x) (((x) >> 16UL) & 0xFFFF)
+
+/** @brief Determine whether two 32-bit encoded version IDs match */
+#define _IS_API_MATCH(x, y) (_IS_VERSION_ID((x)) && _IS_VERSION_ID((y)) && (_GET_VERSION((x)) == _GET_VERSION((y))))
+
+/**
+ * API version define.
+ * Indicates the version of the kernel API.
+ * The version is a 16-bit integer incremented on each API change.
+ * The 16-bit integer is stored twice in a 32-bit integer.
+ * For example, for version 1 the value would be 0x00010001.
+ */
+#define _MALI_API_VERSION 800
+#define _MALI_UK_API_VERSION _MAKE_VERSION_ID(_MALI_API_VERSION)
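+
+/* Worked example: with _MALI_API_VERSION == 800 (0x320), _MALI_UK_API_VERSION
+ * evaluates to (0x320 << 16) | 0x320 == 0x03200320, which _IS_VERSION_ID()
+ * accepts and _GET_VERSION() decodes back to 800. */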
+
+/**
+ * The API version is a 16-bit integer stored in both the lower and upper 16-bits
+ * of a 32-bit value. The 16-bit API version value is incremented on each API
+ * change. Version 1 would be 0x00010001. Used in _mali_uk_get_api_version_s.
+ */
+typedef u32 _mali_uk_api_version;
+
+/** @brief Arguments for _mali_uk_get_api_version()
+ *
+ * The user-side interface version must be written into the version member,
+ * encoded using _MAKE_VERSION_ID(). It will be compared to the API version of
+ * the kernel-side interface.
+ *
+ * On successful return, the version member will be the API version of the
+ * kernel-side interface. _MALI_UK_API_VERSION macro defines the current version
+ * of the API.
+ *
+ * The compatible member must be checked to see if the version of the user-side
+ * interface is compatible with the kernel-side interface, since future versions
+ * of the interface may be backwards compatible.
+ */
+typedef struct {
+ u32 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_api_version version; /**< [in,out] API version of user-side interface. */
+ int compatible; /**< [out] @c 1 when @c version is compatible, @c 0 otherwise */
+} _mali_uk_get_api_version_s;
+
+/** @brief Arguments for _mali_uk_get_api_version_v2()
+ *
+ * The user-side interface version must be written into the version member,
+ * encoded using _MAKE_VERSION_ID(). It will be compared to the API version of
+ * the kernel-side interface.
+ *
+ * On successful return, the version member will be the API version of the
+ * kernel-side interface. _MALI_UK_API_VERSION macro defines the current version
+ * of the API.
+ *
+ * The compatible member must be checked to see if the version of the user-side
+ * interface is compatible with the kernel-side interface, since future versions
+ * of the interface may be backwards compatible.
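+ *
+ * A minimal user-side sketch of the handshake (a _mali_ukk_get_api_version_v2()
+ * entry point is assumed from this file's naming convention):
+ * @code
+ * _mali_uk_get_api_version_v2_s args = { 0 };
+ * args.ctx = ctx;                      // context from _mali_ukk_open()
+ * args.version = _MALI_UK_API_VERSION; // user-side version, already encoded
+ * if (_MALI_OSK_ERR_OK == _mali_ukk_get_api_version_v2(&args) && args.compatible) {
+ *     // args.version now holds the kernel-side API version
+ * }
+ * @endcode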
+ */
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_api_version version; /**< [in,out] API version of user-side interface. */
+ int compatible; /**< [out] @c 1 when @c version is compatible, @c 0 otherwise */
+} _mali_uk_get_api_version_v2_s;
+
+/** @} */ /* end group _mali_uk_getapiversion_s */
+
+/** @defgroup _mali_uk_get_user_settings_s Get user space settings */
+
+/** @brief struct to keep the matching values of the user space settings within a certain context
+ *
+ * Each member of the settings array corresponds to a matching setting in the user space and its value is the value
+ * of that particular setting.
+ *
+ * All settings are read with reference to the context pointed to by the ctx member.
+ *
+ */
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 settings[_MALI_UK_USER_SETTING_MAX]; /**< [out] The values for all settings */
+} _mali_uk_get_user_settings_s;
+
+/** @brief struct to hold the value of a particular setting from the user space within a given context
+ */
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_user_setting_t setting; /**< [in] setting to get */
+ u32 value; /**< [out] value of setting */
+} _mali_uk_get_user_setting_s;
+
+/** @brief Arguments for _mali_ukk_request_high_priority() */
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+} _mali_uk_request_high_priority_s;
+
+/** @brief Arguments for _mali_ukk_pending_submit() */
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+} _mali_uk_pending_submit_s;
+
+/** @} */ /* end group _mali_uk_core */
+
+
+/** @defgroup _mali_uk_memory U/K Memory
+ * @{ */
+
+#define _MALI_MEMORY_ALLOCATE_RESIZEABLE (1<<4) /* Buffer can shrink or grow */
+#define _MALI_MEMORY_ALLOCATE_NO_BIND_GPU (1<<5) /* Not mapped to GPU at allocation time; bind must be called later */
+#define _MALI_MEMORY_ALLOCATE_SWAPPABLE (1<<6) /* Allocate swappable memory */
+#define _MALI_MEMORY_ALLOCATE_DEFER_BIND (1<<7) /* Not mapped to GPU at allocation time; bind must be called later */
+
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 gpu_vaddr; /**< [in] GPU virtual address */
+ u32 vsize; /**< [in] virtual size of the allocation */
+ u32 psize; /**< [in] physical size of the allocation */
+ u32 flags;
+ u64 backend_handle; /**< [out] backend handle */
+ struct {
+ /* buffer types*/
+ /* CPU read/write info*/
+ } buffer_info;
+} _mali_uk_alloc_mem_s;
+
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 gpu_vaddr; /**< [in] use as handle to free allocation */
+ u32 free_pages_nr; /**< [out] number of pages freed */
+} _mali_uk_free_mem_s;
+
+
+#define _MALI_MEMORY_BIND_BACKEND_UMP (1<<8)
+#define _MALI_MEMORY_BIND_BACKEND_DMA_BUF (1<<9)
+#define _MALI_MEMORY_BIND_BACKEND_MALI_MEMORY (1<<10)
+#define _MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY (1<<11)
+#define _MALI_MEMORY_BIND_BACKEND_EXT_COW (1<<12)
+#define _MALI_MEMORY_BIND_BACKEND_HAVE_ALLOCATION (1<<13)
+
+
+#define _MALI_MEMORY_BIND_BACKEND_MASK (_MALI_MEMORY_BIND_BACKEND_UMP| \
+ _MALI_MEMORY_BIND_BACKEND_DMA_BUF |\
+ _MALI_MEMORY_BIND_BACKEND_MALI_MEMORY |\
+ _MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY |\
+ _MALI_MEMORY_BIND_BACKEND_EXT_COW |\
+ _MALI_MEMORY_BIND_BACKEND_HAVE_ALLOCATION)
+
+
+#define _MALI_MEMORY_GPU_READ_ALLOCATE (1<<16)
+
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 vaddr; /**< [in] mali address to map the physical memory to */
+ u32 size; /**< [in] size */
+ u32 flags; /**< [in] see _MALI_MEMORY_BIND_BACKEND_* */
+ u32 padding; /**< padding for 32/64-bit struct alignment */
+ union {
+ struct {
+ u32 secure_id; /**< [in] secure id */
+ u32 rights; /**< [in] rights necessary for accessing memory */
+ u32 flags; /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */
+ } bind_ump;
+ struct {
+ u32 mem_fd; /**< [in] Memory descriptor */
+ u32 rights; /**< [in] rights necessary for accessing memory */
+ u32 flags; /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */
+ } bind_dma_buf;
+ struct {
+ /**/
+ } bind_mali_memory;
+ struct {
+ u32 phys_addr; /**< [in] physical address */
+ u32 rights; /**< [in] rights necessary for accessing memory */
+ u32 flags; /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */
+ } bind_ext_memory;
+ } mem_union;
+} _mali_uk_bind_mem_s;
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 flags; /**< [in] see _MALI_MEMORY_BIND_BACKEND_* */
+ u32 vaddr; /**< [in] identifier for mapped memory object in kernel space */
+} _mali_uk_unbind_mem_s;
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 target_handle; /**< [in] handle of the allocation to COW */
+ u32 target_offset; /**< [in] offset in the target allocation to COW (supports COW of memory allocated from a memory bank; PAGE_SIZE aligned) */
+ u32 target_size; /**< [in] size in bytes of the target allocation to COW (supports memory banks; PAGE_SIZE aligned) */
+ u32 range_start; /**< [in] start offset of the range to reallocate, from the start of the allocation (PAGE_SIZE aligned) */
+ u32 range_size; /**< [in] size of the range to reallocate (PAGE_SIZE aligned) */
+ u32 vaddr; /**< [in] mali address for the new allocation */
+ u32 backend_handle; /**< [out] backend handle */
+ u32 flags;
+} _mali_uk_cow_mem_s;
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 range_start; /**< [in] start offset of the range to reallocate, from the start of the allocation */
+ u32 size; /**< [in] size of the range to reallocate */
+ u32 vaddr; /**< [in] mali address for the new allocation */
+ s32 change_pages_nr; /**< [out] record the page number change for cow operation */
+} _mali_uk_cow_modify_range_s;
+
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 mem_fd; /**< [in] Memory descriptor */
+ u32 size; /**< [out] size */
+} _mali_uk_dma_buf_get_size_s;
+
+/** Flag for _mali_uk_map_external_mem_s, _mali_uk_attach_ump_mem_s and _mali_uk_attach_dma_buf_s */
+#define _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE (1<<0)
+
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 vaddr; /**< [in] the buffer to resize */
+ u32 psize; /**< [in] requested physical size of this memory */
+} _mali_uk_mem_resize_s;
+
+/**
+ * @brief Arguments for _mali_ukk_mem_write_safe()
+ */
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 src; /**< [in] Pointer to source data */
+ u64 dest; /**< [in] Destination Mali buffer */
+ u32 size; /**< [in,out] Number of bytes to write/copy on input, number of bytes actually written/copied on output */
+} _mali_uk_mem_write_safe_s;
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 size; /**< [out] size of MMU page table information (registers + page tables) */
+} _mali_uk_query_mmu_page_table_dump_size_s;
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 size; /**< [in] size of buffer to receive mmu page table information */
+ u64 buffer; /**< [in,out] buffer to receive mmu page table information */
+ u32 register_writes_size; /**< [out] size of MMU register dump */
+ u64 register_writes; /**< [out] pointer within buffer where MMU register dump is stored */
+ u32 page_table_dump_size; /**< [out] size of MMU page table dump */
+ u64 page_table_dump; /**< [out] pointer within buffer where MMU page table dump is stored */
+} _mali_uk_dump_mmu_page_table_s;
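+
+/* The two structs above form a query-then-dump pair. A minimal sketch,
+ * assuming _mali_ukk_*() entry points named after the structs per this
+ * file's convention:
+ * @code
+ * _mali_uk_query_mmu_page_table_dump_size_s q = { 0 };
+ * q.ctx = ctx;
+ * _mali_ukk_query_mmu_page_table_dump_size(&q);
+ *
+ * _mali_uk_dump_mmu_page_table_s d = { 0 };
+ * d.ctx = ctx;
+ * d.size = q.size;
+ * d.buffer = (u64)(uintptr_t)buf; // caller-provided buffer of q.size bytes
+ * _mali_ukk_dump_mmu_page_table(&d);
+ * // d.register_writes and d.page_table_dump now point into buf
+ * @endcode
+ */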
+
+/** @} */ /* end group _mali_uk_memory */
+
+
+/** @addtogroup _mali_uk_pp U/K Fragment Processor
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_get_pp_number_of_cores()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_pp_number_of_cores(), @c number_of_cores
+ * will contain the number of Fragment Processor cores in the system.
+ */
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 number_of_total_cores; /**< [out] Total number of Fragment Processor cores in the system */
+ u32 number_of_enabled_cores; /**< [out] Number of enabled Fragment Processor cores */
+} _mali_uk_get_pp_number_of_cores_s;
+
+/** @brief Arguments for _mali_ukk_get_pp_core_version()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_pp_core_version(), @c version contains
+ * the version that all Fragment Processor cores are compatible with.
+ */
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_core_version version; /**< [out] version returned from core, see \ref _mali_core_version */
+ u32 padding;
+} _mali_uk_get_pp_core_version_s;
+
+/** @} */ /* end group _mali_uk_pp */
+
+
+/** @addtogroup _mali_uk_gp U/K Vertex Processor
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_get_gp_number_of_cores()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_gp_number_of_cores(), @c number_of_cores
+ * will contain the number of Vertex Processor cores in the system.
+ */
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 number_of_cores; /**< [out] number of Vertex Processor cores in the system */
+} _mali_uk_get_gp_number_of_cores_s;
+
+/** @brief Arguments for _mali_ukk_get_gp_core_version()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_gp_core_version(), @c version contains
+ * the version that all Vertex Processor cores are compatible with.
+ */
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_core_version version; /**< [out] version returned from core, see \ref _mali_core_version */
+} _mali_uk_get_gp_core_version_s;
+
+/** @} */ /* end group _mali_uk_gp */
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 event_id; /**< [in] event id to register (see enum mali_profiling_events for values) */
+ u32 data[5]; /**< [in] event specific data */
+} _mali_uk_profiling_add_event_s;
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 memory_usage; /**< [out] total memory usage */
+ u32 vaddr; /**< [in] mali address for the cow allocation */
+ s32 change_pages_nr; /**< [out] record the page number change for cow operation */
+} _mali_uk_profiling_memory_usage_get_s;
+
+
+/** @addtogroup _mali_uk_memory U/K Memory
+ * @{ */
+
+/** @brief Arguments to _mali_ukk_mem_mmap()
+ *
+ * Use of the phys_addr member depends on whether the driver is compiled for
+ * Mali-MMU or non-MMU:
+ * - in the nonMMU case, this is the physical address of the memory as seen by
+ * the CPU (which may be a constant offset from that used by Mali)
+ * - in the MMU case, this is the Mali Virtual base address of the memory to
+ * allocate, and the particular physical pages used to back the memory are
+ * entirely determined by _mali_ukk_mem_mmap(). The details of the physical pages
+ * are not reported to user-space for security reasons.
+ *
+ * The cookie member must be stored for use later when freeing the memory by
+ * calling _mali_ukk_mem_munmap(). In the Mali-MMU case, the cookie is secure.
+ *
+ * The ukk_private word must be set to zero when calling from user-space. On
+ * Kernel-side, the OS implementation of the U/K interface can use it to
+ * communicate data to the OS implementation of the OSK layer. In particular,
+ * _mali_ukk_get_big_block() calls _mali_ukk_mem_mmap() directly, and
+ * will communicate its own ukk_private word through the ukk_private member
+ * here. The common code itself will not inspect or modify the ukk_private
+ * word, and so it may be safely used for whatever purposes necessary to
+ * integrate Mali Memory handling into the OS.
+ *
+ * The uku_private member is currently reserved for use by the user-side
+ * implementation of the U/K interface. Its value must be zero.
+ */
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping; /**< [out] Returns user-space virtual address for the mapping */
+ u32 size; /**< [in] Size of the requested mapping */
+ u32 phys_addr; /**< [in] Physical address - could be offset, depending on caller+callee convention */
+ mali_bool writeable;
+} _mali_uk_mem_mmap_s;
+
+/** @brief Arguments to _mali_ukk_mem_munmap()
+ *
+ * The cookie and mapping members must be that returned from the same previous
+ * call to _mali_ukk_mem_mmap(). The size member must correspond to cookie
+ * and mapping - that is, it must be the value originally supplied to a call to
+ * _mali_ukk_mem_mmap that returned the values of mapping and cookie.
+ *
+ * An error will be returned if an attempt is made to unmap only part of the
+ * originally obtained range, or to unmap more than was originally obtained.
+ */
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping; /**< [in] The mapping returned from mmap call */
+ u32 size; /**< [in] The size passed to mmap call */
+} _mali_uk_mem_munmap_s;
+/** @} */ /* end group _mali_uk_memory */
+
+/** @defgroup _mali_uk_vsync U/K VSYNC Wait Reporting Module
+ * @{ */
+
+/** @brief VSYNC events
+ *
+ * These events are reported when the DDK starts to wait for vsync and when the
+ * vsync has occurred and the DDK can continue on to the next frame.
+ */
+typedef enum _mali_uk_vsync_event {
+ _MALI_UK_VSYNC_EVENT_BEGIN_WAIT = 0,
+ _MALI_UK_VSYNC_EVENT_END_WAIT
+} _mali_uk_vsync_event;
+
+/** @brief Arguments to _mali_ukk_vsync_event()
+ *
+ */
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_vsync_event event; /**< [in] VSYNC event type */
+} _mali_uk_vsync_event_report_s;
+
+/** @} */ /* end group _mali_uk_vsync */
+
+/** @defgroup _mali_uk_sw_counters_report U/K Software Counter Reporting
+ * @{ */
+
+/** @brief Software counter values
+ *
+ * Values recorded for each of the software counters during a single renderpass.
+ */
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 counters; /**< [in] The array of u32 counter values */
+ u32 num_counters; /**< [in] The number of elements in counters array */
+} _mali_uk_sw_counters_report_s;
+
+/** @} */ /* end group _mali_uk_sw_counters_report */
+
+/** @defgroup _mali_uk_timeline U/K Mali Timeline
+ * @{ */
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 timeline; /**< [in] timeline id */
+ u32 point; /**< [out] latest point on timeline */
+} _mali_uk_timeline_get_latest_point_s;
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_fence_t fence; /**< [in] fence */
+ u32 timeout; /**< [in] timeout (0 for no wait, -1 for blocking) */
+ u32 status; /**< [out] status of fence (1 if signaled, 0 if timeout) */
+} _mali_uk_timeline_wait_s;
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_fence_t fence; /**< [in] mali fence to create linux sync fence from */
+ s32 sync_fd; /**< [out] file descriptor for new linux sync fence */
+} _mali_uk_timeline_create_sync_fence_s;
+
+/** @} */ /* end group _mali_uk_timeline */
+
+/** @} */ /* end group u_k_api */
+
+/** @} */ /* end group uddapi */
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ s32 stream_fd; /**< [in] The profiling kernel base stream fd handle */
+} _mali_uk_profiling_stream_fd_get_s;
+
+typedef struct {
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u64 control_packet_data; /**< [in] the control packet data for control settings */
+ u32 control_packet_size; /**< [in] The control packet size */
+ u64 response_packet_data; /**< [out] The response packet data */
+ u32 response_packet_size; /**< [in,out] The response packet size */
+} _mali_uk_profiling_control_set_s;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UTGARD_UK_TYPES_H__ */
diff --git a/drivers/gpu/arm/utgard/linux/license/gpl/mali_kernel_license.h b/drivers/gpu/arm/utgard/linux/license/gpl/mali_kernel_license.h
new file mode 100644
index 000000000000..c88c992e859b
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/license/gpl/mali_kernel_license.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2010, 2013, 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_license.h
+ * Defines for the macro MODULE_LICENSE.
+ */
+
+#ifndef __MALI_KERNEL_LICENSE_H__
+#define __MALI_KERNEL_LICENSE_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MALI_KERNEL_LINUX_LICENSE "GPL"
+#define MALI_LICENSE_IS_GPL 1
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_KERNEL_LICENSE_H__ */
diff --git a/drivers/gpu/arm/utgard/linux/mali_device_pause_resume.c b/drivers/gpu/arm/utgard/linux/mali_device_pause_resume.c
new file mode 100644
index 000000000000..37076a2eaf67
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_device_pause_resume.c
@@ -0,0 +1,36 @@
+/**
+ * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_device_pause_resume.c
+ * Implementation of the Mali pause/resume functionality
+ */
+
+#include <linux/module.h>
+#include <linux/mali/mali_utgard.h>
+#include "mali_pm.h"
+
+void mali_dev_pause(void)
+{
+ /*
+ * Deactivate all groups to prevent the hardware from being touched
+ * while the Mali device is paused
+ */
+ mali_pm_os_suspend(MALI_FALSE);
+}
+
+EXPORT_SYMBOL(mali_dev_pause);
+
+void mali_dev_resume(void)
+{
+ mali_pm_os_resume();
+}
+
+EXPORT_SYMBOL(mali_dev_resume);
diff --git a/drivers/gpu/arm/utgard/linux/mali_kernel_linux.c b/drivers/gpu/arm/utgard/linux/mali_kernel_linux.c
new file mode 100644
index 000000000000..34cb3d7ca95a
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_kernel_linux.c
@@ -0,0 +1,941 @@
+/**
+ * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_linux.c
+ * Implementation of the Linux device driver entrypoints
+ */
+#include <linux/module.h> /* kernel module definitions */
+#include <linux/fs.h> /* file system operations */
+#include <linux/cdev.h> /* character device definitions */
+#include <linux/mm.h> /* memory manager definitions */
+#include <linux/mali/mali_utgard_ioctl.h>
+#include <linux/version.h>
+#include <linux/device.h>
+#include "mali_kernel_license.h"
+#include <linux/platform_device.h>
+#include <linux/miscdevice.h>
+#include <linux/bug.h>
+#include <linux/of.h>
+
+#include <linux/mali/mali_utgard.h>
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_kernel_core.h"
+#include "mali_osk.h"
+#include "mali_kernel_linux.h"
+#include "mali_ukk.h"
+#include "mali_ukk_wrappers.h"
+#include "mali_kernel_sysfs.h"
+#include "mali_pm.h"
+#include "mali_kernel_license.h"
+#include "mali_memory.h"
+#include "mali_memory_dma_buf.h"
+#include "mali_memory_manager.h"
+#include "mali_memory_swap_alloc.h"
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+#include "mali_profiling_internal.h"
+#endif
+#if defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_MALI_DVFS)
+#include "mali_osk_profiling.h"
+#include "mali_dvfs_policy.h"
+
+static int is_first_resume = 1;
+/* Store the clk and vol for boot/insmod and mali_resume */
+static struct mali_gpu_clk_item mali_gpu_clk[2];
+#endif
+
+/* Streamline support for the Mali driver */
+#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_MALI400_PROFILING)
+/* Ask Linux to create the tracepoints */
+#define CREATE_TRACE_POINTS
+#include "mali_linux_trace.h"
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_event);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_hw_counter);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mali_sw_counters);
+#endif /* CONFIG_TRACEPOINTS */
+
+/* from the __malidrv_build_info.c file that is generated during build */
+extern const char *__malidrv_build_info(void);
+
+/* Module parameter to control log level */
+int mali_debug_level = 2;
+module_param(mali_debug_level, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(mali_debug_level, "Higher number, more dmesg output");
+
+extern int mali_max_job_runtime;
+module_param(mali_max_job_runtime, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_max_job_runtime, "Maximum allowed job runtime in msecs.\nJobs will be killed after this no matter what");
+
+extern int mali_l2_max_reads;
+module_param(mali_l2_max_reads, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_l2_max_reads, "Maximum reads for Mali L2 cache");
+
+extern unsigned int mali_dedicated_mem_start;
+module_param(mali_dedicated_mem_start, uint, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_dedicated_mem_start, "Physical start address of dedicated Mali GPU memory.");
+
+extern unsigned int mali_dedicated_mem_size;
+module_param(mali_dedicated_mem_size, uint, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_dedicated_mem_size, "Size of dedicated Mali GPU memory.");
+
+extern unsigned int mali_shared_mem_size;
+module_param(mali_shared_mem_size, uint, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_shared_mem_size, "Size of shared Mali GPU memory.");
+
+#if defined(CONFIG_MALI400_PROFILING)
+extern int mali_boot_profiling;
+module_param(mali_boot_profiling, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_boot_profiling, "Start profiling as a part of Mali driver initialization");
+#endif
+
+extern int mali_max_pp_cores_group_1;
+module_param(mali_max_pp_cores_group_1, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_max_pp_cores_group_1, "Limit the number of PP cores to use from first PP group.");
+
+extern int mali_max_pp_cores_group_2;
+module_param(mali_max_pp_cores_group_2, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_max_pp_cores_group_2, "Limit the number of PP cores to use from second PP group (Mali-450 only).");
+
+extern unsigned int mali_mem_swap_out_threshold_value;
+module_param(mali_mem_swap_out_threshold_value, uint, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_mem_swap_out_threshold_value, "Threshold value used to limit how much swappable memory cached in Mali driver.");
+
+#if defined(CONFIG_MALI_DVFS)
+/** Max FPS, matching the display VSYNC rate (default 60); can be set as a module parameter */
+extern int mali_max_system_fps;
+module_param(mali_max_system_fps, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_max_system_fps, "Max system fps the same as display VSYNC.");
+
+/** Lower limit on the desired FPS (default 58); can be set as a module parameter */
+extern int mali_desired_fps;
+module_param(mali_desired_fps, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_desired_fps, "Desired fps, a bit lower than max_system_fps");
+#endif
+
+#if MALI_ENABLE_CPU_CYCLES
+#include <linux/cpumask.h>
+#include <linux/timer.h>
+#include <asm/smp.h>
+static struct timer_list mali_init_cpu_clock_timers[8];
+static u32 mali_cpu_clock_last_value[8] = {0,};
+#endif
+
+/* Export symbols from common code: mali_user_settings.c */
+#include "mali_user_settings_db.h"
+EXPORT_SYMBOL(mali_set_user_setting);
+EXPORT_SYMBOL(mali_get_user_setting);
+
+static char mali_dev_name[] = "mali"; /* should be const, but the functions we call require non-const */
+
+/* This driver only supports one Mali device, and this variable stores this single platform device */
+struct platform_device *mali_platform_device = NULL;
+
+/* This driver only supports one Mali device, and this variable stores the exposed misc device (/dev/mali) */
+static struct miscdevice mali_miscdevice = { 0, };
+
+static int mali_miscdevice_register(struct platform_device *pdev);
+static void mali_miscdevice_unregister(void);
+
+static int mali_open(struct inode *inode, struct file *filp);
+static int mali_release(struct inode *inode, struct file *filp);
+#ifdef HAVE_UNLOCKED_IOCTL
+static long mali_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+#else
+static int mali_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg);
+#endif
+
+static int mali_probe(struct platform_device *pdev);
+static int mali_remove(struct platform_device *pdev);
+
+static int mali_driver_suspend_scheduler(struct device *dev);
+static int mali_driver_resume_scheduler(struct device *dev);
+
+#ifdef CONFIG_PM_RUNTIME
+static int mali_driver_runtime_suspend(struct device *dev);
+static int mali_driver_runtime_resume(struct device *dev);
+static int mali_driver_runtime_idle(struct device *dev);
+#endif
+
+#if defined(MALI_FAKE_PLATFORM_DEVICE)
+#if defined(CONFIG_MALI_DT)
+extern int mali_platform_device_init(struct platform_device *device);
+extern int mali_platform_device_deinit(struct platform_device *device);
+#else
+extern int mali_platform_device_register(void);
+extern int mali_platform_device_unregister(void);
+#endif
+#endif
+
+/* Linux power management operations provided by the Mali device driver */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29))
+struct pm_ext_ops mali_dev_ext_pm_ops = {
+ .base =
+ {
+ .suspend = mali_driver_suspend_scheduler,
+ .resume = mali_driver_resume_scheduler,
+ .freeze = mali_driver_suspend_scheduler,
+ .thaw = mali_driver_resume_scheduler,
+ },
+};
+#else
+static const struct dev_pm_ops mali_dev_pm_ops = {
+#ifdef CONFIG_PM_RUNTIME
+ .runtime_suspend = mali_driver_runtime_suspend,
+ .runtime_resume = mali_driver_runtime_resume,
+ .runtime_idle = mali_driver_runtime_idle,
+#endif
+ .suspend = mali_driver_suspend_scheduler,
+ .resume = mali_driver_resume_scheduler,
+ .freeze = mali_driver_suspend_scheduler,
+ .thaw = mali_driver_resume_scheduler,
+ .poweroff = mali_driver_suspend_scheduler,
+};
+#endif
+
+#ifdef CONFIG_MALI_DT
+static struct of_device_id base_dt_ids[] = {
+ {.compatible = "arm,mali-300"},
+ {.compatible = "arm,mali-400"},
+ {.compatible = "arm,mali-450"},
+ {.compatible = "arm,mali-470"},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, base_dt_ids);
+#endif
+
+/* The Mali device driver struct */
+static struct platform_driver mali_platform_driver = {
+ .probe = mali_probe,
+ .remove = mali_remove,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29))
+ .pm = &mali_dev_ext_pm_ops,
+#endif
+ .driver =
+ {
+ .name = MALI_GPU_NAME_UTGARD,
+ .owner = THIS_MODULE,
+ .bus = &platform_bus_type,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29))
+ .pm = &mali_dev_pm_ops,
+#endif
+#ifdef CONFIG_MALI_DT
+ .of_match_table = of_match_ptr(base_dt_ids),
+#endif
+ },
+};
+
+/* Linux misc device operations (/dev/mali) */
+struct file_operations mali_fops = {
+ .owner = THIS_MODULE,
+ .open = mali_open,
+ .release = mali_release,
+#ifdef HAVE_UNLOCKED_IOCTL
+ .unlocked_ioctl = mali_ioctl,
+#else
+ .ioctl = mali_ioctl,
+#endif
+ .compat_ioctl = mali_ioctl,
+ .mmap = mali_mmap
+};
+
+#if MALI_ENABLE_CPU_CYCLES
+void mali_init_cpu_time_counters(int reset, int enable_divide_by_64)
+{
+ /* The CPU assembly reference used is: ARM Architecture Reference Manual ARMv7-AR C.b */
+ u32 write_value;
+
+ /* See B4.1.116 PMCNTENSET, Performance Monitors Count Enable Set register, VMSA */
+ /* setting p15 c9 c12 1 to 0x8000000f==CPU_CYCLE_ENABLE |EVENT_3_ENABLE|EVENT_2_ENABLE|EVENT_1_ENABLE|EVENT_0_ENABLE */
+ asm volatile("mcr p15, 0, %0, c9, c12, 1" :: "r"(0x8000000f));
+
+
+ /* See B4.1.117 PMCR, Performance Monitors Control Register. Writing to p15, c9, c12, 0 */
+ write_value = 1 << 0; /* Bit 0 set. Enable counters */
+ if (reset) {
+ write_value |= 1 << 1; /* Reset event counters */
+ write_value |= 1 << 2; /* Reset cycle counter */
+ }
+ if (enable_divide_by_64) {
+ write_value |= 1 << 3; /* Enable the Clock divider by 64 */
+ }
+ write_value |= 1 << 4; /* Export enable. Not needed */
+ asm volatile("MCR p15, 0, %0, c9, c12, 0\t\n" :: "r"(write_value));
+
+ /* PMOVSR Overflow Flag Status Register - Clear Clock and Event overflows */
+ asm volatile("MCR p15, 0, %0, c9, c12, 3\t\n" :: "r"(0x8000000f));
+
+
+ /* See B4.1.124 PMUSERENR - setting p15 c9 c14 0 to 1 */
+ /* User mode access to the Performance Monitors enabled. */
+ /* Lets User space read cpu clock cycles */
+ asm volatile("mcr p15, 0, %0, c9, c14, 0" :: "r"(1));
+}
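+
+/*
+ * mali_get_cpu_cyclecount() (used below, defined elsewhere in the driver) is
+ * assumed to read the cycle counter enabled above. A typical ARMv7
+ * implementation reads PMCCNTR, the Performance Monitors Cycle Count Register:
+ *
+ *   static inline unsigned int mali_get_cpu_cyclecount(void)
+ *   {
+ *       unsigned int value;
+ *       asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(value));
+ *       return value;
+ *   }
+ */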
+
+/** A timer function that configures the cycle clock counter on the current CPU.
+ * The function \a mali_init_cpu_time_counters_on_all_cpus sets up this
+ * function to trigger on all CPUs during module load.
+ */
+static void mali_init_cpu_clock_timer_func(unsigned long data)
+{
+ int reset_counters, enable_divide_clock_counter_by_64;
+ int current_cpu = raw_smp_processor_id();
+ unsigned int sample0;
+ unsigned int sample1;
+
+ MALI_IGNORE(data);
+
+ reset_counters = 1;
+ enable_divide_clock_counter_by_64 = 0;
+ mali_init_cpu_time_counters(reset_counters, enable_divide_clock_counter_by_64);
+
+ sample0 = mali_get_cpu_cyclecount();
+ sample1 = mali_get_cpu_cyclecount();
+
+ MALI_DEBUG_PRINT(3, ("Init Cpu %d cycle counter- First two samples: %08x %08x \n", current_cpu, sample0, sample1));
+}
+
+/** A timer function for storing the current cycle counter value on all CPUs.
+ * Used for checking whether the clocks have similar values or are drifting.
+ */
+static void mali_print_cpu_clock_timer_func(unsigned long data)
+{
+ int current_cpu = raw_smp_processor_id();
+ unsigned int sample0;
+
+ MALI_IGNORE(data);
+ sample0 = mali_get_cpu_cyclecount();
+ if (current_cpu < 8) {
+ mali_cpu_clock_last_value[current_cpu] = sample0;
+ }
+}
+
+/** Init the performance registers on all CPUs to count clock cycles.
+ * For init, \a print_only should be 0.
+ * If \a print_only is 1, it will instead print the current clock value of all CPUs.
+ */
+void mali_init_cpu_time_counters_on_all_cpus(int print_only)
+{
+ int i = 0;
+ int cpu_number;
+ int jiffies_trigger;
+ int jiffies_wait;
+
+ jiffies_wait = 2;
+ jiffies_trigger = jiffies + jiffies_wait;
+
+ for (i = 0 ; i < 8 ; i++) {
+ init_timer(&mali_init_cpu_clock_timers[i]);
+ if (print_only) mali_init_cpu_clock_timers[i].function = mali_print_cpu_clock_timer_func;
+ else mali_init_cpu_clock_timers[i].function = mali_init_cpu_clock_timer_func;
+ mali_init_cpu_clock_timers[i].expires = jiffies_trigger;
+ }
+ cpu_number = cpumask_first(cpu_online_mask);
+ for (i = 0 ; i < 8 ; i++) {
+ int next_cpu;
+ add_timer_on(&mali_init_cpu_clock_timers[i], cpu_number);
+ next_cpu = cpumask_next(cpu_number, cpu_online_mask);
+ if (next_cpu >= nr_cpu_ids) break;
+ cpu_number = next_cpu;
+ }
+
+ while (jiffies_wait) jiffies_wait = schedule_timeout_uninterruptible(jiffies_wait);
+
+ for (i = 0 ; i < 8 ; i++) {
+ del_timer_sync(&mali_init_cpu_clock_timers[i]);
+ }
+
+ if (print_only) {
+ if ((0 == mali_cpu_clock_last_value[2]) && (0 == mali_cpu_clock_last_value[3])) {
+ /* Diff can be printed if we want to check if the clocks are in sync
+ int diff = mali_cpu_clock_last_value[0] - mali_cpu_clock_last_value[1];*/
+ MALI_DEBUG_PRINT(2, ("CPU cycle counters readout all: %08x %08x\n", mali_cpu_clock_last_value[0], mali_cpu_clock_last_value[1]));
+ } else {
+ MALI_DEBUG_PRINT(2, ("CPU cycle counters readout all: %08x %08x %08x %08x\n", mali_cpu_clock_last_value[0], mali_cpu_clock_last_value[1], mali_cpu_clock_last_value[2], mali_cpu_clock_last_value[3]));
+ }
+ }
+}
+#endif
+
+int mali_module_init(void)
+{
+ int err = 0;
+
+ MALI_DEBUG_PRINT(2, ("Inserting Mali v%d device driver. \n", _MALI_API_VERSION));
+ MALI_DEBUG_PRINT(2, ("Compiled: %s, time: %s.\n", __DATE__, __TIME__));
+ MALI_DEBUG_PRINT(2, ("Driver revision: %s\n", SVN_REV_STRING));
+
+#if MALI_ENABLE_CPU_CYCLES
+ mali_init_cpu_time_counters_on_all_cpus(0);
+ MALI_DEBUG_PRINT(2, ("CPU cycle counter setup complete\n"));
+ /* Printing the current cpu counters */
+ mali_init_cpu_time_counters_on_all_cpus(1);
+#endif
+
+ /* Initialize module wide settings */
+#ifdef MALI_FAKE_PLATFORM_DEVICE
+#ifndef CONFIG_MALI_DT
+ MALI_DEBUG_PRINT(2, ("mali_module_init() registering device\n"));
+ err = mali_platform_device_register();
+ if (0 != err) {
+ return err;
+ }
+#endif
+#endif
+
+ MALI_DEBUG_PRINT(2, ("mali_module_init() registering driver\n"));
+
+ err = platform_driver_register(&mali_platform_driver);
+
+ if (0 != err) {
+ MALI_DEBUG_PRINT(2, ("mali_module_init() Failed to register driver (%d)\n", err));
+#ifdef MALI_FAKE_PLATFORM_DEVICE
+#ifndef CONFIG_MALI_DT
+ mali_platform_device_unregister();
+#endif
+#endif
+ mali_platform_device = NULL;
+ return err;
+ }
+
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+ err = _mali_internal_profiling_init(mali_boot_profiling ? MALI_TRUE : MALI_FALSE);
+ if (0 != err) {
+ /* Not fatal if we weren't able to initialize profiling */
+ MALI_PRINT_ERROR(("Failed to initialize profiling, feature will be unavailable\n"));
+ }
+#endif
+
+ /* Tracing the current frequency and voltage from boot/insmod */
+#if defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_MALI_DVFS)
+ /* Just call mali_get_current_gpu_clk_item() to record current clk info. */
+ mali_get_current_gpu_clk_item(&mali_gpu_clk[0]);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+ mali_gpu_clk[0].clock,
+ mali_gpu_clk[0].vol / 1000,
+ 0, 0, 0);
+#endif
+
+ MALI_PRINT(("Mali device driver loaded\n"));
+
+ return 0; /* Success */
+}
+
+void mali_module_exit(void)
+{
+ MALI_DEBUG_PRINT(2, ("Unloading Mali v%d device driver.\n", _MALI_API_VERSION));
+
+ MALI_DEBUG_PRINT(2, ("mali_module_exit() unregistering driver\n"));
+
+ platform_driver_unregister(&mali_platform_driver);
+
+#if defined(MALI_FAKE_PLATFORM_DEVICE)
+#ifndef CONFIG_MALI_DT
+ MALI_DEBUG_PRINT(2, ("mali_module_exit() unregistering device\n"));
+ mali_platform_device_unregister();
+#endif
+#endif
+
+ /* Tracing the current frequency and voltage from rmmod */
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+ 0,
+ 0,
+ 0, 0, 0);
+
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+ _mali_internal_profiling_term();
+#endif
+
+ MALI_PRINT(("Mali device driver unloaded\n"));
+}
+
+static int mali_probe(struct platform_device *pdev)
+{
+ int err;
+
+ MALI_DEBUG_PRINT(2, ("mali_probe(): Called for platform device %s\n", pdev->name));
+
+ if (NULL != mali_platform_device) {
+ /* Already connected to a device, return error */
+ MALI_PRINT_ERROR(("mali_probe(): The Mali driver is already connected with a Mali device."));
+ return -EEXIST;
+ }
+
+ mali_platform_device = pdev;
+
+#ifdef CONFIG_MALI_DT
+ /* If we use DT to initialize our DDK, we have to prepare some things first. */
+ err = mali_platform_device_init(mali_platform_device);
+ if (0 != err) {
+ MALI_PRINT_ERROR(("mali_probe(): Failed to initialize platform device."));
+ return -EFAULT;
+ }
+#endif
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_wq_init()) {
+ /* Initialize the Mali GPU HW specified by pdev */
+ if (_MALI_OSK_ERR_OK == mali_initialize_subsystems()) {
+ /* Register a misc device (so we are accessible from user space) */
+ err = mali_miscdevice_register(pdev);
+ if (0 == err) {
+ /* Setup sysfs entries */
+ err = mali_sysfs_register(mali_dev_name);
+
+ if (0 == err) {
+ MALI_DEBUG_PRINT(2, ("mali_probe(): Successfully initialized driver for platform device %s\n", pdev->name));
+
+ return 0;
+ } else {
+ MALI_PRINT_ERROR(("mali_probe(): failed to register sysfs entries"));
+ }
+ mali_miscdevice_unregister();
+ } else {
+ MALI_PRINT_ERROR(("mali_probe(): failed to register Mali misc device."));
+ }
+ mali_terminate_subsystems();
+ } else {
+ MALI_PRINT_ERROR(("mali_probe(): Failed to initialize Mali device driver."));
+ }
+ _mali_osk_wq_term();
+ }
+
+ mali_platform_device = NULL;
+ return -EFAULT;
+}
+
+static int mali_remove(struct platform_device *pdev)
+{
+ MALI_DEBUG_PRINT(2, ("mali_remove() called for platform device %s\n", pdev->name));
+ mali_sysfs_unregister();
+ mali_miscdevice_unregister();
+ mali_terminate_subsystems();
+ _mali_osk_wq_term();
+#ifdef CONFIG_MALI_DT
+ mali_platform_device_deinit(mali_platform_device);
+#endif
+ mali_platform_device = NULL;
+ return 0;
+}
+
+static int mali_miscdevice_register(struct platform_device *pdev)
+{
+ int err;
+
+ mali_miscdevice.minor = MISC_DYNAMIC_MINOR;
+ mali_miscdevice.name = mali_dev_name;
+ mali_miscdevice.fops = &mali_fops;
+ mali_miscdevice.parent = get_device(&pdev->dev);
+
+ err = misc_register(&mali_miscdevice);
+ if (0 != err) {
+ MALI_PRINT_ERROR(("Failed to register misc device, misc_register() returned %d\n", err));
+ }
+
+ return err;
+}
+
+static void mali_miscdevice_unregister(void)
+{
+ misc_deregister(&mali_miscdevice);
+}
+
+static int mali_driver_suspend_scheduler(struct device *dev)
+{
+ mali_pm_os_suspend(MALI_TRUE);
+ /* Tracing the frequency and voltage after mali is suspended */
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+ 0,
+ 0,
+ 0, 0, 0);
+ return 0;
+}
+
+static int mali_driver_resume_scheduler(struct device *dev)
+{
+ /* Tracing the frequency and voltage after mali is resumed */
+#if defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_MALI_DVFS)
+ /* Call mali_get_current_gpu_clk_item() only once, to record the current clock info. */
+ if (is_first_resume == 1) {
+ mali_get_current_gpu_clk_item(&mali_gpu_clk[1]);
+ is_first_resume = 0;
+ }
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+ mali_gpu_clk[1].clock,
+ mali_gpu_clk[1].vol / 1000,
+ 0, 0, 0);
+#endif
+ mali_pm_os_resume();
+ return 0;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int mali_driver_runtime_suspend(struct device *dev)
+{
+ if (MALI_TRUE == mali_pm_runtime_suspend()) {
+ /* Tracing the frequency and voltage after mali is suspended */
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+ 0,
+ 0,
+ 0, 0, 0);
+
+ return 0;
+ } else {
+ return -EBUSY;
+ }
+}
+
+static int mali_driver_runtime_resume(struct device *dev)
+{
+ /* Tracing the frequency and voltage after mali is resumed */
+#if defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_MALI_DVFS)
+ /* Call mali_get_current_gpu_clk_item() only once, to record the current clock info. */
+ if (is_first_resume == 1) {
+ mali_get_current_gpu_clk_item(&mali_gpu_clk[1]);
+ is_first_resume = 0;
+ }
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+ mali_gpu_clk[1].clock,
+ mali_gpu_clk[1].vol / 1000,
+ 0, 0, 0);
+#endif
+
+ mali_pm_runtime_resume();
+ return 0;
+}
+
+static int mali_driver_runtime_idle(struct device *dev)
+{
+ /* Nothing to do */
+ return 0;
+}
+#endif
+
+static int mali_open(struct inode *inode, struct file *filp)
+{
+ struct mali_session_data *session_data;
+ _mali_osk_errcode_t err;
+
+ /* input validation */
+ if (mali_miscdevice.minor != iminor(inode)) {
+ MALI_PRINT_ERROR(("mali_open() Minor does not match\n"));
+ return -ENODEV;
+ }
+
+ /* allocated struct to track this session */
+ err = _mali_ukk_open((void **)&session_data);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ /* initialize file pointer */
+ filp->f_pos = 0;
+
+ /* link in our session data */
+ filp->private_data = (void *)session_data;
+
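+ /* Use the global swap file's address_space as this file's page-cache
+ * mapping (see mali_mem_swap_get_global_swap_file()). */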
+ filp->f_mapping = mali_mem_swap_get_global_swap_file()->f_mapping;
+
+ return 0;
+}
+
+static int mali_release(struct inode *inode, struct file *filp)
+{
+ _mali_osk_errcode_t err;
+
+ /* input validation */
+ if (mali_miscdevice.minor != iminor(inode)) {
+ MALI_PRINT_ERROR(("mali_release() Minor does not match\n"));
+ return -ENODEV;
+ }
+
+ err = _mali_ukk_close((void **)&filp->private_data);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ return 0;
+}
+
+int map_errcode(_mali_osk_errcode_t err)
+{
+ switch (err) {
+ case _MALI_OSK_ERR_OK:
+ return 0;
+ case _MALI_OSK_ERR_FAULT:
+ return -EFAULT;
+ case _MALI_OSK_ERR_INVALID_FUNC:
+ return -ENOTTY;
+ case _MALI_OSK_ERR_INVALID_ARGS:
+ return -EINVAL;
+ case _MALI_OSK_ERR_NOMEM:
+ return -ENOMEM;
+ case _MALI_OSK_ERR_TIMEOUT:
+ return -ETIMEDOUT;
+ case _MALI_OSK_ERR_RESTARTSYSCALL:
+ return -ERESTARTSYS;
+ case _MALI_OSK_ERR_ITEM_NOT_FOUND:
+ return -ENOENT;
+ default:
+ return -EFAULT;
+ }
+}
+
+#ifdef HAVE_UNLOCKED_IOCTL
+static long mali_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+#else
+static int mali_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
+#endif
+{
+ int err;
+ struct mali_session_data *session_data;
+
+#ifndef HAVE_UNLOCKED_IOCTL
+ /* inode not used */
+ (void)inode;
+#endif
+
+ MALI_DEBUG_PRINT(7, ("Ioctl received 0x%08X 0x%08lX\n", cmd, arg));
+
+ session_data = (struct mali_session_data *)filp->private_data;
+ if (NULL == session_data) {
+ MALI_DEBUG_PRINT(7, ("filp->private_data was NULL\n"));
+ return -ENOTTY;
+ }
+
+ if (NULL == (void *)arg) {
+ MALI_DEBUG_PRINT(7, ("arg was NULL\n"));
+ return -ENOTTY;
+ }
+
+ switch (cmd) {
+ case MALI_IOC_WAIT_FOR_NOTIFICATION:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_wait_for_notification_s), sizeof(u64)));
+ err = wait_for_notification_wrapper(session_data, (_mali_uk_wait_for_notification_s __user *)arg);
+ break;
+
+ case MALI_IOC_GET_API_VERSION_V2:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_api_version_v2_s), sizeof(u64)));
+ err = get_api_version_v2_wrapper(session_data, (_mali_uk_get_api_version_v2_s __user *)arg);
+ break;
+
+ case MALI_IOC_GET_API_VERSION:
+ err = get_api_version_wrapper(session_data, (_mali_uk_get_api_version_s __user *)arg);
+ break;
+
+ case MALI_IOC_POST_NOTIFICATION:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_post_notification_s), sizeof(u64)));
+ err = post_notification_wrapper(session_data, (_mali_uk_post_notification_s __user *)arg);
+ break;
+
+ case MALI_IOC_GET_USER_SETTINGS:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_user_settings_s), sizeof(u64)));
+ err = get_user_settings_wrapper(session_data, (_mali_uk_get_user_settings_s __user *)arg);
+ break;
+
+ case MALI_IOC_REQUEST_HIGH_PRIORITY:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_request_high_priority_s), sizeof(u64)));
+ err = request_high_priority_wrapper(session_data, (_mali_uk_request_high_priority_s __user *)arg);
+ break;
+
+ case MALI_IOC_PENDING_SUBMIT:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_pending_submit_s), sizeof(u64)));
+ err = pending_submit_wrapper(session_data, (_mali_uk_pending_submit_s __user *)arg);
+ break;
+
+#if defined(CONFIG_MALI400_PROFILING)
+ case MALI_IOC_PROFILING_ADD_EVENT:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_profiling_add_event_s), sizeof(u64)));
+ err = profiling_add_event_wrapper(session_data, (_mali_uk_profiling_add_event_s __user *)arg);
+ break;
+
+ case MALI_IOC_PROFILING_REPORT_SW_COUNTERS:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_sw_counters_report_s), sizeof(u64)));
+ err = profiling_report_sw_counters_wrapper(session_data, (_mali_uk_sw_counters_report_s __user *)arg);
+ break;
+
+ case MALI_IOC_PROFILING_STREAM_FD_GET:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_profiling_stream_fd_get_s), sizeof(u64)));
+ err = profiling_get_stream_fd_wrapper(session_data, (_mali_uk_profiling_stream_fd_get_s __user *)arg);
+ break;
+
+ case MALI_IOC_PROILING_CONTROL_SET:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_profiling_control_set_s), sizeof(u64)));
+ err = profiling_control_set_wrapper(session_data, (_mali_uk_profiling_control_set_s __user *)arg);
+ break;
+#else
+
+ case MALI_IOC_PROFILING_ADD_EVENT: /* FALL-THROUGH */
+ case MALI_IOC_PROFILING_REPORT_SW_COUNTERS:
+ MALI_DEBUG_PRINT(2, ("Profiling not supported\n"));
+ err = -ENOTTY;
+ break;
+#endif
+
+ case MALI_IOC_PROFILING_MEMORY_USAGE_GET:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_profiling_memory_usage_get_s), sizeof(u64)));
+ err = mem_usage_get_wrapper(session_data, (_mali_uk_profiling_memory_usage_get_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_ALLOC:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_alloc_mem_s), sizeof(u64)));
+ err = mem_alloc_wrapper(session_data, (_mali_uk_alloc_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_FREE:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_free_mem_s), sizeof(u64)));
+ err = mem_free_wrapper(session_data, (_mali_uk_free_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_BIND:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_bind_mem_s), sizeof(u64)));
+ err = mem_bind_wrapper(session_data, (_mali_uk_bind_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_UNBIND:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_unbind_mem_s), sizeof(u64)));
+ err = mem_unbind_wrapper(session_data, (_mali_uk_unbind_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_COW:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_cow_mem_s), sizeof(u64)));
+ err = mem_cow_wrapper(session_data, (_mali_uk_cow_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_COW_MODIFY_RANGE:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_cow_modify_range_s), sizeof(u64)));
+ err = mem_cow_modify_range_wrapper(session_data, (_mali_uk_cow_modify_range_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_RESIZE:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_mem_resize_s), sizeof(u64)));
+ err = mem_resize_mem_wrapper(session_data, (_mali_uk_mem_resize_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_WRITE_SAFE:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_mem_write_safe_s), sizeof(u64)));
+ err = mem_write_safe_wrapper(session_data, (_mali_uk_mem_write_safe_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_QUERY_MMU_PAGE_TABLE_DUMP_SIZE:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_query_mmu_page_table_dump_size_s), sizeof(u64)));
+ err = mem_query_mmu_page_table_dump_size_wrapper(session_data, (_mali_uk_query_mmu_page_table_dump_size_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_DUMP_MMU_PAGE_TABLE:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_dump_mmu_page_table_s), sizeof(u64)));
+ err = mem_dump_mmu_page_table_wrapper(session_data, (_mali_uk_dump_mmu_page_table_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_DMA_BUF_GET_SIZE:
+#ifdef CONFIG_DMA_SHARED_BUFFER
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_dma_buf_get_size_s), sizeof(u64)));
+ err = mali_dma_buf_get_size(session_data, (_mali_uk_dma_buf_get_size_s __user *)arg);
+#else
+ MALI_DEBUG_PRINT(2, ("DMA-BUF not supported\n"));
+ err = -ENOTTY;
+#endif
+ break;
+
+ case MALI_IOC_PP_START_JOB:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_pp_start_job_s), sizeof(u64)));
+ err = pp_start_job_wrapper(session_data, (_mali_uk_pp_start_job_s __user *)arg);
+ break;
+
+ case MALI_IOC_PP_AND_GP_START_JOB:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_pp_and_gp_start_job_s), sizeof(u64)));
+ err = pp_and_gp_start_job_wrapper(session_data, (_mali_uk_pp_and_gp_start_job_s __user *)arg);
+ break;
+
+ case MALI_IOC_PP_NUMBER_OF_CORES_GET:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_pp_number_of_cores_s), sizeof(u64)));
+ err = pp_get_number_of_cores_wrapper(session_data, (_mali_uk_get_pp_number_of_cores_s __user *)arg);
+ break;
+
+ case MALI_IOC_PP_CORE_VERSION_GET:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_pp_core_version_s), sizeof(u64)));
+ err = pp_get_core_version_wrapper(session_data, (_mali_uk_get_pp_core_version_s __user *)arg);
+ break;
+
+ case MALI_IOC_PP_DISABLE_WB:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_pp_disable_wb_s), sizeof(u64)));
+ err = pp_disable_wb_wrapper(session_data, (_mali_uk_pp_disable_wb_s __user *)arg);
+ break;
+
+ case MALI_IOC_GP2_START_JOB:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_gp_start_job_s), sizeof(u64)));
+ err = gp_start_job_wrapper(session_data, (_mali_uk_gp_start_job_s __user *)arg);
+ break;
+
+ case MALI_IOC_GP2_NUMBER_OF_CORES_GET:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_gp_number_of_cores_s), sizeof(u64)));
+ err = gp_get_number_of_cores_wrapper(session_data, (_mali_uk_get_gp_number_of_cores_s __user *)arg);
+ break;
+
+ case MALI_IOC_GP2_CORE_VERSION_GET:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_gp_core_version_s), sizeof(u64)));
+ err = gp_get_core_version_wrapper(session_data, (_mali_uk_get_gp_core_version_s __user *)arg);
+ break;
+
+ case MALI_IOC_GP2_SUSPEND_RESPONSE:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_gp_suspend_response_s), sizeof(u64)));
+ err = gp_suspend_response_wrapper(session_data, (_mali_uk_gp_suspend_response_s __user *)arg);
+ break;
+
+ case MALI_IOC_VSYNC_EVENT_REPORT:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_vsync_event_report_s), sizeof(u64)));
+ err = vsync_event_report_wrapper(session_data, (_mali_uk_vsync_event_report_s __user *)arg);
+ break;
+
+ case MALI_IOC_TIMELINE_GET_LATEST_POINT:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_timeline_get_latest_point_s), sizeof(u64)));
+ err = timeline_get_latest_point_wrapper(session_data, (_mali_uk_timeline_get_latest_point_s __user *)arg);
+ break;
+ case MALI_IOC_TIMELINE_WAIT:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_timeline_wait_s), sizeof(u64)));
+ err = timeline_wait_wrapper(session_data, (_mali_uk_timeline_wait_s __user *)arg);
+ break;
+ case MALI_IOC_TIMELINE_CREATE_SYNC_FENCE:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_timeline_create_sync_fence_s), sizeof(u64)));
+ err = timeline_create_sync_fence_wrapper(session_data, (_mali_uk_timeline_create_sync_fence_s __user *)arg);
+ break;
+ case MALI_IOC_SOFT_JOB_START:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_soft_job_start_s), sizeof(u64)));
+ err = soft_job_start_wrapper(session_data, (_mali_uk_soft_job_start_s __user *)arg);
+ break;
+ case MALI_IOC_SOFT_JOB_SIGNAL:
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_soft_job_signal_s), sizeof(u64)));
+ err = soft_job_signal_wrapper(session_data, (_mali_uk_soft_job_signal_s __user *)arg);
+ break;
+
+ default:
+ MALI_DEBUG_PRINT(2, ("No handler for ioctl 0x%08X 0x%08lX\n", cmd, arg));
+ err = -ENOTTY;
+ }
+
+ return err;
+}
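+
+/*
+ * Illustrative user space call sequence (not part of the driver; the device
+ * node name depends on mali_dev_name and is assumed to be "mali" here):
+ *
+ * int fd = open("/dev/mali", O_RDWR);
+ * _mali_uk_get_api_version_v2_s args = {0};
+ * ioctl(fd, MALI_IOC_GET_API_VERSION_V2, &args);
+ *
+ * Every argument struct must have a size that is a multiple of 64 bits,
+ * which the BUILD_BUG_ON(!IS_ALIGNED(...)) checks above verify at compile
+ * time.
+ */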
+
+
+module_init(mali_module_init);
+module_exit(mali_module_exit);
+
+MODULE_LICENSE(MALI_KERNEL_LINUX_LICENSE);
+MODULE_AUTHOR("ARM Ltd.");
+MODULE_VERSION(SVN_REV_STRING);
diff --git a/drivers/gpu/arm/utgard/linux/mali_kernel_linux.h b/drivers/gpu/arm/utgard/linux/mali_kernel_linux.h
new file mode 100644
index 000000000000..f6042712d1e3
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_kernel_linux.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_LINUX_H__
+#define __MALI_KERNEL_LINUX_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/cdev.h> /* character device definitions */
+#include <linux/idr.h>
+#include <linux/rbtree.h>
+#include "mali_kernel_license.h"
+#include "mali_osk_types.h"
+
+extern struct platform_device *mali_platform_device;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_KERNEL_LINUX_H__ */
diff --git a/drivers/gpu/arm/utgard/linux/mali_kernel_sysfs.c b/drivers/gpu/arm/utgard/linux/mali_kernel_sysfs.c
new file mode 100644
index 000000000000..4f06ca14bf48
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_kernel_sysfs.c
@@ -0,0 +1,1410 @@
+/**
+ * Copyright (C) 2011-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+
+/**
+ * @file mali_kernel_sysfs.c
+ * Implementation of some sysfs data exports
+ */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include "mali_kernel_license.h"
+#include "mali_kernel_common.h"
+#include "mali_ukk.h"
+
+#if MALI_LICENSE_IS_GPL
+
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <asm/uaccess.h>
+#include <linux/module.h>
+#include <linux/mali/mali_utgard.h>
+#include "mali_kernel_sysfs.h"
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+#include <linux/slab.h>
+#include "mali_osk_profiling.h"
+#endif
+
+#include <linux/mali/mali_utgard.h>
+#include "mali_pm.h"
+#include "mali_pmu.h"
+#include "mali_group.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_l2_cache.h"
+#include "mali_hw_core.h"
+#include "mali_kernel_core.h"
+#include "mali_user_settings_db.h"
+#include "mali_profiling_internal.h"
+#include "mali_gp_job.h"
+#include "mali_pp_job.h"
+#include "mali_executor.h"
+
+#define PRIVATE_DATA_COUNTER_MAKE_GP(src) (src)
+#define PRIVATE_DATA_COUNTER_MAKE_PP(src) ((1 << 24) | (src))
+#define PRIVATE_DATA_COUNTER_MAKE_PP_SUB_JOB(src, sub_job) ((1 << 24) | (1 << 16) | ((sub_job) << 8) | (src))
+#define PRIVATE_DATA_COUNTER_IS_PP(a) ((((a) >> 24) & 0xFF) ? MALI_TRUE : MALI_FALSE)
+#define PRIVATE_DATA_COUNTER_GET_SRC(a) ((a) & 0xFF)
+#define PRIVATE_DATA_COUNTER_IS_SUB_JOB(a) ((((a) >> 16) & 0xFF) ? MALI_TRUE : MALI_FALSE)
+#define PRIVATE_DATA_COUNTER_GET_SUB_JOB(a) (((a) >> 8) & 0xFF)
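+
+/*
+ * Layout sketch of the counter "private data" word built by the macros
+ * above (added for illustration):
+ * bits 31..24 non-zero => PP counter (otherwise GP)
+ * bits 23..16 non-zero => counter applies to one PP sub job only
+ * bits 15..8 sub job index
+ * bits 7..0 source id (0 or 1)
+ * Example: PRIVATE_DATA_COUNTER_MAKE_PP_SUB_JOB(1, 2) == 0x01010201,
+ * i.e. PP counter, sub job 2, source 1.
+ */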
+
+#define POWER_BUFFER_SIZE 3
+
+static struct dentry *mali_debugfs_dir = NULL;
+
+typedef enum {
+ _MALI_DEVICE_SUSPEND,
+ _MALI_DEVICE_RESUME,
+ _MALI_DEVICE_DVFS_PAUSE,
+ _MALI_DEVICE_DVFS_RESUME,
+ _MALI_MAX_EVENTS
+} _mali_device_debug_power_events;
+
+static const char *const mali_power_events[_MALI_MAX_EVENTS] = {
+ [_MALI_DEVICE_SUSPEND] = "suspend",
+ [_MALI_DEVICE_RESUME] = "resume",
+ [_MALI_DEVICE_DVFS_PAUSE] = "dvfs_pause",
+ [_MALI_DEVICE_DVFS_RESUME] = "dvfs_resume",
+};
+
+static mali_bool power_always_on_enabled = MALI_FALSE;
+
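+/*
+ * debugfs_create_file() stores its "data" argument in inode->i_private;
+ * this shared open handler copies that pointer into filp->private_data so
+ * the read/write handlers below can recover their per-file context.
+ */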
+static int open_copy_private_data(struct inode *inode, struct file *filp)
+{
+ filp->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t group_enabled_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
+{
+ int r;
+ char buffer[64];
+ struct mali_group *group;
+
+ group = (struct mali_group *)filp->private_data;
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ r = snprintf(buffer, 64, "%u\n",
+ mali_executor_group_is_disabled(group) ? 0 : 1);
+
+ return simple_read_from_buffer(buf, count, offp, buffer, r);
+}
+
+static ssize_t group_enabled_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp)
+{
+ int r;
+ char buffer[64];
+ unsigned long val;
+ struct mali_group *group;
+
+ group = (struct mali_group *)filp->private_data;
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ if (count >= sizeof(buffer)) {
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(&buffer[0], buf, count)) {
+ return -EFAULT;
+ }
+ buffer[count] = '\0';
+
+ r = kstrtoul(&buffer[0], 10, &val);
+ if (0 != r) {
+ return -EINVAL;
+ }
+
+ switch (val) {
+ case 1:
+ mali_executor_group_enable(group);
+ break;
+ case 0:
+ mali_executor_group_disable(group);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *offp += count;
+ return count;
+}
+
+static const struct file_operations group_enabled_fops = {
+ .owner = THIS_MODULE,
+ .open = open_copy_private_data,
+ .read = group_enabled_read,
+ .write = group_enabled_write,
+};
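+
+/*
+ * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug and the
+ * device name is "mali"; see mali_sysfs_register() for the tree layout):
+ * echo 0 > /sys/kernel/debug/mali/pp/pp0/enabled # disable PP core 0
+ * cat /sys/kernel/debug/mali/pp/pp0/enabled # reads back 0
+ */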
+
+static ssize_t hw_core_base_addr_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
+{
+ int r;
+ char buffer[64];
+ struct mali_hw_core *hw_core;
+
+ hw_core = (struct mali_hw_core *)filp->private_data;
+ MALI_DEBUG_ASSERT_POINTER(hw_core);
+
+ r = snprintf(buffer, 64, "0x%lX\n", hw_core->phys_addr);
+
+ return simple_read_from_buffer(buf, count, offp, buffer, r);
+}
+
+static const struct file_operations hw_core_base_addr_fops = {
+ .owner = THIS_MODULE,
+ .open = open_copy_private_data,
+ .read = hw_core_base_addr_read,
+};
+
+static ssize_t profiling_counter_src_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ u32 is_pp = PRIVATE_DATA_COUNTER_IS_PP((uintptr_t)filp->private_data);
+ u32 src_id = PRIVATE_DATA_COUNTER_GET_SRC((uintptr_t)filp->private_data);
+ mali_bool is_sub_job = PRIVATE_DATA_COUNTER_IS_SUB_JOB((uintptr_t)filp->private_data);
+ u32 sub_job = PRIVATE_DATA_COUNTER_GET_SUB_JOB((uintptr_t)filp->private_data);
+ char buf[64];
+ int r;
+ u32 val;
+
+ if (MALI_TRUE == is_pp) {
+ /* PP counter */
+ if (MALI_TRUE == is_sub_job) {
+ /* Get counter for a particular sub job */
+ if (0 == src_id) {
+ val = mali_pp_job_get_pp_counter_sub_job_src0(sub_job);
+ } else {
+ val = mali_pp_job_get_pp_counter_sub_job_src1(sub_job);
+ }
+ } else {
+ /* Get default counter for all PP sub jobs */
+ if (0 == src_id) {
+ val = mali_pp_job_get_pp_counter_global_src0();
+ } else {
+ val = mali_pp_job_get_pp_counter_global_src1();
+ }
+ }
+ } else {
+ /* GP counter */
+ if (0 == src_id) {
+ val = mali_gp_job_get_gp_counter_src0();
+ } else {
+ val = mali_gp_job_get_gp_counter_src1();
+ }
+ }
+
+ if (MALI_HW_CORE_NO_COUNTER == val) {
+ r = snprintf(buf, 64, "-1\n");
+ } else {
+ r = snprintf(buf, 64, "%u\n", val);
+ }
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t profiling_counter_src_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ u32 is_pp = PRIVATE_DATA_COUNTER_IS_PP((uintptr_t)filp->private_data);
+ u32 src_id = PRIVATE_DATA_COUNTER_GET_SRC((uintptr_t)filp->private_data);
+ mali_bool is_sub_job = PRIVATE_DATA_COUNTER_IS_SUB_JOB((uintptr_t)filp->private_data);
+ u32 sub_job = PRIVATE_DATA_COUNTER_GET_SUB_JOB((uintptr_t)filp->private_data);
+ char buf[64];
+ long val;
+ int ret;
+
+ if (cnt >= sizeof(buf)) {
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&buf, ubuf, cnt)) {
+ return -EFAULT;
+ }
+
+ buf[cnt] = 0;
+
+ ret = kstrtol(buf, 10, &val);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (val < 0) {
+ /* any negative input will disable counter */
+ val = MALI_HW_CORE_NO_COUNTER;
+ }
+
+ if (MALI_TRUE == is_pp) {
+ /* PP counter */
+ if (MALI_TRUE == is_sub_job) {
+ /* Set counter for a particular sub job */
+ if (0 == src_id) {
+ mali_pp_job_set_pp_counter_sub_job_src0(sub_job, (u32)val);
+ } else {
+ mali_pp_job_set_pp_counter_sub_job_src1(sub_job, (u32)val);
+ }
+ } else {
+ /* Set default counter for all PP sub jobs */
+ if (0 == src_id) {
+ mali_pp_job_set_pp_counter_global_src0((u32)val);
+ } else {
+ mali_pp_job_set_pp_counter_global_src1((u32)val);
+ }
+ }
+ } else {
+ /* GP counter */
+ if (0 == src_id) {
+ mali_gp_job_set_gp_counter_src0((u32)val);
+ } else {
+ mali_gp_job_set_gp_counter_src1((u32)val);
+ }
+ }
+
+ *ppos += cnt;
+ return cnt;
+}
+
+static const struct file_operations profiling_counter_src_fops = {
+ .owner = THIS_MODULE,
+ .open = open_copy_private_data,
+ .read = profiling_counter_src_read,
+ .write = profiling_counter_src_write,
+};
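+
+/*
+ * Usage sketch: writing a counter number selects that HW counter, and any
+ * negative value disables it (same path assumptions as above):
+ * echo 42 > /sys/kernel/debug/mali/profiling/gp/counter_src0
+ * echo -1 > /sys/kernel/debug/mali/profiling/gp/counter_src0 # disable
+ */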
+
+static ssize_t l2_l2x_counter_srcx_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id)
+{
+ char buf[64];
+ int r;
+ u32 val;
+ struct mali_l2_cache_core *l2_core = (struct mali_l2_cache_core *)filp->private_data;
+
+ if (0 == src_id) {
+ val = mali_l2_cache_core_get_counter_src0(l2_core);
+ } else {
+ val = mali_l2_cache_core_get_counter_src1(l2_core);
+ }
+
+ if (MALI_HW_CORE_NO_COUNTER == val) {
+ r = snprintf(buf, 64, "-1\n");
+ } else {
+ r = snprintf(buf, 64, "%u\n", val);
+ }
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t l2_l2x_counter_srcx_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id)
+{
+ struct mali_l2_cache_core *l2_core = (struct mali_l2_cache_core *)filp->private_data;
+ char buf[64];
+ long val;
+ int ret;
+
+ if (cnt >= sizeof(buf)) {
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&buf, ubuf, cnt)) {
+ return -EFAULT;
+ }
+
+ buf[cnt] = 0;
+
+ ret = kstrtol(buf, 10, &val);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (val < 0) {
+ /* any negative input will disable counter */
+ val = MALI_HW_CORE_NO_COUNTER;
+ }
+
+ mali_l2_cache_core_set_counter_src(l2_core, src_id, (u32)val);
+
+ *ppos += cnt;
+ return cnt;
+}
+
+static ssize_t l2_all_counter_srcx_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id)
+{
+ char buf[64];
+ long val;
+ int ret;
+ u32 l2_id;
+ struct mali_l2_cache_core *l2_cache;
+
+ if (cnt >= sizeof(buf)) {
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&buf, ubuf, cnt)) {
+ return -EFAULT;
+ }
+
+ buf[cnt] = 0;
+
+ ret = kstrtol(buf, 10, &val);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (val < 0) {
+ /* any negative input will disable counter */
+ val = MALI_HW_CORE_NO_COUNTER;
+ }
+
+ l2_id = 0;
+ l2_cache = mali_l2_cache_core_get_glob_l2_core(l2_id);
+ while (NULL != l2_cache) {
+ mali_l2_cache_core_set_counter_src(l2_cache, src_id, (u32)val);
+
+ /* try next L2 */
+ l2_id++;
+ l2_cache = mali_l2_cache_core_get_glob_l2_core(l2_id);
+ }
+
+ *ppos += cnt;
+ return cnt;
+}
+
+static ssize_t l2_l2x_counter_src0_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ return l2_l2x_counter_srcx_read(filp, ubuf, cnt, ppos, 0);
+}
+
+static ssize_t l2_l2x_counter_src1_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ return l2_l2x_counter_srcx_read(filp, ubuf, cnt, ppos, 1);
+}
+
+static ssize_t l2_l2x_counter_src0_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ return l2_l2x_counter_srcx_write(filp, ubuf, cnt, ppos, 0);
+}
+
+static ssize_t l2_l2x_counter_src1_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ return l2_l2x_counter_srcx_write(filp, ubuf, cnt, ppos, 1);
+}
+
+static ssize_t l2_all_counter_src0_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ return l2_all_counter_srcx_write(filp, ubuf, cnt, ppos, 0);
+}
+
+static ssize_t l2_all_counter_src1_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ return l2_all_counter_srcx_write(filp, ubuf, cnt, ppos, 1);
+}
+
+static const struct file_operations l2_l2x_counter_src0_fops = {
+ .owner = THIS_MODULE,
+ .open = open_copy_private_data,
+ .read = l2_l2x_counter_src0_read,
+ .write = l2_l2x_counter_src0_write,
+};
+
+static const struct file_operations l2_l2x_counter_src1_fops = {
+ .owner = THIS_MODULE,
+ .open = open_copy_private_data,
+ .read = l2_l2x_counter_src1_read,
+ .write = l2_l2x_counter_src1_write,
+};
+
+static const struct file_operations l2_all_counter_src0_fops = {
+ .owner = THIS_MODULE,
+ .write = l2_all_counter_src0_write,
+};
+
+static const struct file_operations l2_all_counter_src1_fops = {
+ .owner = THIS_MODULE,
+ .write = l2_all_counter_src1_write,
+};
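+
+/*
+ * Note: the "all" files are write-only (mode 0200) fan-out controls that
+ * apply one value to every L2 cache; the per-cache l2<N>/counter_src{0,1}
+ * files use the read/write fops above.
+ */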
+
+static ssize_t l2_l2x_counter_valx_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id)
+{
+ char buf[64];
+ int r;
+ u32 src0 = 0;
+ u32 val0 = 0;
+ u32 src1 = 0;
+ u32 val1 = 0;
+ u32 val = -1;
+ struct mali_l2_cache_core *l2_core = (struct mali_l2_cache_core *)filp->private_data;
+
+ mali_l2_cache_core_get_counter_values(l2_core, &src0, &val0, &src1, &val1);
+
+ if (0 == src_id) {
+ if (MALI_HW_CORE_NO_COUNTER != val0) {
+ val = val0;
+ }
+ } else {
+ if (MALI_HW_CORE_NO_COUNTER != val1) {
+ val = val1;
+ }
+ }
+
+ r = snprintf(buf, 64, "%u\n", val);
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t l2_l2x_counter_val0_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ return l2_l2x_counter_valx_read(filp, ubuf, cnt, ppos, 0);
+}
+
+static ssize_t l2_l2x_counter_val1_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ return l2_l2x_counter_valx_read(filp, ubuf, cnt, ppos, 1);
+}
+
+static const struct file_operations l2_l2x_counter_val0_fops = {
+ .owner = THIS_MODULE,
+ .open = open_copy_private_data,
+ .read = l2_l2x_counter_val0_read,
+};
+
+static const struct file_operations l2_l2x_counter_val1_fops = {
+ .owner = THIS_MODULE,
+ .open = open_copy_private_data,
+ .read = l2_l2x_counter_val1_read,
+};
+
+static ssize_t power_always_on_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ unsigned long val;
+ int ret;
+ char buf[32];
+
+ cnt = min(cnt, sizeof(buf) - 1);
+ if (copy_from_user(buf, ubuf, cnt)) {
+ return -EFAULT;
+ }
+ buf[cnt] = '\0';
+
+ ret = kstrtoul(buf, 10, &val);
+ if (0 != ret) {
+ return ret;
+ }
+
+ /* Update setting (not exactly thread safe) */
+ if (1 == val && MALI_FALSE == power_always_on_enabled) {
+ power_always_on_enabled = MALI_TRUE;
+ _mali_osk_pm_dev_ref_get_sync();
+ } else if (0 == val && MALI_TRUE == power_always_on_enabled) {
+ power_always_on_enabled = MALI_FALSE;
+ _mali_osk_pm_dev_ref_put();
+ }
+
+ *ppos += cnt;
+ return cnt;
+}
+
+static ssize_t power_always_on_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ if (MALI_TRUE == power_always_on_enabled) {
+ return simple_read_from_buffer(ubuf, cnt, ppos, "1\n", 2);
+ } else {
+ return simple_read_from_buffer(ubuf, cnt, ppos, "0\n", 2);
+ }
+}
+
+static const struct file_operations power_always_on_fops = {
+ .owner = THIS_MODULE,
+ .read = power_always_on_read,
+ .write = power_always_on_write,
+};
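+
+/*
+ * Usage sketch (same path assumptions as above):
+ * echo 1 > /sys/kernel/debug/mali/power/always_on # take a PM reference
+ * echo 0 > /sys/kernel/debug/mali/power/always_on # release it again
+ */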
+
+static ssize_t power_power_events_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ char buf[32];
+
+ /* Copy the event string into a kernel buffer before comparing; the
+ * __user pointer must not be dereferenced directly. */
+ cnt = min(cnt, sizeof(buf) - 1);
+ if (copy_from_user(buf, ubuf, cnt)) {
+ return -EFAULT;
+ }
+ buf[cnt] = '\0';
+
+ if (!strncmp(buf, mali_power_events[_MALI_DEVICE_SUSPEND], strlen(mali_power_events[_MALI_DEVICE_SUSPEND]) - 1)) {
+ mali_pm_os_suspend(MALI_TRUE);
+ } else if (!strncmp(buf, mali_power_events[_MALI_DEVICE_RESUME], strlen(mali_power_events[_MALI_DEVICE_RESUME]) - 1)) {
+ mali_pm_os_resume();
+ } else if (!strncmp(buf, mali_power_events[_MALI_DEVICE_DVFS_PAUSE], strlen(mali_power_events[_MALI_DEVICE_DVFS_PAUSE]) - 1)) {
+ mali_dev_pause();
+ } else if (!strncmp(buf, mali_power_events[_MALI_DEVICE_DVFS_RESUME], strlen(mali_power_events[_MALI_DEVICE_DVFS_RESUME]) - 1)) {
+ mali_dev_resume();
+ }
+ *ppos += cnt;
+ return cnt;
+}
+
+static loff_t power_power_events_seek(struct file *file, loff_t offset, int orig)
+{
+ file->f_pos = offset;
+ return 0;
+}
+
+static const struct file_operations power_power_events_fops = {
+ .owner = THIS_MODULE,
+ .write = power_power_events_write,
+ .llseek = power_power_events_seek,
+};
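+
+/*
+ * Usage sketch: writing one of the strings from mali_power_events[]
+ * triggers the corresponding transition, e.g.:
+ * echo suspend > /sys/kernel/debug/mali/power/power_events
+ */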
+
+#if MALI_STATE_TRACKING
+static int mali_seq_internal_state_show(struct seq_file *seq_file, void *v)
+{
+ u32 len = 0;
+ u32 size;
+ char *buf;
+
+ size = seq_get_buf(seq_file, &buf);
+
+ if (!size) {
+ return -ENOMEM;
+ }
+
+ /* Create the internal state dump. */
+ len = snprintf(buf + len, size - len, "Mali device driver %s\n", SVN_REV_STRING);
+ len += snprintf(buf + len, size - len, "License: %s\n\n", MALI_KERNEL_LINUX_LICENSE);
+
+ len += _mali_kernel_core_dump_state(buf + len, size - len);
+
+ seq_commit(seq_file, len);
+
+ return 0;
+}
+
+static int mali_seq_internal_state_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mali_seq_internal_state_show, NULL);
+}
+
+static const struct file_operations mali_seq_internal_state_fops = {
+ .owner = THIS_MODULE,
+ .open = mali_seq_internal_state_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif /* MALI_STATE_TRACKING */
+
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+static ssize_t profiling_record_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ int r;
+
+ r = snprintf(buf, 64, "%u\n", _mali_internal_profiling_is_recording() ? 1 : 0);
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t profiling_record_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ unsigned long val;
+ int ret;
+
+ if (cnt >= sizeof(buf)) {
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&buf, ubuf, cnt)) {
+ return -EFAULT;
+ }
+
+ buf[cnt] = 0;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (val != 0) {
+ u32 limit = MALI_PROFILING_MAX_BUFFER_ENTRIES; /* This can be made configurable at a later stage if we need to */
+
+ /* check if we are already recording */
+ if (MALI_TRUE == _mali_internal_profiling_is_recording()) {
+ MALI_DEBUG_PRINT(3, ("Recording of profiling events already in progress\n"));
+ return -EFAULT;
+ }
+
+ /* check if we need to clear out an old recording first */
+ if (MALI_TRUE == _mali_internal_profiling_have_recording()) {
+ if (_MALI_OSK_ERR_OK != _mali_internal_profiling_clear()) {
+ MALI_DEBUG_PRINT(3, ("Failed to clear existing recording of profiling events\n"));
+ return -EFAULT;
+ }
+ }
+
+ /* start recording profiling data */
+ if (_MALI_OSK_ERR_OK != _mali_internal_profiling_start(&limit)) {
+ MALI_DEBUG_PRINT(3, ("Failed to start recording of profiling events\n"));
+ return -EFAULT;
+ }
+
+ MALI_DEBUG_PRINT(3, ("Profiling recording started (max %u events)\n", limit));
+ } else {
+ /* stop recording profiling data */
+ u32 count = 0;
+ if (_MALI_OSK_ERR_OK != _mali_internal_profiling_stop(&count)) {
+ MALI_DEBUG_PRINT(2, ("Failed to stop recording of profiling events\n"));
+ return -EFAULT;
+ }
+
+ MALI_DEBUG_PRINT(2, ("Profiling recording stopped (recorded %u events)\n", count));
+ }
+
+ *ppos += cnt;
+ return cnt;
+}
+
+static const struct file_operations profiling_record_fops = {
+ .owner = THIS_MODULE,
+ .read = profiling_record_read,
+ .write = profiling_record_write,
+};
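+
+/*
+ * Typical internal-profiling session (sketch; same path assumptions as
+ * above):
+ * echo 1 > /sys/kernel/debug/mali/profiling/record # start recording
+ * ... run the workload ...
+ * echo 0 > /sys/kernel/debug/mali/profiling/record # stop recording
+ * cat /sys/kernel/debug/mali/profiling/events_human_readable
+ */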
+
+static void *profiling_events_start(struct seq_file *s, loff_t *pos)
+{
+ loff_t *spos;
+
+ /* check if we have data available */
+ if (MALI_TRUE != _mali_internal_profiling_have_recording()) {
+ return NULL;
+ }
+
+ spos = kmalloc(sizeof(loff_t), GFP_KERNEL);
+ if (NULL == spos) {
+ return NULL;
+ }
+
+ *spos = *pos;
+ return spos;
+}
+
+static void *profiling_events_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ loff_t *spos = v;
+
+ /* check if we have data available */
+ if (MALI_TRUE != _mali_internal_profiling_have_recording()) {
+ return NULL;
+ }
+
+ /* check if the next entry actually is available */
+ if (_mali_internal_profiling_get_count() <= (u32)(*spos + 1)) {
+ return NULL;
+ }
+
+ *pos = ++*spos;
+ return spos;
+}
+
+static void profiling_events_stop(struct seq_file *s, void *v)
+{
+ kfree(v);
+}
+
+static int profiling_events_show(struct seq_file *seq_file, void *v)
+{
+ loff_t *spos = v;
+ u32 index;
+ u64 timestamp;
+ u32 event_id;
+ u32 data[5];
+
+ index = (u32)*spos;
+
+ /* Retrieve all events */
+ if (_MALI_OSK_ERR_OK == _mali_internal_profiling_get_event(index, &timestamp, &event_id, data)) {
+ seq_printf(seq_file, "%llu %u %u %u %u %u %u\n", timestamp, event_id, data[0], data[1], data[2], data[3], data[4]);
+ return 0;
+ }
+
+ return 0;
+}
+
+static int profiling_events_show_human_readable(struct seq_file *seq_file, void *v)
+{
+#define MALI_EVENT_ID_IS_HW(event_id) (((event_id & 0x00FF0000) >= MALI_PROFILING_EVENT_CHANNEL_GP0) && ((event_id & 0x00FF0000) <= MALI_PROFILING_EVENT_CHANNEL_PP7))
+
+ static u64 start_time = 0;
+ loff_t *spos = v;
+ u32 index;
+ u64 timestamp;
+ u32 event_id;
+ u32 data[5];
+
+ index = (u32)*spos;
+
+ /* Retrieve all events */
+ if (_MALI_OSK_ERR_OK == _mali_internal_profiling_get_event(index, &timestamp, &event_id, data)) {
+ seq_printf(seq_file, "%llu %u %u %u %u %u %u # ", timestamp, event_id, data[0], data[1], data[2], data[3], data[4]);
+
+ if (0 == index) {
+ start_time = timestamp;
+ }
+
+ seq_printf(seq_file, "[%06u] ", index);
+
+ switch (event_id & 0x0F000000) {
+ case MALI_PROFILING_EVENT_TYPE_SINGLE:
+ seq_printf(seq_file, "SINGLE | ");
+ break;
+ case MALI_PROFILING_EVENT_TYPE_START:
+ seq_printf(seq_file, "START | ");
+ break;
+ case MALI_PROFILING_EVENT_TYPE_STOP:
+ seq_printf(seq_file, "STOP | ");
+ break;
+ case MALI_PROFILING_EVENT_TYPE_SUSPEND:
+ seq_printf(seq_file, "SUSPEND | ");
+ break;
+ case MALI_PROFILING_EVENT_TYPE_RESUME:
+ seq_printf(seq_file, "RESUME | ");
+ break;
+ default:
+ seq_printf(seq_file, "0x%01X | ", (event_id & 0x0F000000) >> 24);
+ break;
+ }
+
+ switch (event_id & 0x00FF0000) {
+ case MALI_PROFILING_EVENT_CHANNEL_SOFTWARE:
+ seq_printf(seq_file, "SW | ");
+ break;
+ case MALI_PROFILING_EVENT_CHANNEL_GP0:
+ seq_printf(seq_file, "GP0 | ");
+ break;
+ case MALI_PROFILING_EVENT_CHANNEL_PP0:
+ seq_printf(seq_file, "PP0 | ");
+ break;
+ case MALI_PROFILING_EVENT_CHANNEL_PP1:
+ seq_printf(seq_file, "PP1 | ");
+ break;
+ case MALI_PROFILING_EVENT_CHANNEL_PP2:
+ seq_printf(seq_file, "PP2 | ");
+ break;
+ case MALI_PROFILING_EVENT_CHANNEL_PP3:
+ seq_printf(seq_file, "PP3 | ");
+ break;
+ case MALI_PROFILING_EVENT_CHANNEL_PP4:
+ seq_printf(seq_file, "PP4 | ");
+ break;
+ case MALI_PROFILING_EVENT_CHANNEL_PP5:
+ seq_printf(seq_file, "PP5 | ");
+ break;
+ case MALI_PROFILING_EVENT_CHANNEL_PP6:
+ seq_printf(seq_file, "PP6 | ");
+ break;
+ case MALI_PROFILING_EVENT_CHANNEL_PP7:
+ seq_printf(seq_file, "PP7 | ");
+ break;
+ case MALI_PROFILING_EVENT_CHANNEL_GPU:
+ seq_printf(seq_file, "GPU | ");
+ break;
+ default:
+ seq_printf(seq_file, "0x%02X | ", (event_id & 0x00FF0000) >> 16);
+ break;
+ }
+
+ if (MALI_EVENT_ID_IS_HW(event_id)) {
+ if (((event_id & 0x0F000000) == MALI_PROFILING_EVENT_TYPE_START) || ((event_id & 0x0F000000) == MALI_PROFILING_EVENT_TYPE_STOP)) {
+ switch (event_id & 0x0000FFFF) {
+ case MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL:
+ seq_printf(seq_file, "PHYSICAL | ");
+ break;
+ case MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL:
+ seq_printf(seq_file, "VIRTUAL | ");
+ break;
+ default:
+ seq_printf(seq_file, "0x%04X | ", event_id & 0x0000FFFF);
+ break;
+ }
+ } else {
+ seq_printf(seq_file, "0x%04X | ", event_id & 0x0000FFFF);
+ }
+ } else {
+ seq_printf(seq_file, "0x%04X | ", event_id & 0x0000FFFF);
+ }
+
+ seq_printf(seq_file, "T0 + 0x%016llX\n", timestamp - start_time);
+
+ return 0;
+ }
+
+ return 0;
+}
+
+static const struct seq_operations profiling_events_seq_ops = {
+ .start = profiling_events_start,
+ .next = profiling_events_next,
+ .stop = profiling_events_stop,
+ .show = profiling_events_show
+};
+
+static int profiling_events_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &profiling_events_seq_ops);
+}
+
+static const struct file_operations profiling_events_fops = {
+ .owner = THIS_MODULE,
+ .open = profiling_events_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static const struct seq_operations profiling_events_human_readable_seq_ops = {
+ .start = profiling_events_start,
+ .next = profiling_events_next,
+ .stop = profiling_events_stop,
+ .show = profiling_events_show_human_readable
+};
+
+static int profiling_events_human_readable_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &profiling_events_human_readable_seq_ops);
+}
+
+static const struct file_operations profiling_events_human_readable_fops = {
+ .owner = THIS_MODULE,
+ .open = profiling_events_human_readable_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+#endif
+
+static int memory_debugfs_show(struct seq_file *s, void *private_data)
+{
+#ifdef MALI_MEM_SWAP_TRACKING
+ seq_printf(s, " %-25s %-10s %-10s %-15s %-15s %-10s %-10s %-10s \n"\
+ "=================================================================================================================================\n",
+ "Name (:bytes)", "pid", "mali_mem", "max_mali_mem",
+ "external_mem", "ump_mem", "dma_mem", "swap_mem");
+#else
+ seq_printf(s, " %-25s %-10s %-10s %-15s %-15s %-10s %-10s \n"\
+ "========================================================================================================================\n",
+ "Name (:bytes)", "pid", "mali_mem", "max_mali_mem",
+ "external_mem", "ump_mem", "dma_mem");
+#endif
+ mali_session_memory_tracking(s);
+ return 0;
+}
+
+static int memory_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, memory_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations memory_usage_fops = {
+ .owner = THIS_MODULE,
+ .open = memory_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static ssize_t utilization_gp_pp_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ size_t r;
+ u32 uval = _mali_ukk_utilization_gp_pp();
+
+ r = snprintf(buf, 64, "%u\n", uval);
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t utilization_gp_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ size_t r;
+ u32 uval = _mali_ukk_utilization_gp();
+
+ r = snprintf(buf, 64, "%u\n", uval);
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t utilization_pp_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ size_t r;
+ u32 uval = _mali_ukk_utilization_pp();
+
+ r = snprintf(buf, 64, "%u\n", uval);
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+
+static const struct file_operations utilization_gp_pp_fops = {
+ .owner = THIS_MODULE,
+ .read = utilization_gp_pp_read,
+};
+
+static const struct file_operations utilization_gp_fops = {
+ .owner = THIS_MODULE,
+ .read = utilization_gp_read,
+};
+
+static const struct file_operations utilization_pp_fops = {
+ .owner = THIS_MODULE,
+ .read = utilization_pp_read,
+};
+
+static ssize_t user_settings_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ unsigned long val;
+ int ret;
+ _mali_uk_user_setting_t setting;
+ char buf[32];
+
+ cnt = min(cnt, sizeof(buf) - 1);
+ if (copy_from_user(buf, ubuf, cnt)) {
+ return -EFAULT;
+ }
+ buf[cnt] = '\0';
+
+ ret = kstrtoul(buf, 10, &val);
+ if (0 != ret) {
+ return ret;
+ }
+
+ /* Update setting */
+ setting = (_mali_uk_user_setting_t)(filp->private_data);
+ mali_set_user_setting(setting, val);
+
+ *ppos += cnt;
+ return cnt;
+}
+
+static ssize_t user_settings_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ size_t r;
+ u32 value;
+ _mali_uk_user_setting_t setting;
+
+ setting = (_mali_uk_user_setting_t)(filp->private_data);
+ value = mali_get_user_setting(setting);
+
+ r = snprintf(buf, 64, "%u\n", value);
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static const struct file_operations user_settings_fops = {
+ .owner = THIS_MODULE,
+ .open = open_copy_private_data,
+ .read = user_settings_read,
+ .write = user_settings_write,
+};
+
+static int mali_sysfs_user_settings_register(void)
+{
+ struct dentry *mali_user_settings_dir = debugfs_create_dir("userspace_settings", mali_debugfs_dir);
+
+ if (mali_user_settings_dir != NULL) {
+ long i;
+ for (i = 0; i < _MALI_UK_USER_SETTING_MAX; i++) {
+ debugfs_create_file(_mali_uk_user_setting_descriptions[i],
+ 0600, mali_user_settings_dir, (void *)i,
+ &user_settings_fops);
+ }
+ }
+
+ return 0;
+}
+
+static ssize_t pp_num_cores_enabled_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp)
+{
+ int ret;
+ char buffer[32];
+ unsigned long val;
+
+ if (count >= sizeof(buffer)) {
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(&buffer[0], buf, count)) {
+ return -EFAULT;
+ }
+ buffer[count] = '\0';
+
+ ret = kstrtoul(&buffer[0], 10, &val);
+ if (0 != ret) {
+ return -EINVAL;
+ }
+
+ ret = mali_executor_set_perf_level(val, MALI_TRUE); /* override even if core scaling is disabled */
+ if (ret) {
+ return ret;
+ }
+
+ *offp += count;
+ return count;
+}
+
+static ssize_t pp_num_cores_enabled_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
+{
+ int r;
+ char buffer[64];
+
+ r = snprintf(buffer, 64, "%u\n", mali_executor_get_num_cores_enabled());
+
+ return simple_read_from_buffer(buf, count, offp, buffer, r);
+}
+
+static const struct file_operations pp_num_cores_enabled_fops = {
+ .owner = THIS_MODULE,
+ .write = pp_num_cores_enabled_write,
+ .read = pp_num_cores_enabled_read,
+ .llseek = default_llseek,
+};
+
+static ssize_t pp_num_cores_total_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
+{
+ int r;
+ char buffer[64];
+
+ r = snprintf(buffer, 64, "%u\n", mali_executor_get_num_cores_total());
+
+ return simple_read_from_buffer(buf, count, offp, buffer, r);
+}
+
+static const struct file_operations pp_num_cores_total_fops = {
+ .owner = THIS_MODULE,
+ .read = pp_num_cores_total_read,
+};
+
+static ssize_t pp_core_scaling_enabled_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp)
+{
+ int ret;
+ char buffer[32];
+ unsigned long val;
+
+ if (count >= sizeof(buffer)) {
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(&buffer[0], buf, count)) {
+ return -EFAULT;
+ }
+ buffer[count] = '\0';
+
+ ret = kstrtoul(&buffer[0], 10, &val);
+ if (0 != ret) {
+ return -EINVAL;
+ }
+
+ switch (val) {
+ case 1:
+ mali_executor_core_scaling_enable();
+ break;
+ case 0:
+ mali_executor_core_scaling_disable();
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *offp += count;
+ return count;
+}
+
+static ssize_t pp_core_scaling_enabled_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
+{
+ return simple_read_from_buffer(buf, count, offp, mali_executor_core_scaling_is_enabled() ? "1\n" : "0\n", 2);
+}
+static const struct file_operations pp_core_scaling_enabled_fops = {
+ .owner = THIS_MODULE,
+ .write = pp_core_scaling_enabled_write,
+ .read = pp_core_scaling_enabled_read,
+ .llseek = default_llseek,
+};
+
+static ssize_t version_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
+{
+ int r = 0;
+ char buffer[64];
+
+ switch (mali_kernel_core_get_product_id()) {
+ case _MALI_PRODUCT_ID_MALI200:
+ r = snprintf(buffer, 64, "Mali-200\n");
+ break;
+ case _MALI_PRODUCT_ID_MALI300:
+ r = snprintf(buffer, 64, "Mali-300\n");
+ break;
+ case _MALI_PRODUCT_ID_MALI400:
+ r = snprintf(buffer, 64, "Mali-400 MP\n");
+ break;
+ case _MALI_PRODUCT_ID_MALI450:
+ r = snprintf(buffer, 64, "Mali-450 MP\n");
+ break;
+ case _MALI_PRODUCT_ID_MALI470:
+ r = snprintf(buffer, 64, "Mali-470 MP\n");
+ break;
+ case _MALI_PRODUCT_ID_UNKNOWN:
+ return -EINVAL;
+ }
+
+ return simple_read_from_buffer(buf, count, offp, buffer, r);
+}
+
+static const struct file_operations version_fops = {
+ .owner = THIS_MODULE,
+ .read = version_read,
+};
+
+#if defined(DEBUG)
+static int timeline_debugfs_show(struct seq_file *s, void *private_data)
+{
+ struct mali_session_data *session, *tmp;
+ u32 session_seq = 1;
+
+ seq_printf(s, "timeline system info: \n=================\n\n");
+
+ mali_session_lock();
+ MALI_SESSION_FOREACH(session, tmp, link) {
+ seq_printf(s, "session %d <%p> start:\n", session_seq, session);
+ mali_timeline_debug_print_system(session->timeline_system, s);
+ seq_printf(s, "session %d end\n\n\n", session_seq++);
+ }
+ mali_session_unlock();
+
+ return 0;
+}
+
+static int timeline_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, timeline_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations timeline_dump_fops = {
+ .owner = THIS_MODULE,
+ .open = timeline_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release
+};
+#endif
+
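+/*
+ * Overview of the debugfs tree created below, rooted at
+ * <debugfs>/<mali_dev_name> (summarized from the code that follows):
+ *
+ * version, gpu_memory, utilization_gp_pp, utilization_gp, utilization_pp
+ * power/{always_on,power_events}
+ * gp/gp0/{base_addr,enabled}
+ * pp/{num_cores_total,num_cores_enabled,core_scaling_enabled}
+ * pp/pp<N>/{base_addr,enabled}
+ * l2/all/{counter_src0,counter_src1}
+ * l2/l2<N>/{counter_src0,counter_src1,counter_val0,counter_val1,base_addr}
+ * profiling/{gp,pp}/counter_src{0,1} and profiling/pp/<N>/counter_src{0,1}
+ * profiling/{record,events,events_human_readable,proc/} (internal profiling)
+ * userspace_settings/<one file per user setting>
+ * state_dump (state tracking) and timeline_dump (DEBUG builds)
+ */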
+int mali_sysfs_register(const char *mali_dev_name)
+{
+ mali_debugfs_dir = debugfs_create_dir(mali_dev_name, NULL);
+ if (ERR_PTR(-ENODEV) == mali_debugfs_dir) {
+ /* Debugfs not supported. */
+ mali_debugfs_dir = NULL;
+ } else {
+ if (NULL != mali_debugfs_dir) {
+ /* Debugfs directory created successfully; create files now */
+ struct dentry *mali_power_dir;
+ struct dentry *mali_gp_dir;
+ struct dentry *mali_pp_dir;
+ struct dentry *mali_l2_dir;
+ struct dentry *mali_profiling_dir;
+
+ debugfs_create_file("version", 0400, mali_debugfs_dir, NULL, &version_fops);
+
+ mali_power_dir = debugfs_create_dir("power", mali_debugfs_dir);
+ if (mali_power_dir != NULL) {
+ debugfs_create_file("always_on", 0600, mali_power_dir, NULL, &power_always_on_fops);
+ debugfs_create_file("power_events", 0200, mali_power_dir, NULL, &power_power_events_fops);
+ }
+
+ mali_gp_dir = debugfs_create_dir("gp", mali_debugfs_dir);
+ if (mali_gp_dir != NULL) {
+ u32 num_groups;
+ long i;
+
+ num_groups = mali_group_get_glob_num_groups();
+ for (i = 0; i < num_groups; i++) {
+ struct mali_group *group = mali_group_get_glob_group(i);
+
+ struct mali_gp_core *gp_core = mali_group_get_gp_core(group);
+ if (NULL != gp_core) {
+ struct dentry *mali_gp_gpx_dir;
+ mali_gp_gpx_dir = debugfs_create_dir("gp0", mali_gp_dir);
+ if (NULL != mali_gp_gpx_dir) {
+ debugfs_create_file("base_addr", 0400, mali_gp_gpx_dir, &gp_core->hw_core, &hw_core_base_addr_fops);
+ debugfs_create_file("enabled", 0600, mali_gp_gpx_dir, group, &group_enabled_fops);
+ }
+ break; /* no need to look for any other GP cores */
+ }
+
+ }
+ }
+
+ mali_pp_dir = debugfs_create_dir("pp", mali_debugfs_dir);
+ if (mali_pp_dir != NULL) {
+ u32 num_groups;
+ long i;
+
+ debugfs_create_file("num_cores_total", 0400, mali_pp_dir, NULL, &pp_num_cores_total_fops);
+ debugfs_create_file("num_cores_enabled", 0600, mali_pp_dir, NULL, &pp_num_cores_enabled_fops);
+ debugfs_create_file("core_scaling_enabled", 0600, mali_pp_dir, NULL, &pp_core_scaling_enabled_fops);
+
+ num_groups = mali_group_get_glob_num_groups();
+ for (i = 0; i < num_groups; i++) {
+ struct mali_group *group = mali_group_get_glob_group(i);
+
+ struct mali_pp_core *pp_core = mali_group_get_pp_core(group);
+ if (NULL != pp_core) {
+ char buf[16];
+ struct dentry *mali_pp_ppx_dir;
+ _mali_osk_snprintf(buf, sizeof(buf), "pp%u", mali_pp_core_get_id(pp_core));
+ mali_pp_ppx_dir = debugfs_create_dir(buf, mali_pp_dir);
+ if (NULL != mali_pp_ppx_dir) {
+ debugfs_create_file("base_addr", 0400, mali_pp_ppx_dir, &pp_core->hw_core, &hw_core_base_addr_fops);
+ if (!mali_group_is_virtual(group)) {
+ debugfs_create_file("enabled", 0600, mali_pp_ppx_dir, group, &group_enabled_fops);
+ }
+ }
+ }
+ }
+ }
+
+ mali_l2_dir = debugfs_create_dir("l2", mali_debugfs_dir);
+ if (mali_l2_dir != NULL) {
+ struct dentry *mali_l2_all_dir;
+ u32 l2_id;
+ struct mali_l2_cache_core *l2_cache;
+
+ mali_l2_all_dir = debugfs_create_dir("all", mali_l2_dir);
+ if (mali_l2_all_dir != NULL) {
+ debugfs_create_file("counter_src0", 0200, mali_l2_all_dir, NULL, &l2_all_counter_src0_fops);
+ debugfs_create_file("counter_src1", 0200, mali_l2_all_dir, NULL, &l2_all_counter_src1_fops);
+ }
+
+ l2_id = 0;
+ l2_cache = mali_l2_cache_core_get_glob_l2_core(l2_id);
+ while (NULL != l2_cache) {
+ char buf[16];
+ struct dentry *mali_l2_l2x_dir;
+ _mali_osk_snprintf(buf, sizeof(buf), "l2%u", l2_id);
+ mali_l2_l2x_dir = debugfs_create_dir(buf, mali_l2_dir);
+ if (NULL != mali_l2_l2x_dir) {
+ debugfs_create_file("counter_src0", 0600, mali_l2_l2x_dir, l2_cache, &l2_l2x_counter_src0_fops);
+ debugfs_create_file("counter_src1", 0600, mali_l2_l2x_dir, l2_cache, &l2_l2x_counter_src1_fops);
+ debugfs_create_file("counter_val0", 0600, mali_l2_l2x_dir, l2_cache, &l2_l2x_counter_val0_fops);
+ debugfs_create_file("counter_val1", 0600, mali_l2_l2x_dir, l2_cache, &l2_l2x_counter_val1_fops);
+ debugfs_create_file("base_addr", 0400, mali_l2_l2x_dir, &l2_cache->hw_core, &hw_core_base_addr_fops);
+ }
+
+ /* try next L2 */
+ l2_id++;
+ l2_cache = mali_l2_cache_core_get_glob_l2_core(l2_id);
+ }
+ }
+
+ debugfs_create_file("gpu_memory", 0444, mali_debugfs_dir, NULL, &memory_usage_fops);
+
+ debugfs_create_file("utilization_gp_pp", 0400, mali_debugfs_dir, NULL, &utilization_gp_pp_fops);
+ debugfs_create_file("utilization_gp", 0400, mali_debugfs_dir, NULL, &utilization_gp_fops);
+ debugfs_create_file("utilization_pp", 0400, mali_debugfs_dir, NULL, &utilization_pp_fops);
+
+ mali_profiling_dir = debugfs_create_dir("profiling", mali_debugfs_dir);
+ if (mali_profiling_dir != NULL) {
+ u32 max_sub_jobs;
+ long i;
+ struct dentry *mali_profiling_gp_dir;
+ struct dentry *mali_profiling_pp_dir;
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+ struct dentry *mali_profiling_proc_dir;
+#endif
+ /*
+ * Create directory where we can set GP HW counters.
+ */
+ mali_profiling_gp_dir = debugfs_create_dir("gp", mali_profiling_dir);
+ if (mali_profiling_gp_dir != NULL) {
+ debugfs_create_file("counter_src0", 0600, mali_profiling_gp_dir, (void *)PRIVATE_DATA_COUNTER_MAKE_GP(0), &profiling_counter_src_fops);
+ debugfs_create_file("counter_src1", 0600, mali_profiling_gp_dir, (void *)PRIVATE_DATA_COUNTER_MAKE_GP(1), &profiling_counter_src_fops);
+ }
+
+ /*
+ * Create directory where we can set PP HW counters. These defaults
+ * can be overridden with specific HW counters for a particular sub
+ * job. (Disable core scaling before using the override!)
+ */
+ mali_profiling_pp_dir = debugfs_create_dir("pp", mali_profiling_dir);
+ if (mali_profiling_pp_dir != NULL) {
+ debugfs_create_file("counter_src0", 0600, mali_profiling_pp_dir, (void *)PRIVATE_DATA_COUNTER_MAKE_PP(0), &profiling_counter_src_fops);
+ debugfs_create_file("counter_src1", 0600, mali_profiling_pp_dir, (void *)PRIVATE_DATA_COUNTER_MAKE_PP(1), &profiling_counter_src_fops);
+ }
+
+ max_sub_jobs = mali_executor_get_num_cores_total();
+ for (i = 0; i < max_sub_jobs; i++) {
+ char buf[16];
+ struct dentry *mali_profiling_pp_x_dir;
+ _mali_osk_snprintf(buf, sizeof(buf), "%u", i);
+ mali_profiling_pp_x_dir = debugfs_create_dir(buf, mali_profiling_pp_dir);
+ if (NULL != mali_profiling_pp_x_dir) {
+ debugfs_create_file("counter_src0",
+ 0600, mali_profiling_pp_x_dir,
+ (void *)PRIVATE_DATA_COUNTER_MAKE_PP_SUB_JOB(0, i),
+ &profiling_counter_src_fops);
+ debugfs_create_file("counter_src1",
+ 0600, mali_profiling_pp_x_dir,
+ (void *)PRIVATE_DATA_COUNTER_MAKE_PP_SUB_JOB(1, i),
+ &profiling_counter_src_fops);
+ }
+ }
+
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+ mali_profiling_proc_dir = debugfs_create_dir("proc", mali_profiling_dir);
+ if (mali_profiling_proc_dir != NULL) {
+ struct dentry *mali_profiling_proc_default_dir = debugfs_create_dir("default", mali_profiling_proc_dir);
+ if (mali_profiling_proc_default_dir != NULL) {
+ debugfs_create_file("enable", 0600, mali_profiling_proc_default_dir, (void *)_MALI_UK_USER_SETTING_SW_EVENTS_ENABLE, &user_settings_fops);
+ }
+ }
+ debugfs_create_file("record", 0600, mali_profiling_dir, NULL, &profiling_record_fops);
+ debugfs_create_file("events", 0400, mali_profiling_dir, NULL, &profiling_events_fops);
+ debugfs_create_file("events_human_readable", 0400, mali_profiling_dir, NULL, &profiling_events_human_readable_fops);
+#endif
+ }
+
+#if MALI_STATE_TRACKING
+ debugfs_create_file("state_dump", 0400, mali_debugfs_dir, NULL, &mali_seq_internal_state_fops);
+#endif
+
+#if defined(DEBUG)
+ debugfs_create_file("timeline_dump", 0400, mali_debugfs_dir, NULL, &timeline_dump_fops);
+#endif
+ if (mali_sysfs_user_settings_register()) {
+ /* Failed to create the debugfs entries for the user settings DB. */
+ MALI_DEBUG_PRINT(2, ("Failed to create user setting debugfs files. Ignoring...\n"));
+ }
+ }
+ }
+
+ /* Success! */
+ return 0;
+}
+
+int mali_sysfs_unregister(void)
+{
+ if (NULL != mali_debugfs_dir) {
+ debugfs_remove_recursive(mali_debugfs_dir);
+ }
+ return 0;
+}
+
+#else /* MALI_LICENSE_IS_GPL */
+
+/* Dummy implementations for non-GPL */
+
+int mali_sysfs_register(const char *mali_dev_name)
+{
+ return 0;
+}
+
+int mali_sysfs_unregister(void)
+{
+ return 0;
+}
+
+#endif /* MALI_LICENSE_IS_GPL */
diff --git a/drivers/gpu/arm/utgard/linux/mali_kernel_sysfs.h b/drivers/gpu/arm/utgard/linux/mali_kernel_sysfs.h
new file mode 100644
index 000000000000..a36a0cea9972
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_kernel_sysfs.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2011-2013, 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_SYSFS_H__
+#define __MALI_KERNEL_SYSFS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/device.h>
+
+#define MALI_PROC_DIR "driver/mali"
+
+int mali_sysfs_register(const char *mali_dev_name);
+int mali_sysfs_unregister(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_KERNEL_SYSFS_H__ */
diff --git a/drivers/gpu/arm/utgard/linux/mali_linux_trace.h b/drivers/gpu/arm/utgard/linux/mali_linux_trace.h
new file mode 100644
index 000000000000..c6cd2bfb7217
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_linux_trace.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2012-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#if !defined(MALI_LINUX_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define MALI_LINUX_TRACE_H
+
+#include <linux/types.h>
+
+#include <linux/stringify.h>
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mali
+#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE mali_linux_trace
+
+/**
+ * Define the tracepoint used to communicate the status of a GPU. Called
+ * when a GPU turns on or turns off.
+ *
+ * @param event_id The type of the event. This parameter is a bitfield
+ * encoding the type of the event.
+ *
+ * @param d0 First data parameter.
+ * @param d1 Second data parameter.
+ * @param d2 Third data parameter.
+ * @param d3 Fourth data parameter.
+ * @param d4 Fifth data parameter.
+ */
+TRACE_EVENT(mali_timeline_event,
+
+ TP_PROTO(unsigned int event_id, unsigned int d0, unsigned int d1,
+ unsigned int d2, unsigned int d3, unsigned int d4),
+
+ TP_ARGS(event_id, d0, d1, d2, d3, d4),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, event_id)
+ __field(unsigned int, d0)
+ __field(unsigned int, d1)
+ __field(unsigned int, d2)
+ __field(unsigned int, d3)
+ __field(unsigned int, d4)
+ ),
+
+ TP_fast_assign(
+ __entry->event_id = event_id;
+ __entry->d0 = d0;
+ __entry->d1 = d1;
+ __entry->d2 = d2;
+ __entry->d3 = d3;
+ __entry->d4 = d4;
+ ),
+
+ TP_printk("event=%d", __entry->event_id)
+ );
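+/*
+ * Usage sketch (illustrative, not part of the driver sources): TRACE_EVENT()
+ * generates a trace_mali_timeline_event() emitter, so a caller with this
+ * header in scope would report an event roughly as follows; the argument
+ * values are hypothetical.
+ *
+ * trace_mali_timeline_event(event_id, d0, d1, d2, d3, d4);
+ *
+ * The call compiles down to a static-branch no-op unless the tracepoint is
+ * enabled through tracefs.
+ */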
+
+/**
+ * Define a tracepoint used to register the value of a hardware counter.
+ * Hardware counters belonging to the vertex or fragment processor are
+ * reported via this tracepoint each frame, whilst L2 cache hardware
+ * counters are reported continuously.
+ *
+ * @param counter_id The counter ID.
+ * @param value The value of the counter.
+ */
+TRACE_EVENT(mali_hw_counter,
+
+ TP_PROTO(unsigned int counter_id, unsigned int value),
+
+ TP_ARGS(counter_id, value),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, counter_id)
+ __field(unsigned int, value)
+ ),
+
+ TP_fast_assign(
+ __entry->counter_id = counter_id;
+ __entry->value = value;
+ ),
+
+ TP_printk("event %d = %d", __entry->counter_id, __entry->value)
+ );
+
+/**
+ * Define a tracepoint used to send a bundle of software counters.
+ *
+ * @param pid The process ID the counters belong to.
+ * @param tid The thread ID the counters belong to.
+ * @param surface_id The surface the counters were captured for.
+ * @param counters The bundle of counters.
+ */
+TRACE_EVENT(mali_sw_counters,
+
+ TP_PROTO(pid_t pid, pid_t tid, void *surface_id, unsigned int *counters),
+
+ TP_ARGS(pid, tid, surface_id, counters),
+
+ TP_STRUCT__entry(
+ __field(pid_t, pid)
+ __field(pid_t, tid)
+ __field(void *, surface_id)
+ __field(unsigned int *, counters)
+ ),
+
+ TP_fast_assign(
+ __entry->pid = pid;
+ __entry->tid = tid;
+ __entry->surface_id = surface_id;
+ __entry->counters = counters;
+ ),
+
+ TP_printk("counters were %s", __entry->counters == NULL ? "NULL" : "not NULL")
+ );
+
+/**
+ * Define a tracepoint used to gather core activity for systrace
+ * @param pid The process id for which the core activity originates from
+ * @param active If the core is active (1) or not (0)
+ * @param core_type The type of core active, either GP (1) or PP (0)
+ * @param core_id The core id that is active for the core_type
+ * @param frame_builder_id The frame builder id associated with this core activity
+ * @param flush_id The flush id associated with this core activity
+ */
+TRACE_EVENT(mali_core_active,
+
+ TP_PROTO(pid_t pid, unsigned int active, unsigned int core_type, unsigned int core_id, unsigned int frame_builder_id, unsigned int flush_id),
+
+ TP_ARGS(pid, active, core_type, core_id, frame_builder_id, flush_id),
+
+ TP_STRUCT__entry(
+ __field(pid_t, pid)
+ __field(unsigned int, active)
+ __field(unsigned int, core_type)
+ __field(unsigned int, core_id)
+ __field(unsigned int, frame_builder_id)
+ __field(unsigned int, flush_id)
+ ),
+
+ TP_fast_assign(
+ __entry->pid = pid;
+ __entry->active = active;
+ __entry->core_type = core_type;
+ __entry->core_id = core_id;
+ __entry->frame_builder_id = frame_builder_id;
+ __entry->flush_id = flush_id;
+ ),
+
+ TP_printk("%s|%d|%s%i:%x|%d", __entry->active ? "S" : "F", __entry->pid, __entry->core_type ? "GP" : "PP", __entry->core_id, __entry->flush_id, __entry->frame_builder_id)
+ );
+
+#endif /* MALI_LINUX_TRACE_H */
+
+/* This part must exist outside the header guard. */
+#include <trace/define_trace.h>
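+/*
+ * Instantiation sketch (illustrative): the kernel tracepoint convention is
+ * that exactly one .c file defines CREATE_TRACE_POINTS before including this
+ * header, which makes <trace/define_trace.h> emit the tracepoint bodies:
+ *
+ * #define CREATE_TRACE_POINTS
+ * #include "mali_linux_trace.h"
+ *
+ * Every other user includes the header without CREATE_TRACE_POINTS.
+ */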
+
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory.c b/drivers/gpu/arm/utgard/linux/mali_memory.c
new file mode 100644
index 000000000000..c45f6ee25887
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory.c
@@ -0,0 +1,510 @@
+/*
+ * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/fs.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/platform_device.h>
+#include <linux/idr.h>
+
+#include "mali_osk.h"
+#include "mali_executor.h"
+
+#include "mali_memory.h"
+#include "mali_memory_os_alloc.h"
+#include "mali_memory_block_alloc.h"
+#include "mali_memory_util.h"
+#include "mali_memory_virtual.h"
+#include "mali_memory_manager.h"
+#include "mali_memory_cow.h"
+#include "mali_memory_swap_alloc.h"
+#include "mali_memory_defer_bind.h"
+
+extern unsigned int mali_dedicated_mem_size;
+extern unsigned int mali_shared_mem_size;
+
+#define MALI_VM_NUM_FAULT_PREFETCH (0x8)
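+/*
+ * Upper bound on the number of pages the fault handler below will map into
+ * the CPU page tables per fault (the COW-on-demand path reduces this to a
+ * single page).
+ */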
+
+static void mali_mem_vma_open(struct vm_area_struct *vma)
+{
+ mali_mem_allocation *alloc = (mali_mem_allocation *)vma->vm_private_data;
+ MALI_DEBUG_PRINT(4, ("Open called on vma %p\n", vma));
+
+ /* If the allocation needs to be shared, take a reference here */
+ mali_allocation_ref(alloc);
+ return;
+}
+static void mali_mem_vma_close(struct vm_area_struct *vma)
+{
+ /* Matching unref for the reference taken in mali_mem_vma_open() */
+ mali_mem_allocation *alloc = (mali_mem_allocation *)vma->vm_private_data;
+
+ mali_allocation_unref(&alloc);
+ vma->vm_private_data = NULL;
+}
+
+static int mali_mem_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ mali_mem_allocation *alloc = (mali_mem_allocation *)vma->vm_private_data;
+ mali_mem_backend *mem_bkend = NULL;
+ int ret;
+ int prefetch_num = MALI_VM_NUM_FAULT_PREFETCH;
+
+ unsigned long address = (unsigned long)vmf->virtual_address;
+ MALI_DEBUG_ASSERT(alloc->backend_handle);
+ MALI_DEBUG_ASSERT((unsigned long)alloc->cpu_mapping.addr <= address);
+
+ /* Get backend memory & Map on CPU */
+ mutex_lock(&mali_idr_mutex);
+ if (!(mem_bkend = idr_find(&mali_backend_idr, alloc->backend_handle))) {
+ MALI_DEBUG_PRINT(1, ("Can't find memory backend in mmap!\n"));
+ mutex_unlock(&mali_idr_mutex);
+ return VM_FAULT_SIGBUS;
+ }
+ mutex_unlock(&mali_idr_mutex);
+ MALI_DEBUG_ASSERT(mem_bkend->type == alloc->type);
+
+ if ((mem_bkend->type == MALI_MEM_COW && (MALI_MEM_BACKEND_FLAG_SWAP_COWED !=
+ (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED))) &&
+ (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_COW_CPU_NO_WRITE)) {
+ /* use the page fault to perform the COW allocation on demand */
+ MALI_DEBUG_PRINT(4, ("mali_vma_fault: do cow allocate on demand!, address=0x%lx\n", address));
+ mutex_lock(&mem_bkend->mutex);
+ ret = mali_mem_cow_allocate_on_demand(mem_bkend,
+ (address - vma->vm_start) / PAGE_SIZE);
+ mutex_unlock(&mem_bkend->mutex);
+
+ if (ret != _MALI_OSK_ERR_OK) {
+ return VM_FAULT_OOM;
+ }
+ prefetch_num = 1;
+
+ /* Handle the CPU mapping of a COW modified range: the mapping was
+ * zapped in cow_modify_range, so the CPU access faults and the pages
+ * are mapped back to the CPU here. */
+ mutex_lock(&mem_bkend->mutex);
+ ret = mali_mem_cow_cpu_map_pages_locked(mem_bkend, vma, address, prefetch_num);
+ mutex_unlock(&mem_bkend->mutex);
+
+ if (unlikely(ret != _MALI_OSK_ERR_OK)) {
+ return VM_FAULT_SIGBUS;
+ }
+ } else if ((mem_bkend->type == MALI_MEM_SWAP) ||
+ (mem_bkend->type == MALI_MEM_COW && (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED))) {
+ u32 offset_in_bkend = (address - vma->vm_start) / PAGE_SIZE;
+ int ret = _MALI_OSK_ERR_OK;
+
+ mutex_lock(&mem_bkend->mutex);
+ if (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_COW_CPU_NO_WRITE) {
+ ret = mali_mem_swap_cow_page_on_demand(mem_bkend, offset_in_bkend, &vmf->page);
+ } else {
+ ret = mali_mem_swap_allocate_page_on_demand(mem_bkend, offset_in_bkend, &vmf->page);
+ }
+ mutex_unlock(&mem_bkend->mutex);
+
+ if (ret != _MALI_OSK_ERR_OK) {
+ MALI_DEBUG_PRINT(2, ("Mali swap memory page fault process failed, address=0x%x\n", address));
+ return VM_FAULT_OOM;
+ } else {
+ return VM_FAULT_LOCKED;
+ }
+ } else {
+ MALI_DEBUG_ASSERT(0);
+ /* not supported yet */
+ }
+ return VM_FAULT_NOPAGE;
+}
+
+static struct vm_operations_struct mali_kernel_vm_ops = {
+ .open = mali_mem_vma_open,
+ .close = mali_mem_vma_close,
+ .fault = mali_mem_vma_fault,
+};
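+/*
+ * These operations are attached to every Mali mapping in mali_mmap() below:
+ * .open/.close keep the allocation's reference count in sync across fork()
+ * and munmap(), while .fault backs the lazily populated mappings (COW on
+ * demand and swappable memory).
+ */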
+
+
+/** @brief Map a Mali allocation to a CPU address
+*
+* Supported backend types:
+* - MALI_MEM_OS
+* - (COW support still to be added?)
+*
+* Not supported backend types:
+* - _MALI_MEMORY_BIND_BACKEND_UMP
+* - _MALI_MEMORY_BIND_BACKEND_DMA_BUF
+* - _MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY
+*/
+int mali_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct mali_session_data *session;
+ mali_mem_allocation *mali_alloc = NULL;
+ u32 mali_addr = vma->vm_pgoff << PAGE_SHIFT;
+ struct mali_vma_node *mali_vma_node = NULL;
+ mali_mem_backend *mem_bkend = NULL;
+ int ret = -EFAULT;
+
+ session = (struct mali_session_data *)filp->private_data;
+ if (NULL == session) {
+ MALI_PRINT_ERROR(("mmap called without any session data available\n"));
+ return -EFAULT;
+ }
+
+ MALI_DEBUG_PRINT(4, ("MMap() handler: start=0x%08X, phys=0x%08X, size=0x%08X vma->flags 0x%08x\n",
+ (unsigned int)vma->vm_start, (unsigned int)(vma->vm_pgoff << PAGE_SHIFT),
+ (unsigned int)(vma->vm_end - vma->vm_start), vma->vm_flags));
+
+ /* Operations used on any memory system */
+ /* nothing needs to be done in vm open/close for now */
+
+ /* find the mali allocation structure by virtual address */
+ mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0);
+ if (likely(mali_vma_node)) {
+ mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
+ MALI_DEBUG_ASSERT(mali_addr == mali_vma_node->vm_node.start);
+ if (unlikely(mali_addr != mali_vma_node->vm_node.start)) {
+ /* only the start address may be used for mmap */
+ MALI_DEBUG_PRINT(1, ("mali_addr != mali_vma_node->vm_node.start\n"));
+ return -EFAULT;
+ }
+ } else {
+ MALI_DEBUG_ASSERT(NULL == mali_vma_node);
+ return -EFAULT;
+ }
+
+ mali_alloc->cpu_mapping.addr = (void __user *)vma->vm_start;
+
+ if (mali_alloc->flags & _MALI_MEMORY_ALLOCATE_DEFER_BIND) {
+ MALI_DEBUG_PRINT(1, ("ERROR : trying to access varying memory by CPU!\n"));
+ return -EFAULT;
+ }
+
+ /* Get backend memory & Map on CPU */
+ mutex_lock(&mali_idr_mutex);
+ if (!(mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle))) {
+ MALI_DEBUG_PRINT(1, ("Can't find memory backend in mmap!\n"));
+ mutex_unlock(&mali_idr_mutex);
+ return -EFAULT;
+ }
+ mutex_unlock(&mali_idr_mutex);
+
+ if (!(MALI_MEM_SWAP == mali_alloc->type ||
+ (MALI_MEM_COW == mali_alloc->type && (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)))) {
+ /* Set some bits which indicate that, the memory is IO memory, meaning
+ * that no paging is to be performed and the memory should not be
+ * included in crash dumps. And that the memory is reserved, meaning
+ * that it's present and can never be paged out (see also previous
+ * entry)
+ */
+ vma->vm_flags |= VM_IO;
+ vma->vm_flags |= VM_DONTCOPY;
+ vma->vm_flags |= VM_PFNMAP;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)
+ vma->vm_flags |= VM_RESERVED;
+#else
+ vma->vm_flags |= VM_DONTDUMP;
+ vma->vm_flags |= VM_DONTEXPAND;
+#endif
+ } else if (MALI_MEM_SWAP == mali_alloc->type) {
+ vma->vm_pgoff = mem_bkend->start_idx;
+ }
+
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ vma->vm_ops = &mali_kernel_vm_ops;
+
+ mali_alloc->cpu_mapping.addr = (void __user *)vma->vm_start;
+
+ /* If the mapping was requested read-only, set it up as copy-on-write:
+ * map it read-only now and let writes be resolved by the fault handler. */
+ if (!(vma->vm_flags & VM_WRITE)) {
+ MALI_DEBUG_PRINT(4, ("mmap allocation with read only!\n"));
+ /* add VM_WRITE so do_page_fault runs its write check on a write fault */
+ vma->vm_flags |= VM_WRITE | VM_READ;
+ vma->vm_page_prot = PAGE_READONLY;
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ mem_bkend->flags |= MALI_MEM_BACKEND_FLAG_COW_CPU_NO_WRITE;
+ goto out;
+ }
+
+ if (mem_bkend->type == MALI_MEM_OS) {
+ ret = mali_mem_os_cpu_map(mem_bkend, vma);
+ } else if (mem_bkend->type == MALI_MEM_COW &&
+ (MALI_MEM_BACKEND_FLAG_SWAP_COWED != (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED))) {
+ ret = mali_mem_cow_cpu_map(mem_bkend, vma);
+ } else if (mem_bkend->type == MALI_MEM_BLOCK) {
+ ret = mali_mem_block_cpu_map(mem_bkend, vma);
+ } else if ((mem_bkend->type == MALI_MEM_SWAP) || (mem_bkend->type == MALI_MEM_COW &&
+ (MALI_MEM_BACKEND_FLAG_SWAP_COWED == (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)))) {
+ /* For swappable memory, the CPU page table is populated by the page fault handler. */
+ ret = 0;
+ } else {
+ /* not supported yet */
+ MALI_DEBUG_ASSERT(0);
+ }
+
+ if (ret != 0) {
+ MALI_DEBUG_PRINT(1, ("ret != 0\n"));
+ return -EFAULT;
+ }
+out:
+ MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == mali_alloc->magic);
+
+ vma->vm_private_data = (void *)mali_alloc;
+ mali_alloc->cpu_mapping.vma = vma;
+
+ mali_allocation_ref(mali_alloc);
+
+ return 0;
+}
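+/*
+ * Userspace view (illustrative; the file descriptor and addresses are
+ * hypothetical): the mmap offset is interpreted above as the Mali virtual
+ * address of an existing allocation (vma->vm_pgoff << PAGE_SHIFT), so a
+ * client that created an allocation at gpu_va would map it like:
+ *
+ * void *cpu_ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ * MAP_SHARED, mali_fd, (off_t)gpu_va);
+ */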
+
+_mali_osk_errcode_t mali_mem_mali_map_prepare(mali_mem_allocation *descriptor)
+{
+ u32 size = descriptor->psize;
+ struct mali_session_data *session = descriptor->session;
+
+ MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);
+
+ /* Map dma-buf into this session's page tables */
+
+ if (descriptor->flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
+ size += MALI_MMU_PAGE_SIZE;
+ }
+
+ return mali_mmu_pagedir_map(session->page_directory, descriptor->mali_vma_node.vm_node.start, size);
+}
+
+_mali_osk_errcode_t mali_mem_mali_map_resize(mali_mem_allocation *descriptor, u32 new_size)
+{
+ u32 old_size = descriptor->psize;
+ struct mali_session_data *session = descriptor->session;
+
+ MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic);
+
+ if (descriptor->flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
+ new_size += MALI_MMU_PAGE_SIZE;
+ }
+
+ if (new_size > old_size) {
+ MALI_DEBUG_ASSERT(new_size <= descriptor->mali_vma_node.vm_node.size);
+ return mali_mmu_pagedir_map(session->page_directory, descriptor->mali_vma_node.vm_node.start + old_size, new_size - old_size);
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_mem_mali_map_free(struct mali_session_data *session, u32 size, mali_address_t vaddr, u32 flags)
+{
+ if (flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
+ size += MALI_MMU_PAGE_SIZE;
+ }
+
+ /* Unmap and flush the L2 cache */
+ mali_mmu_pagedir_unmap(session->page_directory, vaddr, size);
+ mali_executor_zap_all_active(session);
+}
+
+u32 _mali_ukk_report_memory_usage(void)
+{
+ u32 sum = 0;
+
+ if (MALI_TRUE == mali_memory_have_dedicated_memory()) {
+ sum += mali_mem_block_allocator_stat();
+ }
+
+ sum += mali_mem_os_stat();
+
+ return sum;
+}
+
+u32 _mali_ukk_report_total_memory_size(void)
+{
+ return mali_dedicated_mem_size + mali_shared_mem_size;
+}
+
+
+/**
+ * Per-session memory descriptor mapping table sizes
+ */
+#define MALI_MEM_DESCRIPTORS_INIT 64
+#define MALI_MEM_DESCRIPTORS_MAX 65536
+
+_mali_osk_errcode_t mali_memory_session_begin(struct mali_session_data *session_data)
+{
+ MALI_DEBUG_PRINT(5, ("Memory session begin\n"));
+
+ session_data->memory_lock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_ORDERED,
+ _MALI_OSK_LOCK_ORDER_MEM_SESSION);
+
+ if (NULL == session_data->memory_lock) {
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ mali_memory_manager_init(&session_data->allocation_mgr);
+
+ MALI_DEBUG_PRINT(5, ("MMU session begin: success\n"));
+ MALI_SUCCESS;
+}
+
+void mali_memory_session_end(struct mali_session_data *session)
+{
+ MALI_DEBUG_PRINT(3, ("MMU session end\n"));
+
+ if (NULL == session) {
+ MALI_DEBUG_PRINT(1, ("No session data found during session end\n"));
+ return;
+ }
+ /* free all allocations owned by the session */
+ mali_free_session_allocations(session);
+ /* sanity checks during uninit */
+ mali_memory_manager_uninit(&session->allocation_mgr);
+
+ /* Free the lock */
+ _mali_osk_mutex_term(session->memory_lock);
+
+ return;
+}
+
+_mali_osk_errcode_t mali_memory_initialize(void)
+{
+ _mali_osk_errcode_t err;
+
+ idr_init(&mali_backend_idr);
+ mutex_init(&mali_idr_mutex);
+
+ err = mali_mem_swap_init();
+ if (err != _MALI_OSK_ERR_OK) {
+ return err;
+ }
+ err = mali_mem_os_init();
+ if (_MALI_OSK_ERR_OK == err) {
+ err = mali_mem_defer_bind_manager_init();
+ }
+
+ return err;
+}
+
+void mali_memory_terminate(void)
+{
+ mali_mem_swap_term();
+ mali_mem_defer_bind_manager_destory();
+ mali_mem_os_term();
+ if (mali_memory_have_dedicated_memory()) {
+ mali_mem_block_allocator_destroy();
+ }
+}
+
+
+struct mali_page_node *_mali_page_node_allocate(mali_page_node_type type)
+{
+ mali_page_node *page_node = NULL;
+
+ page_node = kzalloc(sizeof(mali_page_node), GFP_KERNEL);
+ MALI_DEBUG_ASSERT(NULL != page_node);
+
+ if (page_node) {
+ page_node->type = type;
+ INIT_LIST_HEAD(&page_node->list);
+ }
+
+ return page_node;
+}
+
+void _mali_page_node_ref(struct mali_page_node *node)
+{
+ if (node->type == MALI_PAGE_NODE_OS) {
+ /* add ref to this page */
+ get_page(node->page);
+ } else if (node->type == MALI_PAGE_NODE_BLOCK) {
+ mali_mem_block_add_ref(node);
+ } else if (node->type == MALI_PAGE_NODE_SWAP) {
+ atomic_inc(&node->swap_it->ref_count);
+ } else
+ MALI_DEBUG_ASSERT(0);
+}
+
+void _mali_page_node_unref(struct mali_page_node *node)
+{
+ if (node->type == MALI_PAGE_NODE_OS) {
+ /* unref to this page */
+ put_page(node->page);
+ } else if (node->type == MALI_PAGE_NODE_BLOCK) {
+ mali_mem_block_dec_ref(node);
+ } else
+ MALI_DEBUG_ASSERT(0);
+}
+
+
+void _mali_page_node_add_page(struct mali_page_node *node, struct page *page)
+{
+ MALI_DEBUG_ASSERT(MALI_PAGE_NODE_OS == node->type);
+ node->page = page;
+}
+
+
+void _mali_page_node_add_swap_item(struct mali_page_node *node, struct mali_swap_item *item)
+{
+ MALI_DEBUG_ASSERT(MALI_PAGE_NODE_SWAP == node->type);
+ node->swap_it = item;
+}
+
+void _mali_page_node_add_block_item(struct mali_page_node *node, mali_block_item *item)
+{
+ MALI_DEBUG_ASSERT(MALI_PAGE_NODE_BLOCK == node->type);
+ node->blk_it = item;
+}
+
+
+int _mali_page_node_get_ref_count(struct mali_page_node *node)
+{
+ if (node->type == MALI_PAGE_NODE_OS) {
+ /* get ref count of this page */
+ return page_count(node->page);
+ } else if (node->type == MALI_PAGE_NODE_BLOCK) {
+ return mali_mem_block_get_ref_count(node);
+ } else if (node->type == MALI_PAGE_NODE_SWAP) {
+ return atomic_read(&node->swap_it->ref_count);
+ } else {
+ MALI_DEBUG_ASSERT(0);
+ }
+ return -1;
+}
+
+
+dma_addr_t _mali_page_node_get_dma_addr(struct mali_page_node *node)
+{
+ if (node->type == MALI_PAGE_NODE_OS) {
+ return page_private(node->page);
+ } else if (node->type == MALI_PAGE_NODE_BLOCK) {
+ return _mali_blk_item_get_phy_addr(node->blk_it);
+ } else if (node->type == MALI_PAGE_NODE_SWAP) {
+ return node->swap_it->dma_addr;
+ } else {
+ MALI_DEBUG_ASSERT(0);
+ }
+ return 0;
+}
+
+
+unsigned long _mali_page_node_get_pfn(struct mali_page_node *node)
+{
+ if (node->type == MALI_PAGE_NODE_OS) {
+ return page_to_pfn(node->page);
+ } else if (node->type == MALI_PAGE_NODE_BLOCK) {
+ /* get phy addr for BLOCK page*/
+ return _mali_blk_item_get_pfn(node->blk_it);
+ } else if (node->type == MALI_PAGE_NODE_SWAP) {
+ return page_to_pfn(node->swap_it->page);
+ } else {
+ MALI_DEBUG_ASSERT(0);
+ }
+ return 0;
+}
+
+
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory.h b/drivers/gpu/arm/utgard/linux/mali_memory.h
new file mode 100644
index 000000000000..3140c6c98d2c
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_H__
+#define __MALI_MEMORY_H__
+
+#include "mali_osk.h"
+#include "mali_session.h"
+
+#include <linux/list.h>
+#include <linux/mm.h>
+
+#include "mali_memory_types.h"
+#include "mali_memory_os_alloc.h"
+
+_mali_osk_errcode_t mali_memory_initialize(void);
+void mali_memory_terminate(void);
+
+/** @brief Allocate a page table page
+ *
+ * Allocate a page for use as a page directory or page table. The page is
+ * mapped into kernel space.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise an error code
+ * @param table_page GPU pointer to the allocated page
+ * @param mapping CPU pointer to the mapping of the allocated page
+ */
+MALI_STATIC_INLINE _mali_osk_errcode_t
+mali_mmu_get_table_page(mali_dma_addr *table_page, mali_io_address *mapping)
+{
+ return mali_mem_os_get_table_page(table_page, mapping);
+}
+
+/** @brief Release a page table page
+ *
+ * Release a page table page allocated through \a mali_mmu_get_table_page
+ *
+ * @param phys the GPU (physical) address of the page to release
+ * @param virt the CPU mapping of the page to release
+ */
+MALI_STATIC_INLINE void
+mali_mmu_release_table_page(mali_dma_addr phys, void *virt)
+{
+ mali_mem_os_release_table_page(phys, virt);
+}
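+/*
+ * Pairing sketch (illustrative caller): a table page obtained from
+ * mali_mmu_get_table_page() is returned through the matching release call.
+ *
+ * mali_dma_addr pa;
+ * mali_io_address va;
+ *
+ * if (_MALI_OSK_ERR_OK == mali_mmu_get_table_page(&pa, &va)) {
+ * // ... build page table entries through va, hand pa to the MMU ...
+ * mali_mmu_release_table_page(pa, va);
+ * }
+ */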
+
+/** @brief mmap function
+ *
+ * mmap syscalls on the Mali device node will end up here.
+ *
+ * This function allocates Mali memory and maps it on CPU and Mali.
+ */
+int mali_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/** @brief Start a new memory session
+ *
+ * Called when a process opens the Mali device node.
+ *
+ * @param session Pointer to session to initialize
+ */
+_mali_osk_errcode_t mali_memory_session_begin(struct mali_session_data *session);
+
+/** @brief Close a memory session
+ *
+ * Called when a process closes the Mali device node.
+ *
+ * Memory allocated by the session will be freed
+ *
+ * @param session Pointer to the session to terminate
+ */
+void mali_memory_session_end(struct mali_session_data *session);
+
+/** @brief Prepare Mali page tables for mapping
+ *
+ * This function will prepare the Mali page tables for mapping the memory
+ * described by \a descriptor.
+ *
+ * Page tables will be reference counted and allocated, if not yet present.
+ *
+ * @param descriptor Pointer to the memory descriptor of the mapping
+ */
+_mali_osk_errcode_t mali_mem_mali_map_prepare(mali_mem_allocation *descriptor);
+
+/** @brief Resize Mali page tables for mapping
+ *
+ * This function will resize the Mali page tables for mapping the memory
+ * described by \a descriptor.
+ *
+ * Page tables will be reference counted and allocated, if not yet present.
+ *
+ * @param descriptor Pointer to the memory descriptor of the mapping
+ * @param new_size The new size of the descriptor
+ */
+_mali_osk_errcode_t mali_mem_mali_map_resize(mali_mem_allocation *descriptor, u32 new_size);
+
+/** @brief Free Mali page tables for mapping
+ *
+ * This function will unmap pages from Mali memory and free the page tables
+ * that are now unused.
+ *
+ * The updated pages in the Mali L2 cache will be invalidated, and the MMU TLBs will be zapped if necessary.
+ *
+ * @param session Session owning the mapping
+ * @param size Size of the mapping to unmap
+ * @param vaddr Mali virtual address where the mapping starts
+ * @param flags Allocation flags (an extra guard page is unmapped if set)
+ */
+void mali_mem_mali_map_free(struct mali_session_data *session, u32 size, mali_address_t vaddr, u32 flags);
+
+/** @brief Parse resource and prepare the OS memory allocator
+ *
+ * @param size Maximum size to allocate for Mali GPU.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t mali_memory_core_resource_os_memory(u32 size);
+
+/** @brief Parse resource and prepare the dedicated memory allocator
+ *
+ * @param start Physical start address of dedicated Mali GPU memory.
+ * @param size Size of dedicated Mali GPU memory.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(u32 start, u32 size);
+
+
+struct mali_page_node *_mali_page_node_allocate(mali_page_node_type type);
+
+void _mali_page_node_ref(struct mali_page_node *node);
+void _mali_page_node_unref(struct mali_page_node *node);
+void _mali_page_node_add_page(struct mali_page_node *node, struct page *page);
+
+void _mali_page_node_add_block_item(struct mali_page_node *node, mali_block_item *item);
+
+void _mali_page_node_add_swap_item(struct mali_page_node *node, struct mali_swap_item *item);
+
+int _mali_page_node_get_ref_count(struct mali_page_node *node);
+dma_addr_t _mali_page_node_get_dma_addr(struct mali_page_node *node);
+unsigned long _mali_page_node_get_pfn(struct mali_page_node *node);
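+/*
+ * Lifecycle sketch (illustrative; assumes "page" is a struct page * whose
+ * reference the caller hands over to the node): a page node wraps one of the
+ * three page sources declared above. For OS memory:
+ *
+ * struct mali_page_node *node = _mali_page_node_allocate(MALI_PAGE_NODE_OS);
+ *
+ * if (node) {
+ * _mali_page_node_add_page(node, page); // node now owns the page ref
+ * // ... use the node on an allocation's page list ...
+ * _mali_page_node_unref(node); // put_page() on the wrapped page
+ * }
+ */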
+
+#endif /* __MALI_MEMORY_H__ */
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_block_alloc.c b/drivers/gpu/arm/utgard/linux/mali_memory_block_alloc.c
new file mode 100644
index 000000000000..453ddda3080f
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_block_alloc.c
@@ -0,0 +1,362 @@
+/*
+ * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_memory.h"
+#include "mali_memory_block_alloc.h"
+#include "mali_osk.h"
+#include <linux/mutex.h>
+
+
+static mali_block_allocator *mali_mem_block_gobal_allocator = NULL;
+
+unsigned long _mali_blk_item_get_phy_addr(mali_block_item *item)
+{
+ return (item->phy_addr & ~(MALI_BLOCK_REF_MASK));
+}
+
+
+unsigned long _mali_blk_item_get_pfn(mali_block_item *item)
+{
+ return (item->phy_addr / MALI_BLOCK_SIZE);
+}
+
+
+u32 mali_mem_block_get_ref_count(mali_page_node *node)
+{
+ MALI_DEBUG_ASSERT(node->type == MALI_PAGE_NODE_BLOCK);
+ return (node->blk_it->phy_addr & MALI_BLOCK_REF_MASK);
+}
+
+
+/* Increase the reference count.
+* Not atomic, so the caller must hold sp_lock before calling this function.
+*/
+
+u32 mali_mem_block_add_ref(mali_page_node *node)
+{
+ MALI_DEBUG_ASSERT(node->type == MALI_PAGE_NODE_BLOCK);
+ MALI_DEBUG_ASSERT(mali_mem_block_get_ref_count(node) < MALI_BLOCK_MAX_REF_COUNT);
+ return (node->blk_it->phy_addr++ & MALI_BLOCK_REF_MASK);
+}
+
+/* Decrease the reference count.
+* Not atomic, so the caller must hold sp_lock before calling this function.
+*/
+u32 mali_mem_block_dec_ref(mali_page_node *node)
+{
+ MALI_DEBUG_ASSERT(node->type == MALI_PAGE_NODE_BLOCK);
+ MALI_DEBUG_ASSERT(mali_mem_block_get_ref_count(node) > 0);
+ return (node->blk_it->phy_addr-- & MALI_BLOCK_REF_MASK);
+}
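+/*
+ * Encoding sketch (illustrative numbers): blocks are MALI_BLOCK_SIZE (page)
+ * aligned, so the low 12 bits of mali_block_item::phy_addr are free and hold
+ * the reference count (MALI_BLOCK_REF_MASK). Incrementing phy_addr therefore
+ * bumps the count without disturbing the address bits:
+ *
+ * mali_block_item it = { .phy_addr = 0x80001000 }; // addr 0x80001000, ref 0
+ * it.phy_addr++; // addr 0x80001000, ref 1
+ * // _mali_blk_item_get_phy_addr(&it) == 0x80001000
+ * // (it.phy_addr & MALI_BLOCK_REF_MASK) == 1
+ */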
+
+
+static mali_block_allocator *mali_mem_block_allocator_create(u32 base_address, u32 size)
+{
+ mali_block_allocator *info;
+ u32 usable_size;
+ u32 num_blocks;
+ mali_page_node *m_node;
+ mali_block_item *mali_blk_items = NULL;
+ int i = 0;
+
+ usable_size = size & ~(MALI_BLOCK_SIZE - 1);
+ MALI_DEBUG_PRINT(3, ("Mali block allocator create for region starting at 0x%08X length 0x%08X\n", base_address, size));
+ MALI_DEBUG_PRINT(4, ("%d usable bytes\n", usable_size));
+ num_blocks = usable_size / MALI_BLOCK_SIZE;
+ MALI_DEBUG_PRINT(4, ("which becomes %d blocks\n", num_blocks));
+
+ if (usable_size == 0) {
+ MALI_DEBUG_PRINT(1, ("Memory block of size %d is unusable\n", size));
+ return NULL;
+ }
+
+ info = _mali_osk_calloc(1, sizeof(mali_block_allocator));
+ if (NULL != info) {
+ INIT_LIST_HEAD(&info->free);
+ spin_lock_init(&info->sp_lock);
+ info->total_num = num_blocks;
+ mali_blk_items = _mali_osk_calloc(1, sizeof(mali_block_item) * num_blocks);
+
+ if (mali_blk_items) {
+ info->items = mali_blk_items;
+ /* add blocks(4k size) to free list*/
+ for (i = 0 ; i < num_blocks ; i++) {
+ /* add block information*/
+ mali_blk_items[i].phy_addr = base_address + (i * MALI_BLOCK_SIZE);
+ /* add to free list */
+ m_node = _mali_page_node_allocate(MALI_PAGE_NODE_BLOCK);
+ if (m_node == NULL)
+ goto fail;
+ _mali_page_node_add_block_item(m_node, &(mali_blk_items[i]));
+ list_add_tail(&m_node->list, &info->free);
+ atomic_add(1, &info->free_num);
+ }
+ return info;
+ }
+ }
+fail:
+ mali_mem_block_allocator_destroy();
+ return NULL;
+}
+
+void mali_mem_block_allocator_destroy(void)
+{
+ struct mali_page_node *m_page, *m_tmp;
+ mali_block_allocator *info = mali_mem_block_gobal_allocator;
+ MALI_DEBUG_ASSERT_POINTER(info);
+ MALI_DEBUG_PRINT(4, ("Memory block destroy !\n"));
+
+ if (NULL == info)
+ return;
+
+ list_for_each_entry_safe(m_page, m_tmp , &info->free, list) {
+ MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_BLOCK);
+ list_del(&m_page->list);
+ kfree(m_page);
+ }
+
+ _mali_osk_free(info->items);
+ _mali_osk_free(info);
+}
+
+u32 mali_mem_block_release(mali_mem_backend *mem_bkend)
+{
+ mali_mem_allocation *alloc = mem_bkend->mali_allocation;
+ u32 free_pages_nr = 0;
+ MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_BLOCK);
+
+ /* Unmap the memory from the mali virtual address space. */
+ mali_mem_block_mali_unmap(alloc);
+ mutex_lock(&mem_bkend->mutex);
+ free_pages_nr = mali_mem_block_free(&mem_bkend->block_mem);
+ mutex_unlock(&mem_bkend->mutex);
+ return free_pages_nr;
+}
+
+
+int mali_mem_block_alloc(mali_mem_block_mem *block_mem, u32 size)
+{
+ struct mali_page_node *m_page, *m_tmp;
+ size_t page_count = PAGE_ALIGN(size) / _MALI_OSK_MALI_PAGE_SIZE;
+ mali_block_allocator *info = mali_mem_block_gobal_allocator;
+ MALI_DEBUG_ASSERT_POINTER(info);
+
+ MALI_DEBUG_PRINT(4, ("BLOCK Mem: Allocate size = 0x%x\n", size));
+ /*do some init */
+ INIT_LIST_HEAD(&block_mem->pfns);
+
+ spin_lock(&info->sp_lock);
+ /* check that there is enough free space */
+ if (atomic_read(&info->free_num) > page_count) {
+ list_for_each_entry_safe(m_page, m_tmp , &info->free, list) {
+ if (page_count > 0) {
+ MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_BLOCK);
+ MALI_DEBUG_ASSERT(mali_mem_block_get_ref_count(m_page) == 0);
+ list_move(&m_page->list, &block_mem->pfns);
+ block_mem->count++;
+ atomic_dec(&info->free_num);
+ _mali_page_node_ref(m_page);
+ } else {
+ break;
+ }
+ page_count--;
+ }
+ } else {
+ /* can't allocate from BLOCK memory*/
+ spin_unlock(&info->sp_lock);
+ return -1;
+ }
+
+ spin_unlock(&info->sp_lock);
+ return 0;
+}
+
+u32 mali_mem_block_free(mali_mem_block_mem *block_mem)
+{
+ u32 free_pages_nr = 0;
+
+ free_pages_nr = mali_mem_block_free_list(&block_mem->pfns);
+ MALI_DEBUG_PRINT(4, ("BLOCK Mem free : allocated size = 0x%x, free size = 0x%x\n", block_mem->count * _MALI_OSK_MALI_PAGE_SIZE,
+ free_pages_nr * _MALI_OSK_MALI_PAGE_SIZE));
+ block_mem->count = 0;
+ MALI_DEBUG_ASSERT(list_empty(&block_mem->pfns));
+
+ return free_pages_nr;
+}
+
+
+u32 mali_mem_block_free_list(struct list_head *list)
+{
+ struct mali_page_node *m_page, *m_tmp;
+ mali_block_allocator *info = mali_mem_block_gobal_allocator;
+ u32 free_pages_nr = 0;
+
+ if (info) {
+ spin_lock(&info->sp_lock);
+ list_for_each_entry_safe(m_page, m_tmp , list, list) {
+ if (1 == _mali_page_node_get_ref_count(m_page)) {
+ free_pages_nr++;
+ }
+ mali_mem_block_free_node(m_page);
+ }
+ spin_unlock(&info->sp_lock);
+ }
+ return free_pages_nr;
+}
+
+/* Free the node: on the last reference the block returns to the free list, otherwise the node itself is freed. */
+void mali_mem_block_free_node(struct mali_page_node *node)
+{
+ mali_block_allocator *info = mali_mem_block_gobal_allocator;
+
+ /* only handle BLOCK node */
+ if (node->type == MALI_PAGE_NODE_BLOCK && info) {
+ /*Need to make this atomic?*/
+ if (1 == _mali_page_node_get_ref_count(node)) {
+ /*Move to free list*/
+ _mali_page_node_unref(node);
+ list_move_tail(&node->list, &info->free);
+ atomic_add(1, &info->free_num);
+ } else {
+ _mali_page_node_unref(node);
+ list_del(&node->list);
+ kfree(node);
+ }
+ }
+}
+
+/* unref the node, but not free it */
+_mali_osk_errcode_t mali_mem_block_unref_node(struct mali_page_node *node)
+{
+ mali_block_allocator *info = mali_mem_block_gobal_allocator;
+ mali_page_node *new_node;
+
+ /* only handle BLOCK node */
+ if (node->type == MALI_PAGE_NODE_BLOCK && info) {
+ /*Need to make this atomic?*/
+ if (1 == _mali_page_node_get_ref_count(node)) {
+ /* allocate a new node, Add to free list, keep the old node*/
+ _mali_page_node_unref(node);
+ new_node = _mali_page_node_allocate(MALI_PAGE_NODE_BLOCK);
+ if (new_node) {
+ memcpy(new_node, node, sizeof(mali_page_node));
+ list_add(&new_node->list, &info->free);
+ atomic_add(1, &info->free_num);
+ } else
+ return _MALI_OSK_ERR_FAULT;
+
+ } else {
+ _mali_page_node_unref(node);
+ }
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+
+int mali_mem_block_mali_map(mali_mem_block_mem *block_mem, struct mali_session_data *session, u32 vaddr, u32 props)
+{
+ struct mali_page_directory *pagedir = session->page_directory;
+ struct mali_page_node *m_page;
+ dma_addr_t phys;
+ u32 virt = vaddr;
+ u32 prop = props;
+
+ list_for_each_entry(m_page, &block_mem->pfns, list) {
+ MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_BLOCK);
+ phys = _mali_page_node_get_dma_addr(m_page);
+#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT)
+ /* Verify that the "physical" address is 32-bit and
+ * usable for Mali, when on a system with bus addresses
+ * wider than 32-bit. */
+ MALI_DEBUG_ASSERT(0 == (phys >> 32));
+#endif
+ mali_mmu_pagedir_update(pagedir, virt, (mali_dma_addr)phys, MALI_MMU_PAGE_SIZE, prop);
+ virt += MALI_MMU_PAGE_SIZE;
+ }
+
+ return 0;
+}
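+/*
+ * Usage sketch (illustrative; bkend, session, gpu_va and size are
+ * hypothetical): once mali_mem_block_alloc() has filled block_mem->pfns, the
+ * blocks are mapped back-to-back into the session's Mali address space:
+ *
+ * if (0 == mali_mem_block_alloc(&bkend->block_mem, size))
+ * mali_mem_block_mali_map(&bkend->block_mem, session, gpu_va,
+ * MALI_MMU_FLAGS_DEFAULT);
+ */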
+
+void mali_mem_block_mali_unmap(mali_mem_allocation *alloc)
+{
+ struct mali_session_data *session;
+ MALI_DEBUG_ASSERT_POINTER(alloc);
+ session = alloc->session;
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ mali_session_memory_lock(session);
+ mali_mem_mali_map_free(session, alloc->psize, alloc->mali_vma_node.vm_node.start,
+ alloc->flags);
+ mali_session_memory_unlock(session);
+}
+
+
+int mali_mem_block_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma)
+{
+ int ret;
+ mali_mem_block_mem *block_mem = &mem_bkend->block_mem;
+ unsigned long addr = vma->vm_start;
+ struct mali_page_node *m_page;
+ MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_BLOCK);
+
+ list_for_each_entry(m_page, &block_mem->pfns, list) {
+ MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_BLOCK);
+ ret = vm_insert_pfn(vma, addr, _mali_page_node_get_pfn(m_page));
+
+ if (unlikely(0 != ret)) {
+ return -EFAULT;
+ }
+ addr += _MALI_OSK_MALI_PAGE_SIZE;
+
+ }
+
+ return 0;
+}
+
+
+_mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(u32 start, u32 size)
+{
+ mali_block_allocator *allocator;
+
+ /* Do the low level linux operation first */
+
+ /* Request ownership of the memory */
+ if (_MALI_OSK_ERR_OK != _mali_osk_mem_reqregion(start, size, "Dedicated Mali GPU memory")) {
+ MALI_DEBUG_PRINT(1, ("Failed to request memory region for frame buffer (0x%08X - 0x%08X)\n", start, start + size - 1));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Create generic block allocator object to handle it */
+ allocator = mali_mem_block_allocator_create(start, size);
+
+ if (NULL == allocator) {
+ MALI_DEBUG_PRINT(1, ("Memory bank registration failed\n"));
+ _mali_osk_mem_unreqregion(start, size);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ mali_mem_block_gobal_allocator = (mali_block_allocator *)allocator;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+mali_bool mali_memory_have_dedicated_memory(void)
+{
+ return mali_mem_block_gobal_allocator ? MALI_TRUE : MALI_FALSE;
+}
+
+u32 mali_mem_block_allocator_stat(void)
+{
+ mali_block_allocator *allocator = mali_mem_block_gobal_allocator;
+ MALI_DEBUG_ASSERT_POINTER(allocator);
+
+ return (allocator->total_num - atomic_read(&allocator->free_num)) * _MALI_OSK_MALI_PAGE_SIZE;
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_block_alloc.h b/drivers/gpu/arm/utgard/linux/mali_memory_block_alloc.h
new file mode 100644
index 000000000000..129434de67df
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_block_alloc.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2010, 2013, 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_BLOCK_ALLOCATOR_H__
+#define __MALI_BLOCK_ALLOCATOR_H__
+
+#include "mali_session.h"
+#include "mali_memory.h"
+#include <linux/spinlock.h>
+
+#include "mali_memory_types.h"
+
+#define MALI_BLOCK_SIZE (PAGE_SIZE) /* 4 kB, manage BLOCK memory as page size */
+#define MALI_BLOCK_REF_MASK (0xFFF)
+#define MALI_BLOCK_MAX_REF_COUNT (0xFFF)
+
+
+
+typedef struct mali_block_allocator {
+ /*
+ * In free list, each node's ref_count is 0,
+ * ref_count added when allocated or referenced in COW
+ */
+ mali_block_item *items; /* information for each block item*/
+ struct list_head free; /*free list of mali_memory_node*/
+ spinlock_t sp_lock; /*lock for reference count & free list opertion*/
+ u32 total_num; /* Number of total pages*/
+ atomic_t free_num; /*number of free pages*/
+} mali_block_allocator;
+
+unsigned long _mali_blk_item_get_phy_addr(mali_block_item *item);
+unsigned long _mali_blk_item_get_pfn(mali_block_item *item);
+u32 mali_mem_block_get_ref_count(mali_page_node *node);
+u32 mali_mem_block_add_ref(mali_page_node *node);
+u32 mali_mem_block_dec_ref(mali_page_node *node);
+u32 mali_mem_block_release(mali_mem_backend *mem_bkend);
+int mali_mem_block_alloc(mali_mem_block_mem *block_mem, u32 size);
+int mali_mem_block_mali_map(mali_mem_block_mem *block_mem, struct mali_session_data *session, u32 vaddr, u32 props);
+void mali_mem_block_mali_unmap(mali_mem_allocation *alloc);
+
+int mali_mem_block_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma);
+_mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(u32 start, u32 size);
+mali_bool mali_memory_have_dedicated_memory(void);
+u32 mali_mem_block_free(mali_mem_block_mem *block_mem);
+u32 mali_mem_block_free_list(struct list_head *list);
+void mali_mem_block_free_node(struct mali_page_node *node);
+void mali_mem_block_allocator_destroy(void);
+_mali_osk_errcode_t mali_mem_block_unref_node(struct mali_page_node *node);
+u32 mali_mem_block_allocator_stat(void);
+
+#endif /* __MALI_BLOCK_ALLOCATOR_H__ */
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_cow.c b/drivers/gpu/arm/utgard/linux/mali_memory_cow.c
new file mode 100644
index 000000000000..bcd0f8713771
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_cow.c
@@ -0,0 +1,754 @@
+/*
+ * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/mm.h>
+#include <linux/list.h>
+#include <linux/mm_types.h>
+#include <linux/fs.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
+#include <asm/cacheflush.h>
+#include <linux/sched.h>
+#ifdef CONFIG_ARM
+#include <asm/outercache.h>
+#endif
+#include <asm/dma-mapping.h>
+
+#include "mali_memory.h"
+#include "mali_kernel_common.h"
+#include "mali_uk_types.h"
+#include "mali_osk.h"
+#include "mali_kernel_linux.h"
+#include "mali_memory_cow.h"
+#include "mali_memory_block_alloc.h"
+#include "mali_memory_swap_alloc.h"
+
+/**
+* allocate pages for COW backend and flush cache
+*/
+static struct page *mali_mem_cow_alloc_page(void)
+{
+ mali_mem_os_mem os_mem;
+ struct mali_page_node *node;
+ struct page *new_page;
+
+ int ret = 0;
+ /* allocate pages from os mem */
+ ret = mali_mem_os_alloc_pages(&os_mem, _MALI_OSK_MALI_PAGE_SIZE);
+
+ if (ret) {
+ return NULL;
+ }
+
+ MALI_DEBUG_ASSERT(1 == os_mem.count);
+
+ node = _MALI_OSK_CONTAINER_OF(os_mem.pages.next, struct mali_page_node, list);
+ new_page = node->page;
+ node->page = NULL;
+ list_del(&node->list);
+ kfree(node);
+
+ return new_page;
+}
+
+
+static struct list_head *_mali_memory_cow_get_node_list(mali_mem_backend *target_bk,
+ u32 target_offset,
+ u32 target_size)
+{
+ MALI_DEBUG_ASSERT(MALI_MEM_OS == target_bk->type || MALI_MEM_COW == target_bk->type ||
+ MALI_MEM_BLOCK == target_bk->type || MALI_MEM_SWAP == target_bk->type);
+
+ if (MALI_MEM_OS == target_bk->type) {
+ MALI_DEBUG_ASSERT(&target_bk->os_mem);
+ MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->os_mem.count);
+ return &target_bk->os_mem.pages;
+ } else if (MALI_MEM_COW == target_bk->type) {
+ MALI_DEBUG_ASSERT(&target_bk->cow_mem);
+ MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->cow_mem.count);
+ return &target_bk->cow_mem.pages;
+ } else if (MALI_MEM_BLOCK == target_bk->type) {
+ MALI_DEBUG_ASSERT(&target_bk->block_mem);
+ MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->block_mem.count);
+ return &target_bk->block_mem.pfns;
+ } else if (MALI_MEM_SWAP == target_bk->type) {
+ MALI_DEBUG_ASSERT(&target_bk->swap_mem);
+ MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->swap_mem.count);
+ return &target_bk->swap_mem.pages;
+ }
+
+ return NULL;
+}
+
+/**
+* Do COW for OS memory - also supports COW of memory from bank (block) memory.
+* range_start/range_size can be zero, which means cow_modify_range will be
+* called later.
+* This function allocates new pages from OS memory for the modified range of
+* the COW backend; pages outside the modified range are kept and a reference
+* is added to them.
+*
+* @target_bk - target allocation's backend (the allocation to COW)
+* @target_offset - offset in the target allocation to COW (4K aligned; supports COW of memory allocated from a memory bank)
+* @target_size - size of the target allocation to COW (supports memory bank)
+* @backend - COW backend
+* @range_start - offset of the modified range (4K aligned)
+* @range_size - size of the modified range
+*/
+_mali_osk_errcode_t mali_memory_cow_os_memory(mali_mem_backend *target_bk,
+ u32 target_offset,
+ u32 target_size,
+ mali_mem_backend *backend,
+ u32 range_start,
+ u32 range_size)
+{
+ mali_mem_cow *cow = &backend->cow_mem;
+ struct mali_page_node *m_page, *m_tmp, *page_node;
+ int target_page = 0;
+ struct page *new_page;
+ struct list_head *pages = NULL;
+
+ pages = _mali_memory_cow_get_node_list(target_bk, target_offset, target_size);
+
+ if (NULL == pages) {
+ MALI_DEBUG_ASSERT(0);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ MALI_DEBUG_ASSERT(0 == cow->count);
+
+ INIT_LIST_HEAD(&cow->pages);
+ mutex_lock(&target_bk->mutex);
+ list_for_each_entry_safe(m_page, m_tmp, pages, list) {
+ /* add page from (target_offset,target_offset+size) to cow backend */
+ if ((target_page >= target_offset / _MALI_OSK_MALI_PAGE_SIZE) &&
+ (target_page < ((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE))) {
+
+ /* allocate a new page node; always use OS memory for COW */
+ page_node = _mali_page_node_allocate(MALI_PAGE_NODE_OS);
+
+ if (NULL == page_node) {
+ mutex_unlock(&target_bk->mutex);
+ goto error;
+ }
+
+ INIT_LIST_HEAD(&page_node->list);
+
+ /* check if in the modified range*/
+ if ((cow->count >= range_start / _MALI_OSK_MALI_PAGE_SIZE) &&
+ (cow->count < (range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE)) {
+ /* need to allocate a new page */
+ /* To keep things simple, all COW memory is allocated from OS memory */
+ new_page = mali_mem_cow_alloc_page();
+
+ if (NULL == new_page) {
+ kfree(page_node);
+ mutex_unlock(&target_bk->mutex);
+ goto error;
+ }
+
+ _mali_page_node_add_page(page_node, new_page);
+ } else {
+ /*Add Block memory case*/
+ if (m_page->type != MALI_PAGE_NODE_BLOCK) {
+ _mali_page_node_add_page(page_node, m_page->page);
+ } else {
+ page_node->type = MALI_PAGE_NODE_BLOCK;
+ _mali_page_node_add_block_item(page_node, m_page->blk_it);
+ }
+
+ /* add ref to this page */
+ _mali_page_node_ref(m_page);
+ }
+
+ /* add it to COW backend page list */
+ list_add_tail(&page_node->list, &cow->pages);
+ cow->count++;
+ }
+ target_page++;
+ }
+ mutex_unlock(&target_bk->mutex);
+ return _MALI_OSK_ERR_OK;
+error:
+ mali_mem_cow_release(backend, MALI_FALSE);
+ return _MALI_OSK_ERR_FAULT;
+}
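+/*
+ * Worked example (illustrative numbers): COWing a 16-page OS allocation with
+ * target_offset = 0, target_size = 16 * PAGE_SIZE and a modified range of
+ * pages 4..7 (range_start = 4 * PAGE_SIZE, range_size = 4 * PAGE_SIZE)
+ * leaves the COW backend with 16 page nodes: nodes 4..7 point at freshly
+ * allocated pages while the other 12 reference the original pages, so only
+ * the modified range costs new memory.
+ */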
+
+_mali_osk_errcode_t mali_memory_cow_swap_memory(mali_mem_backend *target_bk,
+ u32 target_offset,
+ u32 target_size,
+ mali_mem_backend *backend,
+ u32 range_start,
+ u32 range_size)
+{
+ mali_mem_cow *cow = &backend->cow_mem;
+ struct mali_page_node *m_page, *m_tmp, *page_node;
+ int target_page = 0;
+ struct mali_swap_item *swap_item;
+ struct list_head *pages = NULL;
+
+ pages = _mali_memory_cow_get_node_list(target_bk, target_offset, target_size);
+ if (NULL == pages) {
+ MALI_DEBUG_ASSERT(0);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ MALI_DEBUG_ASSERT(0 == cow->count);
+
+ INIT_LIST_HEAD(&cow->pages);
+ mutex_lock(&target_bk->mutex);
+
+ backend->flags |= MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN;
+
+ list_for_each_entry_safe(m_page, m_tmp, pages, list) {
+ /* add page from (target_offset,target_offset+size) to cow backend */
+ if ((target_page >= target_offset / _MALI_OSK_MALI_PAGE_SIZE) &&
+ (target_page < ((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE))) {
+
+ /* allocate a new page node; swap memory is used for COW when the backend is swap-cowed */
+ page_node = _mali_page_node_allocate(MALI_PAGE_NODE_SWAP);
+
+ if (NULL == page_node) {
+ mutex_unlock(&target_bk->mutex);
+ goto error;
+ }
+
+ /* check if in the modified range*/
+ if ((cow->count >= range_start / _MALI_OSK_MALI_PAGE_SIZE) &&
+ (cow->count < (range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE)) {
+ /* need to allocate a new page */
+ /* In the swap path the new page is backed by a swap item */
+ swap_item = mali_mem_swap_alloc_swap_item();
+
+ if (NULL == swap_item) {
+ kfree(page_node);
+ mutex_unlock(&target_bk->mutex);
+ goto error;
+ }
+
+ swap_item->idx = mali_mem_swap_idx_alloc();
+
+ if (_MALI_OSK_BITMAP_INVALIDATE_INDEX == swap_item->idx) {
+ MALI_DEBUG_PRINT(1, ("Failed to allocate swap index in swap CoW.\n"));
+ kfree(page_node);
+ kfree(swap_item);
+ mutex_unlock(&target_bk->mutex);
+ goto error;
+ }
+
+ _mali_page_node_add_swap_item(page_node, swap_item);
+ } else {
+ _mali_page_node_add_swap_item(page_node, m_page->swap_it);
+
+ /* add ref to this page */
+ _mali_page_node_ref(m_page);
+ }
+
+ list_add_tail(&page_node->list, &cow->pages);
+ cow->count++;
+ }
+ target_page++;
+ }
+ mutex_unlock(&target_bk->mutex);
+
+ return _MALI_OSK_ERR_OK;
+error:
+ mali_mem_swap_release(backend, MALI_FALSE);
+ return _MALI_OSK_ERR_FAULT;
+
+}
+
+
+_mali_osk_errcode_t _mali_mem_put_page_node(mali_page_node *node)
+{
+ if (node->type == MALI_PAGE_NODE_OS) {
+ return mali_mem_os_put_page(node->page);
+ } else if (node->type == MALI_PAGE_NODE_BLOCK) {
+ return mali_mem_block_unref_node(node);
+ } else if (node->type == MALI_PAGE_NODE_SWAP) {
+ return _mali_mem_swap_put_page_node(node);
+ } else
+ MALI_DEBUG_ASSERT(0);
+ return _MALI_OSK_ERR_FAULT;
+}
+
+
+/**
+* Modify a range of an existing COW backend
+* @backend - COW backend
+* @range_start - offset of the modified range (4K aligned)
+* @range_size - size of the modified range (in bytes)
+*/
+_mali_osk_errcode_t mali_memory_cow_modify_range(mali_mem_backend *backend,
+ u32 range_start,
+ u32 range_size)
+{
+ mali_mem_allocation *alloc = NULL;
+ mali_mem_cow *cow = &backend->cow_mem;
+ struct mali_page_node *m_page, *m_tmp;
+ LIST_HEAD(pages);
+ struct page *new_page;
+ u32 count = 0;
+ s32 change_pages_nr = 0;
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_OK;
+
+ if (range_start % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ if (range_size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+ alloc = backend->mali_allocation;
+ MALI_DEBUG_ASSERT_POINTER(alloc);
+
+ MALI_DEBUG_ASSERT(MALI_MEM_COW == backend->type);
+ MALI_DEBUG_ASSERT(((range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE) <= cow->count);
+
+ mutex_lock(&backend->mutex);
+
+ /* replace the pages inside the modified range */
+ list_for_each_entry_safe(m_page, m_tmp, &cow->pages, list) {
+
+ /* check if in the modified range*/
+ if ((count >= range_start / _MALI_OSK_MALI_PAGE_SIZE) &&
+ (count < (range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE)) {
+ if (MALI_PAGE_NODE_SWAP != m_page->type) {
+ new_page = mali_mem_cow_alloc_page();
+
+ if (NULL == new_page) {
+ ret = _MALI_OSK_ERR_NOMEM;
+ goto error;
+ }
+ if (1 != _mali_page_node_get_ref_count(m_page))
+ change_pages_nr++;
+ /* unref old page*/
+ if (_mali_mem_put_page_node(m_page)) {
+ __free_page(new_page);
+ ret = _MALI_OSK_ERR_FAULT;
+ goto error;
+ }
+ /* add new page*/
+ /* always use OS for COW*/
+ m_page->type = MALI_PAGE_NODE_OS;
+ _mali_page_node_add_page(m_page, new_page);
+ } else {
+ struct mali_swap_item *swap_item;
+
+ swap_item = mali_mem_swap_alloc_swap_item();
+
+ if (NULL == swap_item) {
+ ret = _MALI_OSK_ERR_NOMEM;
+ goto error;
+ }
+
+ swap_item->idx = mali_mem_swap_idx_alloc();
+
+ if (_MALI_OSK_BITMAP_INVALIDATE_INDEX == swap_item->idx) {
+ MALI_DEBUG_PRINT(1, ("Failed to allocate swap index in swap CoW modify range.\n"));
+ kfree(swap_item);
+ ret = _MALI_OSK_ERR_NOMEM;
+ goto error;
+ }
+
+ if (1 != _mali_page_node_get_ref_count(m_page)) {
+ change_pages_nr++;
+ }
+
+ if (_mali_mem_put_page_node(m_page)) {
+ mali_mem_swap_free_swap_item(swap_item);
+ ret = _MALI_OSK_ERR_FAULT;
+ goto error;
+ }
+
+ _mali_page_node_add_swap_item(m_page, swap_item);
+ }
+ }
+ count++;
+ }
+ cow->change_pages_nr = change_pages_nr;
+
+ MALI_DEBUG_ASSERT(MALI_MEM_COW == alloc->type);
+
+ /* Zap the CPU mapping of the modified range, and re-establish it here if needed */
+ if (NULL != alloc->cpu_mapping.vma) {
+ MALI_DEBUG_ASSERT(0 != alloc->backend_handle);
+ MALI_DEBUG_ASSERT(NULL != alloc->cpu_mapping.vma);
+ MALI_DEBUG_ASSERT(alloc->cpu_mapping.vma->vm_end - alloc->cpu_mapping.vma->vm_start >= range_size);
+
+ if (MALI_MEM_BACKEND_FLAG_SWAP_COWED != (backend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)) {
+ zap_vma_ptes(alloc->cpu_mapping.vma, alloc->cpu_mapping.vma->vm_start + range_start, range_size);
+
+ ret = mali_mem_cow_cpu_map_pages_locked(backend, alloc->cpu_mapping.vma, alloc->cpu_mapping.vma->vm_start + range_start, range_size / _MALI_OSK_MALI_PAGE_SIZE);
+
+ if (unlikely(ret != _MALI_OSK_ERR_OK)) {
+ MALI_DEBUG_PRINT(2, ("mali_memory_cow_modify_range: cpu mapping failed !\n"));
+ ret = _MALI_OSK_ERR_FAULT;
+ }
+ } else {
+ /* used to trigger page fault for swappable cowed memory. */
+ alloc->cpu_mapping.vma->vm_flags |= VM_PFNMAP;
+ alloc->cpu_mapping.vma->vm_flags |= VM_MIXEDMAP;
+
+ zap_vma_ptes(alloc->cpu_mapping.vma, alloc->cpu_mapping.vma->vm_start + range_start, range_size);
+ /* clear the flags again so the swappable memory is unmapped in terms of struct pages rather than raw page frames */
+ alloc->cpu_mapping.vma->vm_flags &= ~VM_PFNMAP;
+ alloc->cpu_mapping.vma->vm_flags &= ~VM_MIXEDMAP;
+ }
+ }
+
+error:
+ mutex_unlock(&backend->mutex);
+ return ret;
+
+}
+
+
+/**
+* Allocate pages for a COW backend
+* @target_bk - target allocation's backend (the allocation to COW)
+* @target_offset - offset in the target allocation to COW (4K aligned; supports COW of memory allocated from a memory bank)
+* @target_size - size of the target allocation to COW (in bytes; supports memory bank)
+* @backend - COW backend
+* @range_start - offset of the modified range (4K aligned)
+* @range_size - size of the modified range (in bytes)
+*/
+_mali_osk_errcode_t mali_memory_do_cow(mali_mem_backend *target_bk,
+ u32 target_offset,
+ u32 target_size,
+ mali_mem_backend *backend,
+ u32 range_start,
+ u32 range_size)
+{
+ struct mali_session_data *session = backend->mali_allocation->session;
+
+ MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);
+
+ /* size & offset must be a multiple of the system page size */
+ if (target_size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ if (range_size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ if (target_offset % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ if (range_start % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+ /* check backend type */
+ MALI_DEBUG_ASSERT(MALI_MEM_COW == backend->type);
+
+ switch (target_bk->type) {
+ case MALI_MEM_OS:
+ case MALI_MEM_BLOCK:
+ return mali_memory_cow_os_memory(target_bk, target_offset, target_size, backend, range_start, range_size);
+ break;
+ case MALI_MEM_COW:
+ if (backend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED) {
+ return mali_memory_cow_swap_memory(target_bk, target_offset, target_size, backend, range_start, range_size);
+ } else {
+ return mali_memory_cow_os_memory(target_bk, target_offset, target_size, backend, range_start, range_size);
+ }
+ break;
+ case MALI_MEM_SWAP:
+ return mali_memory_cow_swap_memory(target_bk, target_offset, target_size, backend, range_start, range_size);
+ break;
+ case MALI_MEM_EXTERNAL:
+ /* not supported yet */
+ MALI_DEBUG_ASSERT(0);
+ break;
+ case MALI_MEM_DMA_BUF:
+ /* not supported yet */
+ MALI_DEBUG_ASSERT(0);
+ break;
+ case MALI_MEM_UMP:
+ /* not supported yet */
+ MALI_DEBUG_ASSERT(0);
+ break;
+ default:
+ /* not supported yet */
+ MALI_DEBUG_ASSERT(0);
+ break;
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+
+/**
+* Map COW backend memory to Mali
+* Supports OS/BLOCK page nodes
+*/
+int mali_mem_cow_mali_map(mali_mem_backend *mem_bkend, u32 range_start, u32 range_size)
+{
+ mali_mem_allocation *cow_alloc;
+ struct mali_page_node *m_page;
+ struct mali_session_data *session;
+ struct mali_page_directory *pagedir;
+ u32 virt, start;
+
+ cow_alloc = mem_bkend->mali_allocation;
+ virt = cow_alloc->mali_vma_node.vm_node.start;
+ start = virt;
+
+ MALI_DEBUG_ASSERT_POINTER(mem_bkend);
+ MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type);
+ MALI_DEBUG_ASSERT_POINTER(cow_alloc);
+
+ session = cow_alloc->session;
+ pagedir = session->page_directory;
+ MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);
+ list_for_each_entry(m_page, &mem_bkend->cow_mem.pages, list) {
+ if ((virt - start >= range_start) && (virt - start < range_start + range_size)) {
+ dma_addr_t phys = _mali_page_node_get_dma_addr(m_page);
+#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT)
+ MALI_DEBUG_ASSERT(0 == (phys >> 32));
+#endif
+ mali_mmu_pagedir_update(pagedir, virt, (mali_dma_addr)phys,
+ MALI_MMU_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT);
+ }
+ virt += MALI_MMU_PAGE_SIZE;
+ }
+ return 0;
+}
+
+/**
+* Map a COW backend to the CPU
+* Supports OS/BLOCK memory
+*/
+int mali_mem_cow_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma)
+{
+ mali_mem_cow *cow = &mem_bkend->cow_mem;
+ struct mali_page_node *m_page;
+ int ret;
+ unsigned long addr = vma->vm_start;
+ MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_COW);
+
+ list_for_each_entry(m_page, &cow->pages, list) {
+ /* We should use vm_insert_page, but it does a dcache
+ * flush which makes it way slower than remap_pfn_range or vm_insert_pfn.
+ ret = vm_insert_page(vma, addr, page);
+ */
+ ret = vm_insert_pfn(vma, addr, _mali_page_node_get_pfn(m_page));
+
+ if (unlikely(0 != ret)) {
+ return ret;
+ }
+ addr += _MALI_OSK_MALI_PAGE_SIZE;
+ }
+
+ return 0;
+}
+
+/**
+* Map some pages (COW backend) to the CPU vma at vaddr
+* @mem_bkend - COW backend
+* @vma - CPU vma to map into
+* @vaddr - start CPU virtual address to map to
+* @num - max number of pages to map
+*/
+_mali_osk_errcode_t mali_mem_cow_cpu_map_pages_locked(mali_mem_backend *mem_bkend,
+ struct vm_area_struct *vma,
+ unsigned long vaddr,
+ int num)
+{
+ mali_mem_cow *cow = &mem_bkend->cow_mem;
+ struct mali_page_node *m_page;
+ int ret;
+ int offset;
+ int count = 0;
+ unsigned long vstart = vma->vm_start;
+ MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_COW);
+ MALI_DEBUG_ASSERT(0 == vaddr % _MALI_OSK_MALI_PAGE_SIZE);
+ MALI_DEBUG_ASSERT(0 == vstart % _MALI_OSK_MALI_PAGE_SIZE);
+ offset = (vaddr - vstart) / _MALI_OSK_MALI_PAGE_SIZE;
+
+ list_for_each_entry(m_page, &cow->pages, list) {
+ if ((count >= offset) && (count < offset + num)) {
+ ret = vm_insert_pfn(vma, vaddr, _mali_page_node_get_pfn(m_page));
+
+ if (unlikely(0 != ret)) {
+ if (count == offset) {
+ return _MALI_OSK_ERR_FAULT;
+ } else {
+ /* -EBUSY means the page was already mapped (it was not in the modified range), which is OK */
+ return _MALI_OSK_ERR_OK;
+ }
+ }
+ vaddr += _MALI_OSK_MALI_PAGE_SIZE;
+ }
+ count++;
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+/**
+* Release COW backend memory
+* Pages are freed directly (put_page / unref), not returned to the pool
+*/
+u32 mali_mem_cow_release(mali_mem_backend *mem_bkend, mali_bool is_mali_mapped)
+{
+ mali_mem_allocation *alloc;
+ u32 free_pages_nr = 0;
+ MALI_DEBUG_ASSERT_POINTER(mem_bkend);
+ MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type);
+ alloc = mem_bkend->mali_allocation;
+ MALI_DEBUG_ASSERT_POINTER(alloc);
+
+ if (MALI_MEM_BACKEND_FLAG_SWAP_COWED != (MALI_MEM_BACKEND_FLAG_SWAP_COWED & mem_bkend->flags)) {
+ /* Unmap the memory from the mali virtual address space. */
+ if (MALI_TRUE == is_mali_mapped)
+ mali_mem_os_mali_unmap(alloc);
+ /* free the COW backend page list */
+ free_pages_nr = mali_mem_os_free(&mem_bkend->cow_mem.pages, mem_bkend->cow_mem.count, MALI_TRUE);
+ free_pages_nr += mali_mem_block_free_list(&mem_bkend->cow_mem.pages);
+
+ MALI_DEBUG_ASSERT(list_empty(&mem_bkend->cow_mem.pages));
+ } else {
+ free_pages_nr = mali_mem_swap_release(mem_bkend, is_mali_mapped);
+ }
+
+
+ MALI_DEBUG_PRINT(4, ("COW Mem free : allocated size = 0x%x, free size = 0x%x\n", mem_bkend->cow_mem.count * _MALI_OSK_MALI_PAGE_SIZE,
+ free_pages_nr * _MALI_OSK_MALI_PAGE_SIZE));
+
+ mem_bkend->cow_mem.count = 0;
+ return free_pages_nr;
+}
+
+
+/* The destination node can be an OS node or a swap node. */
+void _mali_mem_cow_copy_page(mali_page_node *src_node, mali_page_node *dst_node)
+{
+ void *dst, *src;
+ struct page *dst_page;
+ dma_addr_t dma_addr;
+
+ MALI_DEBUG_ASSERT(src_node != NULL);
+ MALI_DEBUG_ASSERT(dst_node != NULL);
+ MALI_DEBUG_ASSERT(dst_node->type == MALI_PAGE_NODE_OS
+ || dst_node->type == MALI_PAGE_NODE_SWAP);
+
+ if (dst_node->type == MALI_PAGE_NODE_OS) {
+ dst_page = dst_node->page;
+ } else {
+ dst_page = dst_node->swap_it->page;
+ }
+
+ dma_unmap_page(&mali_platform_device->dev, _mali_page_node_get_dma_addr(dst_node),
+ _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+ /* map it and copy the content */
+ dst = kmap_atomic(dst_page);
+
+ if (src_node->type == MALI_PAGE_NODE_OS ||
+ src_node->type == MALI_PAGE_NODE_SWAP) {
+ struct page *src_page;
+
+ if (src_node->type == MALI_PAGE_NODE_OS) {
+ src_page = src_node->page;
+ } else {
+ src_page = src_node->swap_it->page;
+ }
+
+ /* Clean and invalidate the cache. */
+ /* On ARM, speculative reads through the kernel linear mapping may
+ * pull stale data into the L1 cache. Unmapping with DMA_BIDIRECTIONAL
+ * invalidates the L1 cache so that the following read sees the
+ * latest data.
+ */
+ dma_unmap_page(&mali_platform_device->dev, _mali_page_node_get_dma_addr(src_node),
+ _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+ src = kmap_atomic(src_page);
+ memcpy(dst, src , _MALI_OSK_MALI_PAGE_SIZE);
+ kunmap_atomic(src);
+ dma_addr = dma_map_page(&mali_platform_device->dev, src_page,
+ 0, _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+ if (src_node->type == MALI_PAGE_NODE_SWAP) {
+ src_node->swap_it->dma_addr = dma_addr;
+ }
+ } else if (src_node->type == MALI_PAGE_NODE_BLOCK) {
+ /*
+ * use ioremap to map src for BLOCK memory
+ */
+ src = ioremap_nocache(_mali_page_node_get_dma_addr(src_node), _MALI_OSK_MALI_PAGE_SIZE);
+ memcpy(dst, src , _MALI_OSK_MALI_PAGE_SIZE);
+ iounmap(src);
+ }
+ kunmap_atomic(dst);
+ dma_addr = dma_map_page(&mali_platform_device->dev, dst_page,
+ 0, _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
+
+ if (dst_node->type == MALI_PAGE_NODE_SWAP) {
+ dst_node->swap_it->dma_addr = dma_addr;
+ }
+}
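+/*
+ * _mali_mem_cow_copy_page() follows the usual streaming-DMA discipline: both
+ * pages are taken out of DMA ownership with dma_unmap_page() before the CPU
+ * touches them, the contents are copied through short-lived kmap_atomic()
+ * mappings (or ioremap for BLOCK source pages), and each page is handed back
+ * to the device with dma_map_page() afterwards. Only swap nodes record the
+ * returned DMA address explicitly here.
+ */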
+
+
+/*
+* Allocate a page on demand when the CPU accesses it.
+* This is used in the page fault handler.
+*/
+_mali_osk_errcode_t mali_mem_cow_allocate_on_demand(mali_mem_backend *mem_bkend, u32 offset_page)
+{
+ struct page *new_page = NULL;
+ struct mali_page_node *new_node = NULL;
+ int i = 0;
+ struct mali_page_node *m_page, *found_node = NULL;
+ struct mali_session_data *session = NULL;
+ mali_mem_cow *cow = &mem_bkend->cow_mem;
+ MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type);
+ MALI_DEBUG_ASSERT(offset_page < mem_bkend->size / _MALI_OSK_MALI_PAGE_SIZE);
+ MALI_DEBUG_PRINT(4, ("mali_mem_cow_allocate_on_demand !, offset_page =0x%x\n", offset_page));
+
+ /* allocate new page here */
+ new_page = mali_mem_cow_alloc_page();
+ if (!new_page)
+ return _MALI_OSK_ERR_NOMEM;
+
+ new_node = _mali_page_node_allocate(MALI_PAGE_NODE_OS);
+ if (!new_node) {
+ __free_page(new_page);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ /* find the page in the backend */
+ list_for_each_entry(m_page, &cow->pages, list) {
+ if (i == offset_page) {
+ found_node = m_page;
+ break;
+ }
+ i++;
+ }
+ MALI_DEBUG_ASSERT(found_node);
+ if (NULL == found_node) {
+ __free_page(new_page);
+ kfree(new_node);
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ }
+
+ _mali_page_node_add_page(new_node, new_page);
+
+ /* Copy the src page's content to new page */
+ _mali_mem_cow_copy_page(found_node, new_node);
+
+ MALI_DEBUG_ASSERT_POINTER(mem_bkend->mali_allocation);
+ session = mem_bkend->mali_allocation->session;
+ MALI_DEBUG_ASSERT_POINTER(session);
+ if (1 != _mali_page_node_get_ref_count(found_node)) {
+ atomic_add(1, &session->mali_mem_allocated_pages);
+ if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
+ session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
+ }
+ mem_bkend->cow_mem.change_pages_nr++;
+ }
+ if (_mali_mem_put_page_node(found_node)) {
+ __free_page(new_page);
+ kfree(new_node);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ list_replace(&found_node->list, &new_node->list);
+
+ kfree(found_node);
+
+ /* map to the GPU side */
+ _mali_osk_mutex_wait(session->memory_lock);
+ mali_mem_cow_mali_map(mem_bkend, offset_page * _MALI_OSK_MALI_PAGE_SIZE, _MALI_OSK_MALI_PAGE_SIZE);
+ _mali_osk_mutex_signal(session->memory_lock);
+ return _MALI_OSK_ERR_OK;
+}
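+/*
+ * Fault-path summary for mali_mem_cow_allocate_on_demand(): a fresh page is
+ * allocated and the faulting page located by walking the backend's page list;
+ * the old contents are copied into the new page; session accounting only
+ * grows when the old page was shared (ref count != 1), because an unshared
+ * page is simply replaced; finally the old node is dropped, the new node is
+ * spliced in with list_replace(), and the single page is remapped on the GPU
+ * side under the session memory lock.
+ */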
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_cow.h b/drivers/gpu/arm/utgard/linux/mali_memory_cow.h
new file mode 100644
index 000000000000..c16ec7633c0a
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_cow.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_COW_H__
+#define __MALI_MEMORY_COW_H__
+
+#include "mali_osk.h"
+#include "mali_session.h"
+#include "mali_memory_types.h"
+
+int mali_mem_cow_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma);
+_mali_osk_errcode_t mali_mem_cow_cpu_map_pages_locked(mali_mem_backend *mem_bkend,
+ struct vm_area_struct *vma,
+ unsigned long vaddr,
+ int num);
+
+_mali_osk_errcode_t mali_memory_do_cow(mali_mem_backend *target_bk,
+ u32 target_offset,
+ u32 target_size,
+ mali_mem_backend *backend,
+ u32 range_start,
+ u32 range_size);
+
+_mali_osk_errcode_t mali_memory_cow_modify_range(mali_mem_backend *backend,
+ u32 range_start,
+ u32 range_size);
+
+_mali_osk_errcode_t mali_memory_cow_os_memory(mali_mem_backend *target_bk,
+ u32 target_offset,
+ u32 target_size,
+ mali_mem_backend *backend,
+ u32 range_start,
+ u32 range_size);
+
+void _mali_mem_cow_copy_page(mali_page_node *src_node, mali_page_node *dst_node);
+
+int mali_mem_cow_mali_map(mali_mem_backend *mem_bkend, u32 range_start, u32 range_size);
+u32 mali_mem_cow_release(mali_mem_backend *mem_bkend, mali_bool is_mali_mapped);
+_mali_osk_errcode_t mali_mem_cow_allocate_on_demand(mali_mem_backend *mem_bkend, u32 offset_page);
+#endif
+
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_defer_bind.c b/drivers/gpu/arm/utgard/linux/mali_memory_defer_bind.c
new file mode 100644
index 000000000000..e51902f1bf65
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_defer_bind.c
@@ -0,0 +1,246 @@
+/*
+ * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/mm.h>
+#include <linux/list.h>
+#include <linux/mm_types.h>
+#include <linux/fs.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
+#include <asm/cacheflush.h>
+#include <linux/sched.h>
+#ifdef CONFIG_ARM
+#include <asm/outercache.h>
+#endif
+#include <asm/dma-mapping.h>
+
+#include "mali_memory.h"
+#include "mali_kernel_common.h"
+#include "mali_uk_types.h"
+#include "mali_osk.h"
+#include "mali_kernel_linux.h"
+#include "mali_memory_defer_bind.h"
+#include "mali_executor.h"
+#include "mali_osk.h"
+#include "mali_scheduler.h"
+#include "mali_gp_job.h"
+
+mali_defer_bind_manager *mali_dmem_man = NULL;
+
+static u32 mali_dmem_get_gp_varying_size(struct mali_gp_job *gp_job)
+{
+ return gp_job->uargs.varying_memsize / _MALI_OSK_MALI_PAGE_SIZE;
+}
+
+_mali_osk_errcode_t mali_mem_defer_bind_manager_init(void)
+{
+ mali_dmem_man = _mali_osk_calloc(1, sizeof(struct mali_defer_bind_manager));
+ if (!mali_dmem_man)
+ return _MALI_OSK_ERR_NOMEM;
+
+ atomic_set(&mali_dmem_man->num_used_pages, 0);
+ atomic_set(&mali_dmem_man->num_dmem, 0);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+
+void mali_mem_defer_bind_manager_destory(void)
+{
+ if (mali_dmem_man) {
+ MALI_DEBUG_ASSERT(0 == atomic_read(&mali_dmem_man->num_dmem));
+ kfree(mali_dmem_man);
+ }
+ mali_dmem_man = NULL;
+}
+
+
+/* allocate pages from OS memory */
+_mali_osk_errcode_t mali_mem_defer_alloc_mem(u32 require, struct mali_session_data *session, mali_defer_mem_block *dblock)
+{
+ int retval = 0;
+ u32 num_pages = require;
+ mali_mem_os_mem os_mem;
+
+ retval = mali_mem_os_alloc_pages(&os_mem, num_pages * _MALI_OSK_MALI_PAGE_SIZE);
+
+ /* add to free pages list */
+ if (0 == retval) {
+ MALI_DEBUG_PRINT(4, ("mali_mem_defer_alloc_mem ,,*** pages allocate = 0x%x \n", num_pages));
+ list_splice(&os_mem.pages, &dblock->free_pages);
+ atomic_add(os_mem.count, &dblock->num_free_pages);
+ atomic_add(os_mem.count, &session->mali_mem_allocated_pages);
+ if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
+ session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
+ }
+ return _MALI_OSK_ERR_OK;
+ } else
+ return _MALI_OSK_ERR_FAULT;
+}
+
+_mali_osk_errcode_t mali_mem_prepare_mem_for_job(struct mali_gp_job *next_gp_job, mali_defer_mem_block *dblock)
+{
+ u32 require_page;
+
+ if (!next_gp_job)
+ return _MALI_OSK_ERR_FAULT;
+
+ require_page = mali_dmem_get_gp_varying_size(next_gp_job);
+
+ MALI_DEBUG_PRINT(4, ("mali_mem_defer_prepare_mem_work, require alloc page 0x%x\n",
+ require_page));
+ /* allocate more pages from OS */
+ if (_MALI_OSK_ERR_OK != mali_mem_defer_alloc_mem(require_page, next_gp_job->session, dblock)) {
+ MALI_DEBUG_PRINT(1, ("ERROR##mali_mem_defer_prepare_mem_work, allocate page failed!!"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ next_gp_job->bind_flag = MALI_DEFER_BIND_MEMORY_PREPARED;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+
+/* do preparation for the allocation before the deferred bind */
+_mali_osk_errcode_t mali_mem_defer_bind_allocation_prepare(mali_mem_allocation *alloc, struct list_head *list)
+{
+ mali_mem_backend *mem_bkend = NULL;
+ struct mali_backend_bind_list *bk_list = _mali_osk_calloc(1, sizeof(struct mali_backend_bind_list));
+ if (NULL == bk_list)
+ return _MALI_OSK_ERR_FAULT;
+
+ INIT_LIST_HEAD(&bk_list->node);
+ /* Get backend memory */
+ mutex_lock(&mali_idr_mutex);
+ if (!(mem_bkend = idr_find(&mali_backend_idr, alloc->backend_handle))) {
+ MALI_DEBUG_PRINT(1, ("Can't find memory backend in defer bind!\n"));
+ mutex_unlock(&mali_idr_mutex);
+ kfree(bk_list);
+ return _MALI_OSK_ERR_FAULT;
+ }
+ mutex_unlock(&mali_idr_mutex);
+ MALI_DEBUG_PRINT(4, ("bind_allocation_prepare:: allocation =%x vaddr=0x%x!\n", alloc, alloc->mali_vma_node.vm_node.start));
+
+ INIT_LIST_HEAD(&mem_bkend->os_mem.pages);
+
+ bk_list->bkend = mem_bkend;
+ bk_list->vaddr = alloc->mali_vma_node.vm_node.start;
+ bk_list->session = alloc->session;
+ bk_list->page_num = mem_bkend->size / _MALI_OSK_MALI_PAGE_SIZE;
+ MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_OS);
+
+ /* add to job to do list */
+ list_add(&bk_list->node, list);
+
+ return _MALI_OSK_ERR_OK;
+}
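+/*
+ * Note: the backend's os_mem page list is re-initialized above because the
+ * physical pages are only spliced in later, by mali_mem_defer_bind_allocation()
+ * at job-start time; until then the backend stays in the
+ * MALI_MEM_BACKEND_FLAG_NOT_BINDED state set at allocation time.
+ */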
+
+
+
+/* Bind physical memory to an allocation.
+ * This function will be called from the IRQ handler. */
+static _mali_osk_errcode_t mali_mem_defer_bind_allocation(struct mali_backend_bind_list *bk_node,
+ struct list_head *pages)
+{
+ struct mali_session_data *session = bk_node->session;
+ mali_mem_backend *mem_bkend = bk_node->bkend;
+ MALI_DEBUG_PRINT(4, ("mali_mem_defer_bind_allocation, bind bkend = %x page num=0x%x vaddr=%x session=%x\n", mem_bkend, bk_node->page_num, bk_node->vaddr, session));
+
+ MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_OS);
+ list_splice(pages, &mem_bkend->os_mem.pages);
+ mem_bkend->os_mem.count = bk_node->page_num;
+
+ if (mem_bkend->type == MALI_MEM_OS) {
+ mali_mem_os_mali_map(&mem_bkend->os_mem, session, bk_node->vaddr, 0,
+ mem_bkend->os_mem.count, MALI_MMU_FLAGS_DEFAULT);
+ }
+ smp_wmb();
+ bk_node->flag = MALI_DEFER_BIND_MEMORY_BINDED;
+ mem_bkend->flags &= ~MALI_MEM_BACKEND_FLAG_NOT_BINDED;
+ mem_bkend->flags |= MALI_MEM_BACKEND_FLAG_BINDED;
+ return _MALI_OSK_ERR_OK;
+}
+
+
+static struct list_head *mali_mem_defer_get_free_page_list(u32 count, struct list_head *pages, mali_defer_mem_block *dblock)
+{
+ int i = 0;
+ struct mali_page_node *m_page, *m_tmp;
+
+ if (atomic_read(&dblock->num_free_pages) < count) {
+ return NULL;
+ } else {
+ list_for_each_entry_safe(m_page, m_tmp, &dblock->free_pages, list) {
+ if (i < count) {
+ list_move_tail(&m_page->list, pages);
+ } else {
+ break;
+ }
+ i++;
+ }
+ MALI_DEBUG_ASSERT(i == count);
+ atomic_sub(count, &dblock->num_free_pages);
+ return pages;
+ }
+}
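+/*
+ * mali_mem_defer_get_free_page_list() either moves exactly 'count' pages from
+ * the dblock pool onto 'pages', or moves nothing and returns NULL when the
+ * pool is too small, so a caller never sees a partially filled list.
+ */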
+
+
+/* Called from the job start IOCTL to bind physical memory for each allocation.
+* @count - number of pages
+* @gp - GP job whose vary_todo list holds the backends to bind
+* @dmem_block - page pool to take the free pages from
+*/
+_mali_osk_errcode_t mali_mem_defer_bind(u32 count, struct mali_gp_job *gp,
+ struct mali_defer_mem_block *dmem_block)
+{
+ struct mali_defer_mem *dmem = NULL;
+ struct mali_backend_bind_list *bkn, *bkn_tmp;
+ LIST_HEAD(pages);
+
+ MALI_DEBUG_PRINT(4, ("#BIND: GP job=%x## \n", gp));
+ dmem = (mali_defer_mem *)_mali_osk_calloc(1, sizeof(struct mali_defer_mem));
+ if (dmem) {
+ INIT_LIST_HEAD(&dmem->node);
+ gp->dmem = dmem;
+ } else {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ atomic_add(1, &mali_dmem_man->num_dmem);
+ /* for each bk_list backend, do bind */
+ list_for_each_entry_safe(bkn, bkn_tmp , &gp->vary_todo, node) {
+ INIT_LIST_HEAD(&pages);
+ if (likely(mali_mem_defer_get_free_page_list(bkn->page_num, &pages, dmem_block))) {
+ list_del(&bkn->node);
+ mali_mem_defer_bind_allocation(bkn, &pages);
+ _mali_osk_free(bkn);
+ } else {
+ /* Running out of pages here should never happen: they were preallocated. */
+ MALI_DEBUG_PRINT(1, ("#BIND: not enough memory when binding!\n"));
+ MALI_DEBUG_ASSERT(0);
+ }
+ }
+
+ if (!list_empty(&gp->vary_todo)) {
+ MALI_DEBUG_ASSERT(0);
+ }
+
+ dmem->flag = MALI_DEFER_BIND_MEMORY_BINDED;
+
+ return _MALI_OSK_ERR_OK;
+}
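+/*
+ * Deferred-bind flow: mali_mem_prepare_mem_for_job() fills the dblock pool
+ * while the job is queued; at job start mali_mem_defer_bind() walks
+ * gp->vary_todo, carves a per-backend page list out of the pool and maps it;
+ * mali_mem_defer_dmem_free() drops the bookkeeping when the GP job is torn
+ * down.
+ */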
+
+void mali_mem_defer_dmem_free(struct mali_gp_job *gp)
+{
+ if (gp->dmem) {
+ atomic_dec(&mali_dmem_man->num_dmem);
+ _mali_osk_free(gp->dmem);
+ }
+}
+
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_defer_bind.h b/drivers/gpu/arm/utgard/linux/mali_memory_defer_bind.h
new file mode 100644
index 000000000000..ef67540434f5
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_defer_bind.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef __MALI_MEMORY_DEFER_BIND_H_
+#define __MALI_MEMORY_DEFER_BIND_H_
+
+
+#include "mali_osk.h"
+#include "mali_session.h"
+
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+
+#include "mali_memory_types.h"
+#include "mali_memory_os_alloc.h"
+#include "mali_uk_types.h"
+
+struct mali_gp_job;
+
+typedef struct mali_defer_mem {
+ struct list_head node; /*dlist node in bind manager */
+ u32 flag;
+} mali_defer_mem;
+
+
+typedef struct mali_defer_mem_block {
+ struct list_head free_pages; /* page pool */
+ atomic_t num_free_pages;
+} mali_defer_mem_block;
+
+/* list of varying memory that still needs to be bound */
+typedef struct mali_backend_bind_list {
+ struct list_head node;
+ struct mali_mem_backend *bkend;
+ u32 vaddr;
+ u32 page_num;
+ struct mali_session_data *session;
+ u32 flag;
+} mali_backend_bind_lists;
+
+
+typedef struct mali_defer_bind_manager {
+ atomic_t num_used_pages;
+ atomic_t num_dmem;
+} mali_defer_bind_manager;
+
+_mali_osk_errcode_t mali_mem_defer_bind_manager_init(void);
+void mali_mem_defer_bind_manager_destory(void);
+_mali_osk_errcode_t mali_mem_defer_bind(u32 count, struct mali_gp_job *gp,
+ struct mali_defer_mem_block *dmem_block);
+_mali_osk_errcode_t mali_mem_defer_bind_allocation_prepare(mali_mem_allocation *alloc, struct list_head *list);
+_mali_osk_errcode_t mali_mem_prepare_mem_for_job(struct mali_gp_job *next_gp_job, mali_defer_mem_block *dblock);
+void mali_mem_defer_dmem_free(struct mali_gp_job *gp);
+
+#endif
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_dma_buf.c b/drivers/gpu/arm/utgard/linux/mali_memory_dma_buf.c
new file mode 100644
index 000000000000..2fa5028d8342
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_dma_buf.c
@@ -0,0 +1,369 @@
+/*
+ * Copyright (C) 2012-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+#include <linux/dma-buf.h>
+#include <linux/scatterlist.h>
+#include <linux/rbtree.h>
+#include <linux/platform_device.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_kernel_linux.h"
+
+#include "mali_memory.h"
+#include "mali_memory_dma_buf.h"
+#include "mali_memory_virtual.h"
+#include "mali_pp_job.h"
+
+/*
+ * Map the dma-buf attachment of \a mem_backend into its session at the
+ * allocation's Mali virtual address.
+ */
+static int mali_dma_buf_map(mali_mem_backend *mem_backend)
+{
+ mali_mem_allocation *alloc;
+ struct mali_dma_buf_attachment *mem;
+ struct mali_session_data *session;
+ struct mali_page_directory *pagedir;
+ _mali_osk_errcode_t err;
+ struct scatterlist *sg;
+ u32 virt, flags;
+ int i;
+
+ MALI_DEBUG_ASSERT_POINTER(mem_backend);
+
+ alloc = mem_backend->mali_allocation;
+ MALI_DEBUG_ASSERT_POINTER(alloc);
+
+ mem = mem_backend->dma_buf.attachment;
+ MALI_DEBUG_ASSERT_POINTER(mem);
+
+ session = alloc->session;
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_ASSERT(mem->session == session);
+
+ virt = alloc->mali_vma_node.vm_node.start;
+ flags = alloc->flags;
+
+ mali_session_memory_lock(session);
+ mem->map_ref++;
+
+ MALI_DEBUG_PRINT(5, ("Mali DMA-buf: map attachment %p, new map_ref = %d\n", mem, mem->map_ref));
+
+ if (1 == mem->map_ref) {
+
+ /* First reference taken, so we need to map the dma buf */
+ MALI_DEBUG_ASSERT(!mem->is_mapped);
+
+ mem->sgt = dma_buf_map_attachment(mem->attachment, DMA_BIDIRECTIONAL);
+ if (IS_ERR_OR_NULL(mem->sgt)) {
+ MALI_DEBUG_PRINT_ERROR(("Failed to map dma-buf attachment\n"));
+ mem->map_ref--;
+ mali_session_memory_unlock(session);
+ return -EFAULT;
+ }
+
+ err = mali_mem_mali_map_prepare(alloc);
+ if (_MALI_OSK_ERR_OK != err) {
+ MALI_DEBUG_PRINT(1, ("Mapping of DMA memory failed\n"));
+ mem->map_ref--;
+ mali_session_memory_unlock(session);
+ return -ENOMEM;
+ }
+
+ pagedir = mali_session_get_page_directory(session);
+ MALI_DEBUG_ASSERT_POINTER(pagedir);
+
+ for_each_sg(mem->sgt->sgl, sg, mem->sgt->nents, i) {
+ u32 size = sg_dma_len(sg);
+ dma_addr_t phys = sg_dma_address(sg);
+
+ /* sg must be page aligned. */
+ MALI_DEBUG_ASSERT(0 == size % MALI_MMU_PAGE_SIZE);
+ MALI_DEBUG_ASSERT(0 == (phys & ~(uintptr_t)0xFFFFFFFF));
+
+ mali_mmu_pagedir_update(pagedir, virt, phys, size, MALI_MMU_FLAGS_DEFAULT);
+
+ virt += size;
+ }
+
+ if (flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
+ u32 guard_phys;
+ MALI_DEBUG_PRINT(7, ("Mapping in extra guard page\n"));
+
+ guard_phys = sg_dma_address(mem->sgt->sgl);
+ mali_mmu_pagedir_update(pagedir, virt, guard_phys, MALI_MMU_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT);
+ }
+
+ mem->is_mapped = MALI_TRUE;
+ mali_session_memory_unlock(session);
+ /* Wake up any thread waiting for buffer to become mapped */
+ wake_up_all(&mem->wait_queue);
+ } else {
+ MALI_DEBUG_ASSERT(mem->is_mapped);
+ mali_session_memory_unlock(session);
+ }
+
+ return 0;
+}
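+/*
+ * map_ref is a simple per-attachment count: the first mapper pays for
+ * dma_buf_map_attachment() plus the Mali page-table setup, later mappers just
+ * take a reference, and waiters on wait_queue are woken once is_mapped flips.
+ */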
+
+static void mali_dma_buf_unmap(mali_mem_allocation *alloc, struct mali_dma_buf_attachment *mem)
+{
+ MALI_DEBUG_ASSERT_POINTER(alloc);
+ MALI_DEBUG_ASSERT_POINTER(mem);
+ MALI_DEBUG_ASSERT_POINTER(mem->attachment);
+ MALI_DEBUG_ASSERT_POINTER(mem->buf);
+ MALI_DEBUG_ASSERT_POINTER(alloc->session);
+
+ mali_session_memory_lock(alloc->session);
+ mem->map_ref--;
+
+ MALI_DEBUG_PRINT(5, ("Mali DMA-buf: unmap attachment %p, new map_ref = %d\n", mem, mem->map_ref));
+
+ if (0 == mem->map_ref) {
+ dma_buf_unmap_attachment(mem->attachment, mem->sgt, DMA_BIDIRECTIONAL);
+ if (MALI_TRUE == mem->is_mapped) {
+ mali_mem_mali_map_free(alloc->session, alloc->psize, alloc->mali_vma_node.vm_node.start,
+ alloc->flags);
+ }
+ mem->is_mapped = MALI_FALSE;
+ }
+ mali_session_memory_unlock(alloc->session);
+ /* Wake up any thread waiting for buffer to become unmapped */
+ wake_up_all(&mem->wait_queue);
+}
+
+#if !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+int mali_dma_buf_map_job(struct mali_pp_job *job)
+{
+ struct mali_dma_buf_attachment *mem;
+ _mali_osk_errcode_t err;
+ int i;
+ int ret = 0;
+ u32 num_memory_cookies;
+ struct mali_session_data *session;
+ struct mali_vma_node *mali_vma_node = NULL;
+ mali_mem_allocation *mali_alloc = NULL;
+ mali_mem_backend *mem_bkend = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ num_memory_cookies = mali_pp_job_num_memory_cookies(job);
+
+ session = mali_pp_job_get_session(job);
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ for (i = 0; i < num_memory_cookies; i++) {
+ u32 mali_addr = mali_pp_job_get_memory_cookie(job, i);
+ mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0);
+ MALI_DEBUG_ASSERT(NULL != mali_vma_node);
+ mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
+ MALI_DEBUG_ASSERT(NULL != mali_alloc);
+ if (MALI_MEM_DMA_BUF != mali_alloc->type) {
+ continue;
+ }
+
+ /* Get backend memory */
+ mutex_lock(&mali_idr_mutex);
+ mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle);
+ mutex_unlock(&mali_idr_mutex);
+ MALI_DEBUG_ASSERT(NULL != mem_bkend);
+
+ mem = mem_bkend->dma_buf.attachment;
+
+ MALI_DEBUG_ASSERT_POINTER(mem);
+ MALI_DEBUG_ASSERT(mem->session == mali_pp_job_get_session(job));
+
+ err = mali_dma_buf_map(mem_bkend);
+ if (0 != err) {
+ MALI_DEBUG_PRINT_ERROR(("Mali DMA-buf: Failed to map dma-buf for mali address %x\n", mali_addr));
+ ret = -EFAULT;
+ continue;
+ }
+ }
+ return ret;
+}
+
+void mali_dma_buf_unmap_job(struct mali_pp_job *job)
+{
+ struct mali_dma_buf_attachment *mem;
+ int i;
+ u32 num_memory_cookies;
+ struct mali_session_data *session;
+ struct mali_vma_node *mali_vma_node = NULL;
+ mali_mem_allocation *mali_alloc = NULL;
+ mali_mem_backend *mem_bkend = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ num_memory_cookies = mali_pp_job_num_memory_cookies(job);
+
+ session = mali_pp_job_get_session(job);
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ for (i = 0; i < num_memory_cookies; i++) {
+ u32 mali_addr = mali_pp_job_get_memory_cookie(job, i);
+ mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0);
+ MALI_DEBUG_ASSERT(NULL != mali_vma_node);
+ mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
+ MALI_DEBUG_ASSERT(NULL != mali_alloc);
+ if (MALI_MEM_DMA_BUF != mali_alloc->type) {
+ continue;
+ }
+
+ /* Get backend memory */
+ mutex_lock(&mali_idr_mutex);
+ mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle);
+ mutex_unlock(&mali_idr_mutex);
+ MALI_DEBUG_ASSERT(NULL != mem_bkend);
+
+ mem = mem_bkend->dma_buf.attachment;
+
+ MALI_DEBUG_ASSERT_POINTER(mem);
+ MALI_DEBUG_ASSERT(mem->session == mali_pp_job_get_session(job));
+ mali_dma_buf_unmap(mem_bkend->mali_allocation, mem);
+ }
+}
+#endif /* !CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH */
+
+int mali_dma_buf_get_size(struct mali_session_data *session, _mali_uk_dma_buf_get_size_s __user *user_arg)
+{
+ _mali_uk_dma_buf_get_size_s args;
+ int fd;
+ struct dma_buf *buf;
+
+ /* get call arguments from user space. copy_from_user returns the number of bytes that were NOT copied */
+ if (0 != copy_from_user(&args, (void __user *)user_arg, sizeof(_mali_uk_dma_buf_get_size_s))) {
+ return -EFAULT;
+ }
+
+ /* Look up the dma-buf from the fd */
+ fd = args.mem_fd;
+
+ buf = dma_buf_get(fd);
+ if (IS_ERR_OR_NULL(buf)) {
+ MALI_DEBUG_PRINT_ERROR(("Failed to get dma-buf from fd: %d\n", fd));
+ return PTR_RET(buf);
+ }
+
+ if (0 != put_user(buf->size, &user_arg->size)) {
+ dma_buf_put(buf);
+ return -EFAULT;
+ }
+
+ dma_buf_put(buf);
+
+ return 0;
+}
+
+_mali_osk_errcode_t mali_mem_bind_dma_buf(mali_mem_allocation *alloc,
+ mali_mem_backend *mem_backend,
+ int fd, u32 flags)
+{
+ struct dma_buf *buf;
+ struct mali_dma_buf_attachment *dma_mem;
+ struct mali_session_data *session = alloc->session;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_ASSERT_POINTER(mem_backend);
+ MALI_DEBUG_ASSERT_POINTER(alloc);
+
+ /* get dma buffer */
+ buf = dma_buf_get(fd);
+ if (IS_ERR_OR_NULL(buf)) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Currently, only mapping of the full buffer is supported. */
+ if (alloc->psize != buf->size) {
+ goto failed_alloc_mem;
+ }
+
+ dma_mem = _mali_osk_calloc(1, sizeof(struct mali_dma_buf_attachment));
+ if (NULL == dma_mem) {
+ goto failed_alloc_mem;
+ }
+
+ dma_mem->buf = buf;
+ dma_mem->session = session;
+ dma_mem->map_ref = 0;
+ init_waitqueue_head(&dma_mem->wait_queue);
+
+ dma_mem->attachment = dma_buf_attach(dma_mem->buf, &mali_platform_device->dev);
+ if (NULL == dma_mem->attachment) {
+ goto failed_dma_attach;
+ }
+
+ mem_backend->dma_buf.attachment = dma_mem;
+
+ alloc->flags |= MALI_MEM_FLAG_DONT_CPU_MAP;
+ if (flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) {
+ alloc->flags |= MALI_MEM_FLAG_MALI_GUARD_PAGE;
+ }
+
+
+#if defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+ /* Map memory into session's Mali virtual address space. */
+ if (0 != mali_dma_buf_map(mem_backend)) {
+ goto Failed_dma_map;
+ }
+#endif
+
+ return _MALI_OSK_ERR_OK;
+
+#if defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+Failed_dma_map:
+ mali_dma_buf_unmap(alloc, dma_mem);
+#endif
+ /* Wait for buffer to become unmapped */
+ wait_event(dma_mem->wait_queue, !dma_mem->is_mapped);
+ MALI_DEBUG_ASSERT(!dma_mem->is_mapped);
+ dma_buf_detach(dma_mem->buf, dma_mem->attachment);
+failed_dma_attach:
+ _mali_osk_free(dma_mem);
+failed_alloc_mem:
+ dma_buf_put(buf);
+ return _MALI_OSK_ERR_FAULT;
+}
+
+void mali_mem_unbind_dma_buf(mali_mem_backend *mem_backend)
+{
+ struct mali_dma_buf_attachment *mem;
+ MALI_DEBUG_ASSERT_POINTER(mem_backend);
+ MALI_DEBUG_ASSERT(MALI_MEM_DMA_BUF == mem_backend->type);
+
+ mem = mem_backend->dma_buf.attachment;
+ MALI_DEBUG_ASSERT_POINTER(mem);
+ MALI_DEBUG_ASSERT_POINTER(mem->attachment);
+ MALI_DEBUG_ASSERT_POINTER(mem->buf);
+ MALI_DEBUG_PRINT(3, ("Mali DMA-buf: release attachment %p\n", mem));
+
+#if defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+ MALI_DEBUG_ASSERT_POINTER(mem_backend->mali_allocation);
+ /* We mapped implicitly on attach, so we need to unmap on release */
+ mali_dma_buf_unmap(mem_backend->mali_allocation, mem);
+#endif
+ /* Wait for buffer to become unmapped */
+ wait_event(mem->wait_queue, !mem->is_mapped);
+ MALI_DEBUG_ASSERT(!mem->is_mapped);
+
+ dma_buf_detach(mem->buf, mem->attachment);
+ dma_buf_put(mem->buf);
+
+ _mali_osk_free(mem);
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_dma_buf.h b/drivers/gpu/arm/utgard/linux/mali_memory_dma_buf.h
new file mode 100644
index 000000000000..859d3849e6b3
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_dma_buf.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2011-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_DMA_BUF_H__
+#define __MALI_MEMORY_DMA_BUF_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "mali_uk_types.h"
+#include "mali_osk.h"
+#include "mali_memory.h"
+
+struct mali_pp_job;
+
+struct mali_dma_buf_attachment;
+struct mali_dma_buf_attachment {
+ struct dma_buf *buf;
+ struct dma_buf_attachment *attachment;
+ struct sg_table *sgt;
+ struct mali_session_data *session;
+ int map_ref;
+ struct mutex map_lock;
+ mali_bool is_mapped;
+ wait_queue_head_t wait_queue;
+};
+
+int mali_dma_buf_get_size(struct mali_session_data *session, _mali_uk_dma_buf_get_size_s __user *arg);
+
+void mali_mem_unbind_dma_buf(mali_mem_backend *mem_backend);
+
+_mali_osk_errcode_t mali_mem_bind_dma_buf(mali_mem_allocation *alloc,
+ mali_mem_backend *mem_backend,
+ int fd, u32 flags);
+
+#if !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+int mali_dma_buf_map_job(struct mali_pp_job *job);
+void mali_dma_buf_unmap_job(struct mali_pp_job *job);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_MEMORY_DMA_BUF_H__ */
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_external.c b/drivers/gpu/arm/utgard/linux/mali_memory_external.c
new file mode 100644
index 000000000000..3733f2e0c80a
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_external.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_ukk.h"
+#include "mali_memory.h"
+#include "mali_mem_validation.h"
+#include "mali_uk_types.h"
+
+void mali_mem_unbind_ext_buf(mali_mem_backend *mem_backend)
+{
+ mali_mem_allocation *alloc;
+ struct mali_session_data *session;
+ MALI_DEBUG_ASSERT_POINTER(mem_backend);
+ alloc = mem_backend->mali_allocation;
+ MALI_DEBUG_ASSERT_POINTER(alloc);
+ MALI_DEBUG_ASSERT(MALI_MEM_EXTERNAL == mem_backend->type);
+
+ session = alloc->session;
+ MALI_DEBUG_ASSERT_POINTER(session);
+ mali_session_memory_lock(session);
+ mali_mem_mali_map_free(session, alloc->psize, alloc->mali_vma_node.vm_node.start,
+ alloc->flags);
+ mali_session_memory_unlock(session);
+}
+
+_mali_osk_errcode_t mali_mem_bind_ext_buf(mali_mem_allocation *alloc,
+ mali_mem_backend *mem_backend,
+ u32 phys_addr,
+ u32 flag)
+{
+ struct mali_session_data *session;
+ _mali_osk_errcode_t err;
+ u32 virt, phys, size;
+ MALI_DEBUG_ASSERT_POINTER(mem_backend);
+ MALI_DEBUG_ASSERT_POINTER(alloc);
+ size = alloc->psize;
+ session = (struct mali_session_data *)(uintptr_t)alloc->session;
+ MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);
+
+ /* check arguments */
+ /* NULL might be a valid Mali address */
+ if (!size) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+ /* size must be a multiple of the system page size */
+ if (size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+#if 0
+ /* Validate the mali physical range */
+ if (_MALI_OSK_ERR_OK != mali_mem_validation_check(phys_addr, size)) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+#endif
+
+ if (flag & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) {
+ alloc->flags |= MALI_MEM_FLAG_MALI_GUARD_PAGE;
+ }
+
+ mali_session_memory_lock(session);
+
+ virt = alloc->mali_vma_node.vm_node.start;
+ phys = phys_addr;
+
+ err = mali_mem_mali_map_prepare(alloc);
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_session_memory_unlock(session);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ mali_mmu_pagedir_update(session->page_directory, virt, phys, size, MALI_MMU_FLAGS_DEFAULT);
+
+ if (alloc->flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
+ mali_mmu_pagedir_update(session->page_directory, virt + size, phys, _MALI_OSK_MALI_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT);
+ }
+ MALI_DEBUG_PRINT(3,
+ ("Requested to map physical memory 0x%x-0x%x into virtual memory 0x%x\n",
+ phys_addr, (phys_addr + size - 1),
+ virt));
+ mali_session_memory_unlock(session);
+
+ MALI_SUCCESS;
+}
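+/*
+ * The extra guard page mapped above simply re-maps the first physical page
+ * once more directly after the buffer, so a GPU job that reads slightly past
+ * the end hits valid memory instead of raising a page fault.
+ */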
+
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_external.h b/drivers/gpu/arm/utgard/linux/mali_memory_external.h
new file mode 100644
index 000000000000..645580b51fc9
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_external.h
@@ -0,0 +1,29 @@
+
+/*
+ * Copyright (C) 2011-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_EXTERNAL_H__
+#define __MALI_MEMORY_EXTERNAL_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+_mali_osk_errcode_t mali_mem_bind_ext_buf(mali_mem_allocation *alloc,
+ mali_mem_backend *mem_backend,
+ u32 phys_addr,
+ u32 flag);
+void mali_mem_unbind_ext_buf(mali_mem_backend *mem_backend);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_manager.c b/drivers/gpu/arm/utgard/linux/mali_memory_manager.c
new file mode 100644
index 000000000000..55e2c092d597
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_manager.c
@@ -0,0 +1,965 @@
+/*
+ * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/fs.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/sched.h>
+
+#include <linux/platform_device.h>
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+#include <linux/dma-buf.h>
+#endif
+#include <linux/idr.h>
+
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_kernel_linux.h"
+#include "mali_scheduler.h"
+#include "mali_memory.h"
+#include "mali_memory_os_alloc.h"
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+#include "mali_memory_dma_buf.h"
+#endif
+#if defined(CONFIG_MALI400_UMP)
+#include "mali_memory_ump.h"
+#endif
+#include "mali_memory_manager.h"
+#include "mali_memory_virtual.h"
+#include "mali_memory_util.h"
+#include "mali_memory_external.h"
+#include "mali_memory_cow.h"
+#include "mali_memory_block_alloc.h"
+#include "mali_ukk.h"
+#include "mali_memory_swap_alloc.h"
+
+/*
+* New memory system interface
+*/
+
+/* init idr for backend memory lookup */
+struct idr mali_backend_idr;
+struct mutex mali_idr_mutex;
+
+/* init allocation manager */
+int mali_memory_manager_init(struct mali_allocation_manager *mgr)
+{
+ /* init Locks */
+ rwlock_init(&mgr->vm_lock);
+ mutex_init(&mgr->list_mutex);
+
+ /* init link */
+ INIT_LIST_HEAD(&mgr->head);
+
+ /* init RB tree */
+ mgr->allocation_mgr_rb = RB_ROOT;
+ mgr->mali_allocation_num = 0;
+ return 0;
+}
+
+/* Deinitialize the allocation manager.
+* Performs some sanity checks for debugging.
+*/
+void mali_memory_manager_uninit(struct mali_allocation_manager *mgr)
+{
+ /* check RB tree is empty */
+ MALI_DEBUG_ASSERT(((void *)(mgr->allocation_mgr_rb.rb_node) == (void *)rb_last(&mgr->allocation_mgr_rb)));
+ /* check allocation List */
+ MALI_DEBUG_ASSERT(list_empty(&mgr->head));
+}
+
+/* Prepare memory descriptor */
+static mali_mem_allocation *mali_mem_allocation_struct_create(struct mali_session_data *session)
+{
+ mali_mem_allocation *mali_allocation;
+
+ /* Allocate memory */
+ mali_allocation = (mali_mem_allocation *)kzalloc(sizeof(mali_mem_allocation), GFP_KERNEL);
+ if (NULL == mali_allocation) {
+ MALI_DEBUG_PRINT(1, ("mali_mem_allocation_struct_create: descriptor was NULL\n"));
+ return NULL;
+ }
+
+ MALI_DEBUG_CODE(mali_allocation->magic = MALI_MEM_ALLOCATION_VALID_MAGIC);
+
+ /* do init */
+ mali_allocation->flags = 0;
+ mali_allocation->session = session;
+
+ INIT_LIST_HEAD(&mali_allocation->list);
+ _mali_osk_atomic_init(&mali_allocation->mem_alloc_refcount, 1);
+
+ /* add to the session list */
+ mutex_lock(&session->allocation_mgr.list_mutex);
+ list_add_tail(&mali_allocation->list, &session->allocation_mgr.head);
+ session->allocation_mgr.mali_allocation_num++;
+ mutex_unlock(&session->allocation_mgr.list_mutex);
+
+ return mali_allocation;
+}
+
+void mali_mem_allocation_struct_destory(mali_mem_allocation *alloc)
+{
+ MALI_DEBUG_ASSERT_POINTER(alloc);
+ MALI_DEBUG_ASSERT_POINTER(alloc->session);
+ mutex_lock(&alloc->session->allocation_mgr.list_mutex);
+ list_del(&alloc->list);
+ alloc->session->allocation_mgr.mali_allocation_num--;
+ mutex_unlock(&alloc->session->allocation_mgr.list_mutex);
+
+ kfree(alloc);
+}
+
+int mali_mem_backend_struct_create(mali_mem_backend **backend, u32 psize)
+{
+ mali_mem_backend *mem_backend = NULL;
+ s32 ret = -ENOSPC;
+ s32 index = -1;
+ *backend = (mali_mem_backend *)kzalloc(sizeof(mali_mem_backend), GFP_KERNEL);
+ if (NULL == *backend) {
+ MALI_DEBUG_PRINT(1, ("mali_mem_backend_struct_create: backend descriptor was NULL\n"));
+ return -1;
+ }
+ mem_backend = *backend;
+ mem_backend->size = psize;
+ mutex_init(&mem_backend->mutex);
+ INIT_LIST_HEAD(&mem_backend->list);
+ mem_backend->using_count = 0;
+
+
+ /* link backend with id */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
+again:
+ if (!idr_pre_get(&mali_backend_idr, GFP_KERNEL)) {
+ kfree(mem_backend);
+ return -ENOMEM;
+ }
+ mutex_lock(&mali_idr_mutex);
+ ret = idr_get_new_above(&mali_backend_idr, mem_backend, 1, &index);
+ mutex_unlock(&mali_idr_mutex);
+
+ if (-ENOSPC == ret) {
+ kfree(mem_backend);
+ return -ENOSPC;
+ }
+ if (-EAGAIN == ret)
+ goto again;
+#else
+ mutex_lock(&mali_idr_mutex);
+ ret = idr_alloc(&mali_backend_idr, mem_backend, 1, MALI_S32_MAX, GFP_KERNEL);
+ mutex_unlock(&mali_idr_mutex);
+ index = ret;
+ if (ret < 0) {
+ MALI_DEBUG_PRINT(1, ("mali_mem_backend_struct_create: Can't allocate idr for backend! \n"));
+ kfree(mem_backend);
+ return -ENOSPC;
+ }
+#endif
+ return index;
+}
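+/*
+ * The two idr branches above do the same job on either side of the 3.9 API
+ * change: the legacy idr_pre_get()/idr_get_new_above() pair must loop on
+ * -EAGAIN because preloading and allocation are separate steps, while
+ * idr_alloc() performs both in one call. Both paths hand back an id >= 1 so
+ * that 0 can never be mistaken for a valid backend handle.
+ */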
+
+
+static void mali_mem_backend_struct_destory(mali_mem_backend **backend, s32 backend_handle)
+{
+ mali_mem_backend *mem_backend = *backend;
+
+ mutex_lock(&mali_idr_mutex);
+ idr_remove(&mali_backend_idr, backend_handle);
+ mutex_unlock(&mali_idr_mutex);
+ kfree(mem_backend);
+ *backend = NULL;
+}
+
+mali_mem_backend *mali_mem_backend_struct_search(struct mali_session_data *session, u32 mali_address)
+{
+ struct mali_vma_node *mali_vma_node = NULL;
+ mali_mem_backend *mem_bkend = NULL;
+ mali_mem_allocation *mali_alloc = NULL;
+ MALI_DEBUG_ASSERT_POINTER(session);
+ mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_address, 0);
+ if (NULL == mali_vma_node) {
+ MALI_DEBUG_PRINT(1, ("mali_mem_backend_struct_search:vma node was NULL\n"));
+ return NULL;
+ }
+ mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
+ /* Look up the backend memory by handle */
+ mutex_lock(&mali_idr_mutex);
+ mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle);
+ mutex_unlock(&mali_idr_mutex);
+ MALI_DEBUG_ASSERT(NULL != mem_bkend);
+ return mem_bkend;
+}
+
+static _mali_osk_errcode_t mali_mem_resize(struct mali_session_data *session, mali_mem_backend *mem_backend, u32 physical_size)
+{
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
+ int retval = 0;
+ mali_mem_allocation *mali_allocation = NULL;
+ mali_mem_os_mem tmp_os_mem;
+ s32 change_page_count;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_ASSERT_POINTER(mem_backend);
+ MALI_DEBUG_PRINT(4, (" mali_mem_resize_memory called! \n"));
+ MALI_DEBUG_ASSERT(0 == physical_size % MALI_MMU_PAGE_SIZE);
+
+ mali_allocation = mem_backend->mali_allocation;
+ MALI_DEBUG_ASSERT_POINTER(mali_allocation);
+
+ MALI_DEBUG_ASSERT(MALI_MEM_FLAG_CAN_RESIZE & mali_allocation->flags);
+ MALI_DEBUG_ASSERT(MALI_MEM_OS == mali_allocation->type);
+
+ mutex_lock(&mem_backend->mutex);
+
+ /* Do resize*/
+ if (physical_size > mem_backend->size) {
+ u32 add_size = physical_size - mem_backend->size;
+
+ MALI_DEBUG_ASSERT(0 == add_size % MALI_MMU_PAGE_SIZE);
+
+ /* Allocate new pages from os mem */
+ retval = mali_mem_os_alloc_pages(&tmp_os_mem, add_size);
+
+ if (retval) {
+ if (-ENOMEM == retval) {
+ ret = _MALI_OSK_ERR_NOMEM;
+ } else {
+ ret = _MALI_OSK_ERR_FAULT;
+ }
+ MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: memory allocation failed !\n"));
+ goto failed_alloc_memory;
+ }
+
+ MALI_DEBUG_ASSERT(tmp_os_mem.count == add_size / MALI_MMU_PAGE_SIZE);
+
+ /* Resize the memory of the backend */
+ ret = mali_mem_os_resize_pages(&tmp_os_mem, &mem_backend->os_mem, 0, tmp_os_mem.count);
+
+ if (ret) {
+ MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: memory resizing failed !\n"));
+ goto failed_resize_pages;
+ }
+
+ /* Resize the CPU mapping */
+ if (NULL != mali_allocation->cpu_mapping.vma) {
+ ret = mali_mem_os_resize_cpu_map_locked(mem_backend, mali_allocation->cpu_mapping.vma, mali_allocation->cpu_mapping.vma->vm_start + mem_backend->size, add_size);
+ if (unlikely(ret != _MALI_OSK_ERR_OK)) {
+ MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: cpu mapping failed !\n"));
+ goto failed_cpu_map;
+ }
+ }
+
+ /* Resize mali mapping */
+ _mali_osk_mutex_wait(session->memory_lock);
+ ret = mali_mem_mali_map_resize(mali_allocation, physical_size);
+
+ if (ret) {
+ MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_resize: mali map resize fail !\n"));
+ goto failed_gpu_map;
+ }
+
+ ret = mali_mem_os_mali_map(&mem_backend->os_mem, session, mali_allocation->mali_vma_node.vm_node.start,
+ mali_allocation->psize / MALI_MMU_PAGE_SIZE, add_size / MALI_MMU_PAGE_SIZE, mali_allocation->mali_mapping.properties);
+ if (ret) {
+ MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: mali mapping failed !\n"));
+ goto failed_gpu_map;
+ }
+
+ _mali_osk_mutex_signal(session->memory_lock);
+ } else {
+ u32 dec_size, page_count;
+ u32 vaddr = 0;
+ INIT_LIST_HEAD(&tmp_os_mem.pages);
+ tmp_os_mem.count = 0;
+
+ dec_size = mem_backend->size - physical_size;
+ MALI_DEBUG_ASSERT(0 == dec_size % MALI_MMU_PAGE_SIZE);
+
+ page_count = dec_size / MALI_MMU_PAGE_SIZE;
+ vaddr = mali_allocation->mali_vma_node.vm_node.start + physical_size;
+
+ /* Resize the memory of the backend */
+ ret = mali_mem_os_resize_pages(&mem_backend->os_mem, &tmp_os_mem, physical_size / MALI_MMU_PAGE_SIZE, page_count);
+
+ if (ret) {
+ MALI_DEBUG_PRINT(4, ("_mali_ukk_mem_resize: mali map resize failed!\n"));
+ goto failed_resize_pages;
+ }
+
+ /* Resize mali map */
+ _mali_osk_mutex_wait(session->memory_lock);
+ mali_mem_mali_map_free(session, dec_size, vaddr, mali_allocation->flags);
+ _mali_osk_mutex_signal(session->memory_lock);
+
+ /* Zap cpu mapping */
+ if (0 != mali_allocation->cpu_mapping.addr) {
+ MALI_DEBUG_ASSERT(NULL != mali_allocation->cpu_mapping.vma);
+ zap_vma_ptes(mali_allocation->cpu_mapping.vma, mali_allocation->cpu_mapping.vma->vm_start + physical_size, dec_size);
+ }
+
+ /* Free those extra pages */
+ mali_mem_os_free(&tmp_os_mem.pages, tmp_os_mem.count, MALI_FALSE);
+ }
+
+ /* Resize memory allocation and memory backend */
+ change_page_count = (s32)(physical_size - mem_backend->size) / MALI_MMU_PAGE_SIZE;
+ mali_allocation->psize = physical_size;
+ mem_backend->size = physical_size;
+ mutex_unlock(&mem_backend->mutex);
+
+ if (change_page_count > 0) {
+ atomic_add(change_page_count, &session->mali_mem_allocated_pages);
+ if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
+ session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
+ }
+
+ } else {
+ atomic_sub((s32)(-change_page_count), &session->mali_mem_allocated_pages);
+ }
+
+ return _MALI_OSK_ERR_OK;
+
+failed_gpu_map:
+ _mali_osk_mutex_signal(session->memory_lock);
+failed_cpu_map:
+ if (physical_size > mem_backend->size) {
+ mali_mem_os_resize_pages(&mem_backend->os_mem, &tmp_os_mem, mem_backend->size / MALI_MMU_PAGE_SIZE,
+ (physical_size - mem_backend->size) / MALI_MMU_PAGE_SIZE);
+ } else {
+ mali_mem_os_resize_pages(&tmp_os_mem, &mem_backend->os_mem, 0, tmp_os_mem.count);
+ }
+failed_resize_pages:
+ if (0 != tmp_os_mem.count)
+ mali_mem_os_free(&tmp_os_mem.pages, tmp_os_mem.count, MALI_FALSE);
+failed_alloc_memory:
+
+ mutex_unlock(&mem_backend->mutex);
+ return ret;
+}
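+/*
+ * Resize summary: growing allocates the delta from the OS, splices it into
+ * the backend, extends the CPU mapping if one exists and then the Mali
+ * mapping; shrinking detaches the tail pages, unmaps them from the GPU, zaps
+ * the corresponding CPU PTEs and frees the pages. Session page accounting is
+ * adjusted by the signed page delta at the end.
+ */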
+
+
+/* Set GPU MMU properties */
+static void _mali_memory_gpu_map_property_set(u32 *properties, u32 flags)
+{
+ if (_MALI_MEMORY_GPU_READ_ALLOCATE & flags) {
+ *properties = MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE;
+ } else {
+ *properties = MALI_MMU_FLAGS_DEFAULT;
+ }
+}
+
+_mali_osk_errcode_t mali_mem_add_mem_size(struct mali_session_data *session, u32 mali_addr, u32 add_size)
+{
+ mali_mem_backend *mem_backend = NULL;
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
+ mali_mem_allocation *mali_allocation = NULL;
+ u32 new_physical_size;
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_ASSERT(0 == add_size % MALI_MMU_PAGE_SIZE);
+
+ /* Get the memory backend that needs to be resized. */
+ mem_backend = mali_mem_backend_struct_search(session, mali_addr);
+
+ if (NULL == mem_backend) {
+ MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: memory backend = NULL!\n"));
+ return ret;
+ }
+
+ mali_allocation = mem_backend->mali_allocation;
+
+ MALI_DEBUG_ASSERT_POINTER(mali_allocation);
+
+ new_physical_size = add_size + mem_backend->size;
+
+ if (new_physical_size > (mali_allocation->mali_vma_node.vm_node.size))
+ return ret;
+
+ MALI_DEBUG_ASSERT(new_physical_size != mem_backend->size);
+
+ ret = mali_mem_resize(session, mem_backend, new_physical_size);
+
+ return ret;
+}
+
+/**
+* _mali_ukk_mem_allocate - allocate mali memory
+*/
+_mali_osk_errcode_t _mali_ukk_mem_allocate(_mali_uk_alloc_mem_s *args)
+{
+ struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
+ mali_mem_backend *mem_backend = NULL;
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
+ int retval = 0;
+ mali_mem_allocation *mali_allocation = NULL;
+ struct mali_vma_node *mali_vma_node = NULL;
+
+ MALI_DEBUG_PRINT(4, (" _mali_ukk_mem_allocate, vaddr=0x%x, size =0x%x! \n", args->gpu_vaddr, args->psize));
+
+ /* Check whether the address is already allocated */
+ mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, args->gpu_vaddr, 0);
+
+ if (unlikely(mali_vma_node)) {
+ MALI_DEBUG_ASSERT(0);
+ return _MALI_OSK_ERR_FAULT;
+ }
+ /* create the mali memory allocation */
+
+ mali_allocation = mali_mem_allocation_struct_create(session);
+
+ if (mali_allocation == NULL) {
+ MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_allocate: Failed to create allocation struct! \n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+ mali_allocation->psize = args->psize;
+ mali_allocation->vsize = args->vsize;
+
+ /* Pick the backend type: MALI_MEM_SWAP for swappable allocations,
+ * MALI_MEM_OS if the allocation needs to support resizing,
+ * MALI_MEM_BLOCK if dedicated memory is available,
+ * otherwise MALI_MEM_OS.
+ */
+ if (args->flags & _MALI_MEMORY_ALLOCATE_SWAPPABLE) {
+ mali_allocation->type = MALI_MEM_SWAP;
+ } else if (args->flags & _MALI_MEMORY_ALLOCATE_RESIZEABLE) {
+ mali_allocation->type = MALI_MEM_OS;
+ mali_allocation->flags |= MALI_MEM_FLAG_CAN_RESIZE;
+ } else if (MALI_TRUE == mali_memory_have_dedicated_memory()) {
+ mali_allocation->type = MALI_MEM_BLOCK;
+ } else {
+ mali_allocation->type = MALI_MEM_OS;
+ }
+
+ /* add the allocation node to the RB tree for indexing */
+ mali_allocation->mali_vma_node.vm_node.start = args->gpu_vaddr;
+ mali_allocation->mali_vma_node.vm_node.size = args->vsize;
+
+ mali_vma_offset_add(&session->allocation_mgr, &mali_allocation->mali_vma_node);
+
+ mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, args->psize);
+ if (mali_allocation->backend_handle < 0) {
+ ret = _MALI_OSK_ERR_NOMEM;
+ MALI_DEBUG_PRINT(1, ("mali_allocation->backend_handle < 0! \n"));
+ goto failed_alloc_backend;
+ }
+
+
+ mem_backend->mali_allocation = mali_allocation;
+ mem_backend->type = mali_allocation->type;
+
+ mali_allocation->mali_mapping.addr = args->gpu_vaddr;
+
+ /* set the GPU MMU mapping property */
+ _mali_memory_gpu_map_property_set(&mali_allocation->mali_mapping.properties, args->flags);
+ /* prepare the MALI mapping */
+ if (!(args->flags & _MALI_MEMORY_ALLOCATE_NO_BIND_GPU) && mali_allocation->psize > 0) {
+ _mali_osk_mutex_wait(session->memory_lock);
+
+ ret = mali_mem_mali_map_prepare(mali_allocation);
+ if (0 != ret) {
+ _mali_osk_mutex_signal(session->memory_lock);
+ goto failed_prepare_map;
+ }
+ _mali_osk_mutex_signal(session->memory_lock);
+ }
+
+ if (mali_allocation->psize == 0) {
+ mem_backend->os_mem.count = 0;
+ INIT_LIST_HEAD(&mem_backend->os_mem.pages);
+ goto done;
+ }
+
+ if (args->flags & _MALI_MEMORY_ALLOCATE_DEFER_BIND) {
+ mali_allocation->flags |= _MALI_MEMORY_ALLOCATE_DEFER_BIND;
+ mem_backend->flags |= MALI_MEM_BACKEND_FLAG_NOT_BINDED;
+ /* init for defer bind backend*/
+ mem_backend->os_mem.count = 0;
+ INIT_LIST_HEAD(&mem_backend->os_mem.pages);
+
+ goto done;
+ }
+ /* allocate physical memory */
+ if (likely(mali_allocation->psize > 0)) {
+
+ if (mem_backend->type == MALI_MEM_OS) {
+ retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size);
+ } else if (mem_backend->type == MALI_MEM_BLOCK) {
+ /* try to allocate from BLOCK memory first, then fall back to OS memory on failure */
+ if (mali_mem_block_alloc(&mem_backend->block_mem, mem_backend->size)) {
+ retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size);
+ mem_backend->type = MALI_MEM_OS;
+ mali_allocation->type = MALI_MEM_OS;
+ }
+ } else if (MALI_MEM_SWAP == mem_backend->type) {
+ retval = mali_mem_swap_alloc_pages(&mem_backend->swap_mem, mali_allocation->mali_vma_node.vm_node.size, &mem_backend->start_idx);
+ } else {
+ /* unsupported type */
+ MALI_DEBUG_ASSERT(0);
+ }
+
+ if (retval) {
+ ret = _MALI_OSK_ERR_NOMEM;
+ MALI_DEBUG_PRINT(1, (" can't allocate enough pages! \n"));
+ goto failed_alloc_pages;
+ }
+ }
+
+ /* map to the GPU side */
+ if (!(args->flags & _MALI_MEMORY_ALLOCATE_NO_BIND_GPU) && mali_allocation->psize > 0) {
+ _mali_osk_mutex_wait(session->memory_lock);
+ /* Map on Mali */
+
+ if (mem_backend->type == MALI_MEM_OS) {
+ ret = mali_mem_os_mali_map(&mem_backend->os_mem, session, args->gpu_vaddr, 0,
+ mem_backend->size / MALI_MMU_PAGE_SIZE, mali_allocation->mali_mapping.properties);
+
+ } else if (mem_backend->type == MALI_MEM_BLOCK) {
+ mali_mem_block_mali_map(&mem_backend->block_mem, session, args->gpu_vaddr,
+ mali_allocation->mali_mapping.properties);
+ } else if (mem_backend->type == MALI_MEM_SWAP) {
+ ret = mali_mem_swap_mali_map(&mem_backend->swap_mem, session, args->gpu_vaddr,
+ mali_allocation->mali_mapping.properties);
+ } else { /* unsupported type */
+ MALI_DEBUG_ASSERT(0);
+ }
+
+ _mali_osk_mutex_signal(session->memory_lock);
+ }
+done:
+ if (MALI_MEM_OS == mem_backend->type) {
+ atomic_add(mem_backend->os_mem.count, &session->mali_mem_allocated_pages);
+ } else if (MALI_MEM_BLOCK == mem_backend->type) {
+ atomic_add(mem_backend->block_mem.count, &session->mali_mem_allocated_pages);
+ } else {
+ MALI_DEBUG_ASSERT(MALI_MEM_SWAP == mem_backend->type);
+ atomic_add(mem_backend->swap_mem.count, &session->mali_mem_allocated_pages);
+ atomic_add(mem_backend->swap_mem.count, &session->mali_mem_array[mem_backend->type]);
+ }
+
+ if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
+ session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
+ }
+ return _MALI_OSK_ERR_OK;
+
+failed_alloc_pages:
+ mali_mem_mali_map_free(session, mali_allocation->psize, mali_allocation->mali_vma_node.vm_node.start, mali_allocation->flags);
+failed_prepare_map:
+ mali_mem_backend_struct_destory(&mem_backend, mali_allocation->backend_handle);
+failed_alloc_backend:
+
+ mali_vma_offset_remove(&session->allocation_mgr, &mali_allocation->mali_vma_node);
+ mali_mem_allocation_struct_destory(mali_allocation);
+
+ return ret;
+}
+
+
+_mali_osk_errcode_t _mali_ukk_mem_free(_mali_uk_free_mem_s *args)
+{
+ struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
+ u32 vaddr = args->gpu_vaddr;
+ mali_mem_allocation *mali_alloc = NULL;
+ struct mali_vma_node *mali_vma_node = NULL;
+
+ /* find mali allocation structure by vaddress*/
+ mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, vaddr, 0);
+ if (NULL == mali_vma_node) {
+ MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_free: invalid addr: 0x%x\n", vaddr));
+ return _MALI_OSK_ERR_INVALID_ARGS;
+ }
+ MALI_DEBUG_ASSERT(NULL != mali_vma_node);
+ mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
+
+ if (mali_alloc)
+ /* check ref_count */
+ args->free_pages_nr = mali_allocation_unref(&mali_alloc);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+
+/**
+* Function _mali_ukk_mem_bind -- bind external memory to a new GPU address
+* It allocates a new mem allocation and binds the external memory to it.
+* Supported backend types are:
+* _MALI_MEMORY_BIND_BACKEND_UMP
+* _MALI_MEMORY_BIND_BACKEND_DMA_BUF
+* _MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY
+* CPU access is not supported yet.
+*/
+_mali_osk_errcode_t _mali_ukk_mem_bind(_mali_uk_bind_mem_s *args)
+{
+ struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
+ mali_mem_backend *mem_backend = NULL;
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
+ mali_mem_allocation *mali_allocation = NULL;
+ MALI_DEBUG_PRINT(5, (" _mali_ukk_mem_bind, vaddr=0x%x, size =0x%x! \n", args->vaddr, args->size));
+
+ /* allocate the mali allocation */
+ mali_allocation = mali_mem_allocation_struct_create(session);
+
+ if (mali_allocation == NULL) {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+ mali_allocation->psize = args->size;
+ mali_allocation->vsize = args->size;
+ mali_allocation->mali_mapping.addr = args->vaddr;
+
+ /* add allocation node to RB tree for index */
+ mali_allocation->mali_vma_node.vm_node.start = args->vaddr;
+ mali_allocation->mali_vma_node.vm_node.size = args->size;
+ mali_vma_offset_add(&session->allocation_mgr, &mali_allocation->mali_vma_node);
+
+ /* allocate backend*/
+ if (mali_allocation->psize > 0) {
+ mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, mali_allocation->psize);
+ if (mali_allocation->backend_handle < 0) {
+ goto Failed_alloc_backend;
+ }
+
+ } else {
+ goto Failed_alloc_backend;
+ }
+
+ mem_backend->size = mali_allocation->psize;
+ mem_backend->mali_allocation = mali_allocation;
+
+ switch (args->flags & _MALI_MEMORY_BIND_BACKEND_MASK) {
+ case _MALI_MEMORY_BIND_BACKEND_UMP:
+#if defined(CONFIG_MALI400_UMP)
+ mali_allocation->type = MALI_MEM_UMP;
+ mem_backend->type = MALI_MEM_UMP;
+ ret = mali_mem_bind_ump_buf(mali_allocation, mem_backend,
+ args->mem_union.bind_ump.secure_id, args->mem_union.bind_ump.flags);
+ if (_MALI_OSK_ERR_OK != ret) {
+ MALI_DEBUG_PRINT(1, ("Bind ump buf failed\n"));
+ goto Failed_bind_backend;
+ }
+#else
+ MALI_DEBUG_PRINT(1, ("UMP not supported\n"));
+ goto Failed_bind_backend;
+#endif
+ break;
+ case _MALI_MEMORY_BIND_BACKEND_DMA_BUF:
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+ mali_allocation->type = MALI_MEM_DMA_BUF;
+ mem_backend->type = MALI_MEM_DMA_BUF;
+ ret = mali_mem_bind_dma_buf(mali_allocation, mem_backend,
+ args->mem_union.bind_dma_buf.mem_fd, args->mem_union.bind_dma_buf.flags);
+ if (_MALI_OSK_ERR_OK != ret) {
+ MALI_DEBUG_PRINT(1, ("Bind dma buf failed\n"));
+ goto Failed_bind_backend;
+ }
+#else
+ MALI_DEBUG_PRINT(1, ("DMA not supported\n"));
+ goto Failed_bind_backend;
+#endif
+ break;
+ case _MALI_MEMORY_BIND_BACKEND_MALI_MEMORY:
+ /* not allowed */
+ MALI_DEBUG_ASSERT(0);
+ break;
+
+ case _MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY:
+ mali_allocation->type = MALI_MEM_EXTERNAL;
+ mem_backend->type = MALI_MEM_EXTERNAL;
+ ret = mali_mem_bind_ext_buf(mali_allocation, mem_backend, args->mem_union.bind_ext_memory.phys_addr,
+ args->mem_union.bind_ext_memory.flags);
+ if (_MALI_OSK_ERR_OK != ret) {
+ MALI_DEBUG_PRINT(1, ("Bind external buf failed\n"));
+ goto Failed_bind_backend;
+ }
+ break;
+
+ case _MALI_MEMORY_BIND_BACKEND_EXT_COW:
+ /* not allowed */
+ MALI_DEBUG_ASSERT(0);
+ break;
+
+ default:
+ MALI_DEBUG_ASSERT(0);
+ break;
+ }
+ MALI_DEBUG_ASSERT(0 == mem_backend->size % MALI_MMU_PAGE_SIZE);
+ atomic_add(mem_backend->size / MALI_MMU_PAGE_SIZE, &session->mali_mem_array[mem_backend->type]);
+ return _MALI_OSK_ERR_OK;
+
+Failed_bind_backend:
+ mali_mem_backend_struct_destory(&mem_backend, mali_allocation->backend_handle);
+
+Failed_alloc_backend:
+ mali_vma_offset_remove(&session->allocation_mgr, &mali_allocation->mali_vma_node);
+ mali_mem_allocation_struct_destory(mali_allocation);
+
+ MALI_DEBUG_PRINT(1, (" _mali_ukk_mem_bind, return ERROR! \n"));
+ return ret;
+}
+
+
+/*
+* Function _mali_ukk_mem_unbind -- unbind external memory from a GPU address.
+* This function unbinds the backend memory and frees the allocation;
+* there is no ref_count for this type of memory.
+*/
+_mali_osk_errcode_t _mali_ukk_mem_unbind(_mali_uk_unbind_mem_s *args)
+{
+ struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
+ mali_mem_allocation *mali_allocation = NULL;
+ struct mali_vma_node *mali_vma_node = NULL;
+ u32 mali_addr = args->vaddr;
+ MALI_DEBUG_PRINT(5, (" _mali_ukk_mem_unbind, vaddr=0x%x! \n", args->vaddr));
+
+ /* find the allocation by vaddr */
+ mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0);
+ if (likely(mali_vma_node)) {
+ MALI_DEBUG_ASSERT(mali_addr == mali_vma_node->vm_node.start);
+ mali_allocation = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
+ } else {
+ MALI_DEBUG_ASSERT(NULL != mali_vma_node);
+ return _MALI_OSK_ERR_INVALID_ARGS;
+ }
+
+ if (NULL != mali_allocation)
+ /* check ref_count */
+ mali_allocation_unref(&mali_allocation);
+ return _MALI_OSK_ERR_OK;
+}
+
+/*
+* Function _mali_ukk_mem_cow -- CoW (copy-on-write) an allocation.
+* This function allocates new pages for the range (range_start,
+* range_start + range_size) of the target allocation, keeps using the
+* target's pages outside that range, and maps the result to a GPU vaddr.
+*/
+_mali_osk_errcode_t _mali_ukk_mem_cow(_mali_uk_cow_mem_s *args)
+{
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
+ mali_mem_backend *target_backend = NULL;
+ mali_mem_backend *mem_backend = NULL;
+ struct mali_vma_node *mali_vma_node = NULL;
+ mali_mem_allocation *mali_allocation = NULL;
+
+ struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
+ /* Get the target backend for cow */
+ target_backend = mali_mem_backend_struct_search(session, args->target_handle);
+
+ if (NULL == target_backend || 0 == target_backend->size) {
+ MALI_DEBUG_ASSERT_POINTER(target_backend);
+ MALI_DEBUG_ASSERT(0 != target_backend->size);
+ return ret;
+ }
+
+	/* CoW does not support resizable memory. */
+ MALI_DEBUG_ASSERT(MALI_MEM_FLAG_CAN_RESIZE != (MALI_MEM_FLAG_CAN_RESIZE & target_backend->mali_allocation->flags));
+
+ /* Check if the new mali address is allocated */
+ mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, args->vaddr, 0);
+
+ if (unlikely(mali_vma_node)) {
+ MALI_DEBUG_ASSERT(0);
+ return ret;
+ }
+
+	/* Create a new allocation for the CoW copy. */
+ mali_allocation = mali_mem_allocation_struct_create(session);
+ if (mali_allocation == NULL) {
+ MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_cow: Failed to create allocation struct!\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+ mali_allocation->psize = args->target_size;
+ mali_allocation->vsize = args->target_size;
+ mali_allocation->type = MALI_MEM_COW;
+
+	/* Add the allocation node to the RB tree for indexing. */
+ mali_allocation->mali_vma_node.vm_node.start = args->vaddr;
+ mali_allocation->mali_vma_node.vm_node.size = mali_allocation->vsize;
+ mali_vma_offset_add(&session->allocation_mgr, &mali_allocation->mali_vma_node);
+
+ /* create new backend for COW memory */
+ mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, mali_allocation->psize);
+ if (mali_allocation->backend_handle < 0) {
+ ret = _MALI_OSK_ERR_NOMEM;
+ MALI_DEBUG_PRINT(1, ("mali_allocation->backend_handle < 0! \n"));
+ goto failed_alloc_backend;
+ }
+ mem_backend->mali_allocation = mali_allocation;
+ mem_backend->type = mali_allocation->type;
+
+ if (target_backend->type == MALI_MEM_SWAP ||
+ (MALI_MEM_COW == target_backend->type && (MALI_MEM_BACKEND_FLAG_SWAP_COWED & target_backend->flags))) {
+ mem_backend->flags |= MALI_MEM_BACKEND_FLAG_SWAP_COWED;
+		/**
+		 * CoWed swap backends cannot be mapped as a non-linear VMA: once a
+		 * VMA is flagged VM_NONLINEAR the kernel takes over
+		 * vma->vm_private_data, while the mali driver uses that field to
+		 * store the mali_allocation pointer, so the two uses conflict.
+		 * To work around this, an index window at the start of the global
+		 * swap file is reserved (see MALI_SWAP_GLOBAL_SWAP_FILE_INDEX_RESERVE)
+		 * and no real page index is ever handed out from it. Every CoWed
+		 * swap backend has its start_idx set to 0, is mapped as linear, and
+		 * is added to the priority tree of the global swap file; these VMAs
+		 * can never be found through a normal page->index lookup, so their
+		 * pages also cannot be swapped out.
+		 */
+ mem_backend->start_idx = 0;
+ }
+
+	/* Bump the target backend's CoW count, allocate new pages from OS memory
+	 * for the modified range, and take a reference on every page outside that
+	 * range so it is reused as-is.
+	 */
+ MALI_DEBUG_PRINT(3, ("Cow mapping: target_addr: 0x%x; cow_addr: 0x%x, size: %u\n", target_backend->mali_allocation->mali_vma_node.vm_node.start,
+ mali_allocation->mali_vma_node.vm_node.start, mali_allocation->mali_vma_node.vm_node.size));
+
+ ret = mali_memory_do_cow(target_backend, args->target_offset, args->target_size, mem_backend, args->range_start, args->range_size);
+ if (_MALI_OSK_ERR_OK != ret) {
+ MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_cow: Failed to cow!\n"));
+ goto failed_do_cow;
+ }
+
+	/* Map to the GPU side. */
+ mali_allocation->mali_mapping.addr = args->vaddr;
+	/* Set the GPU MMU mapping properties. */
+ _mali_memory_gpu_map_property_set(&mali_allocation->mali_mapping.properties, args->flags);
+
+ _mali_osk_mutex_wait(session->memory_lock);
+ /* Map on Mali */
+ ret = mali_mem_mali_map_prepare(mali_allocation);
+ if (0 != ret) {
+ MALI_DEBUG_PRINT(1, (" prepare map fail! \n"));
+ goto failed_gpu_map;
+ }
+
+ if (!(mem_backend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)) {
+ mali_mem_cow_mali_map(mem_backend, 0, mem_backend->size);
+ }
+
+ _mali_osk_mutex_signal(session->memory_lock);
+
+ mutex_lock(&target_backend->mutex);
+ target_backend->flags |= MALI_MEM_BACKEND_FLAG_COWED;
+ mutex_unlock(&target_backend->mutex);
+
+ atomic_add(args->range_size / MALI_MMU_PAGE_SIZE, &session->mali_mem_allocated_pages);
+ if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
+ session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
+ }
+ return _MALI_OSK_ERR_OK;
+
+failed_gpu_map:
+ _mali_osk_mutex_signal(session->memory_lock);
+ mali_mem_cow_release(mem_backend, MALI_FALSE);
+ mem_backend->cow_mem.count = 0;
+failed_do_cow:
+ mali_mem_backend_struct_destory(&mem_backend, mali_allocation->backend_handle);
+failed_alloc_backend:
+ mali_vma_offset_remove(&session->allocation_mgr, &mali_allocation->mali_vma_node);
+ mali_mem_allocation_struct_destory(mali_allocation);
+
+ return ret;
+}
+
+_mali_osk_errcode_t _mali_ukk_mem_cow_modify_range(_mali_uk_cow_modify_range_s *args)
+{
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
+ mali_mem_backend *mem_backend = NULL;
+ struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
+
+ MALI_DEBUG_PRINT(4, (" _mali_ukk_mem_cow_modify_range called! \n"));
+	/* Get the backend that needs to be modified. */
+ mem_backend = mali_mem_backend_struct_search(session, args->vaddr);
+
+ if (NULL == mem_backend || 0 == mem_backend->size) {
+ MALI_DEBUG_ASSERT_POINTER(mem_backend);
+ MALI_DEBUG_ASSERT(0 != mem_backend->size);
+ return ret;
+ }
+
+ MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_backend->type);
+
+ ret = mali_memory_cow_modify_range(mem_backend, args->range_start, args->size);
+ args->change_pages_nr = mem_backend->cow_mem.change_pages_nr;
+ if (_MALI_OSK_ERR_OK != ret)
+ return ret;
+ _mali_osk_mutex_wait(session->memory_lock);
+ if (!(mem_backend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)) {
+ mali_mem_cow_mali_map(mem_backend, args->range_start, args->size);
+ }
+ _mali_osk_mutex_signal(session->memory_lock);
+
+ atomic_add(args->change_pages_nr, &session->mali_mem_allocated_pages);
+ if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
+ session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+
+_mali_osk_errcode_t _mali_ukk_mem_resize(_mali_uk_mem_resize_s *args)
+{
+ mali_mem_backend *mem_backend = NULL;
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
+
+ struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_PRINT(4, (" mali_mem_resize_memory called! \n"));
+ MALI_DEBUG_ASSERT(0 == args->psize % MALI_MMU_PAGE_SIZE);
+
+	/* Get the memory backend that needs to be resized. */
+ mem_backend = mali_mem_backend_struct_search(session, args->vaddr);
+
+ if (NULL == mem_backend) {
+ MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: memory backend = NULL!\n"));
+ return ret;
+ }
+
+ MALI_DEBUG_ASSERT(args->psize != mem_backend->size);
+
+ ret = mali_mem_resize(session, mem_backend, args->psize);
+
+ return ret;
+}
+
+_mali_osk_errcode_t _mali_ukk_mem_usage_get(_mali_uk_profiling_memory_usage_get_s *args)
+{
+ args->memory_usage = _mali_ukk_report_memory_usage();
+ if (0 != args->vaddr) {
+ mali_mem_backend *mem_backend = NULL;
+ struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
+		/* Get the backend that needs to be modified. */
+ mem_backend = mali_mem_backend_struct_search(session, args->vaddr);
+ if (NULL == mem_backend) {
+ MALI_DEBUG_ASSERT_POINTER(mem_backend);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ if (MALI_MEM_COW == mem_backend->type)
+ args->change_pages_nr = mem_backend->cow_mem.change_pages_nr;
+ }
+ return _MALI_OSK_ERR_OK;
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_manager.h b/drivers/gpu/arm/utgard/linux/mali_memory_manager.h
new file mode 100644
index 000000000000..c454b9354676
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_manager.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_MANAGER_H__
+#define __MALI_MEMORY_MANAGER_H__
+
+#include "mali_osk.h"
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include "mali_memory_types.h"
+#include "mali_memory_os_alloc.h"
+#include "mali_uk_types.h"
+
+struct mali_allocation_manager {
+ rwlock_t vm_lock;
+ struct rb_root allocation_mgr_rb;
+ struct list_head head;
+ struct mutex list_mutex;
+ u32 mali_allocation_num;
+};
+
+extern struct idr mali_backend_idr;
+extern struct mutex mali_idr_mutex;
+
+int mali_memory_manager_init(struct mali_allocation_manager *mgr);
+void mali_memory_manager_uninit(struct mali_allocation_manager *mgr);
+
+void mali_mem_allocation_struct_destory(mali_mem_allocation *alloc);
+_mali_osk_errcode_t mali_mem_add_mem_size(struct mali_session_data *session, u32 mali_addr, u32 add_size);
+mali_mem_backend *mali_mem_backend_struct_search(struct mali_session_data *session, u32 mali_address);
+_mali_osk_errcode_t _mali_ukk_mem_allocate(_mali_uk_alloc_mem_s *args);
+_mali_osk_errcode_t _mali_ukk_mem_free(_mali_uk_free_mem_s *args);
+_mali_osk_errcode_t _mali_ukk_mem_bind(_mali_uk_bind_mem_s *args);
+_mali_osk_errcode_t _mali_ukk_mem_unbind(_mali_uk_unbind_mem_s *args);
+_mali_osk_errcode_t _mali_ukk_mem_cow(_mali_uk_cow_mem_s *args);
+_mali_osk_errcode_t _mali_ukk_mem_cow_modify_range(_mali_uk_cow_modify_range_s *args);
+_mali_osk_errcode_t _mali_ukk_mem_usage_get(_mali_uk_profiling_memory_usage_get_s *args);
+_mali_osk_errcode_t _mali_ukk_mem_resize(_mali_uk_mem_resize_s *args);
+
+#endif
+
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_os_alloc.c b/drivers/gpu/arm/utgard/linux/mali_memory_os_alloc.c
new file mode 100644
index 000000000000..1a6cc0649421
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_os_alloc.c
@@ -0,0 +1,805 @@
+/*
+ * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/fs.h>
+#include <linux/dma-mapping.h>
+#include <linux/version.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+
+#include "mali_osk.h"
+#include "mali_memory.h"
+#include "mali_memory_os_alloc.h"
+#include "mali_kernel_linux.h"
+
+/* Minimum size of allocator page pool */
+#define MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB * 256)
+#define MALI_OS_MEMORY_POOL_TRIM_JIFFIES (10 * CONFIG_HZ) /* Default to 10s */
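+/*
+ * Sizing note: with 4 KB pages there are 256 pages per MB, hence the
+ * "* 256" above; the trim interval is expressed in jiffies, so
+ * 10 * CONFIG_HZ is ten seconds regardless of the configured tick rate.
+ */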
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+/* Write combine dma_attrs */
+static DEFINE_DMA_ATTRS(dma_attrs_wc);
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
+static int mali_mem_os_shrink(int nr_to_scan, gfp_t gfp_mask);
+#else
+static int mali_mem_os_shrink(struct shrinker *shrinker, int nr_to_scan, gfp_t gfp_mask);
+#endif
+#else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+static int mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc);
+#else
+static unsigned long mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc);
+static unsigned long mali_mem_os_shrink_count(struct shrinker *shrinker, struct shrink_control *sc);
+#endif
+#endif
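+/*
+ * The prototype ladder above tracks the kernel shrinker API as it evolved:
+ * the shrinker argument was added in 2.6.35, shrink_control replaced the
+ * bare (nr_to_scan, gfp_mask) pair in 3.0, and 3.12 split the callback
+ * into count_objects/scan_objects.
+ */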
+static void mali_mem_os_trim_pool(struct work_struct *work);
+
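+/*
+ * The pool trimmer runs as deferrable delayed work where the kernel
+ * supports it (TIMER_DEFERRABLE from 3.7, __DEFERRED_WORK_INITIALIZER from
+ * 2.6.38), so an otherwise idle CPU is not woken up just to trim the pool.
+ */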
+struct mali_mem_os_allocator mali_mem_os_allocator = {
+ .pool_lock = __SPIN_LOCK_UNLOCKED(pool_lock),
+ .pool_pages = LIST_HEAD_INIT(mali_mem_os_allocator.pool_pages),
+ .pool_count = 0,
+
+ .allocated_pages = ATOMIC_INIT(0),
+ .allocation_limit = 0,
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+ .shrinker.shrink = mali_mem_os_shrink,
+#else
+ .shrinker.count_objects = mali_mem_os_shrink_count,
+ .shrinker.scan_objects = mali_mem_os_shrink,
+#endif
+ .shrinker.seeks = DEFAULT_SEEKS,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
+ .timed_shrinker = __DELAYED_WORK_INITIALIZER(mali_mem_os_allocator.timed_shrinker, mali_mem_os_trim_pool, TIMER_DEFERRABLE),
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38)
+ .timed_shrinker = __DEFERRED_WORK_INITIALIZER(mali_mem_os_allocator.timed_shrinker, mali_mem_os_trim_pool),
+#else
+ .timed_shrinker = __DELAYED_WORK_INITIALIZER(mali_mem_os_allocator.timed_shrinker, mali_mem_os_trim_pool),
+#endif
+};
+
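+/*
+ * Free (or pool) the OS pages on the os_pages list. When cow_flag is true
+ * the pages may be shared with a CoW backend: only nodes whose ref count
+ * is exactly 1 are returned to the page pool, while shared nodes are just
+ * unreferenced. Returns the number of pages actually released.
+ */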
+u32 mali_mem_os_free(struct list_head *os_pages, u32 pages_count, mali_bool cow_flag)
+{
+ LIST_HEAD(pages);
+ struct mali_page_node *m_page, *m_tmp;
+ u32 free_pages_nr = 0;
+
+ if (MALI_TRUE == cow_flag) {
+ list_for_each_entry_safe(m_page, m_tmp, os_pages, list) {
+			/* Only handle OS page nodes here. */
+ if (m_page->type == MALI_PAGE_NODE_OS) {
+ if (1 == _mali_page_node_get_ref_count(m_page)) {
+ list_move(&m_page->list, &pages);
+ atomic_sub(1, &mali_mem_os_allocator.allocated_pages);
+					free_pages_nr++;
+ } else {
+ _mali_page_node_unref(m_page);
+ m_page->page = NULL;
+ list_del(&m_page->list);
+ kfree(m_page);
+ }
+ }
+ }
+ } else {
+ list_cut_position(&pages, os_pages, os_pages->prev);
+ atomic_sub(pages_count, &mali_mem_os_allocator.allocated_pages);
+ free_pages_nr = pages_count;
+ }
+
+ /* Put pages on pool. */
+ spin_lock(&mali_mem_os_allocator.pool_lock);
+ list_splice(&pages, &mali_mem_os_allocator.pool_pages);
+ mali_mem_os_allocator.pool_count += free_pages_nr;
+ spin_unlock(&mali_mem_os_allocator.pool_lock);
+
+ if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES < mali_mem_os_allocator.pool_count) {
+ MALI_DEBUG_PRINT(5, ("OS Mem: Starting pool trim timer %u\n", mali_mem_os_allocator.pool_count));
+ queue_delayed_work(mali_mem_os_allocator.wq, &mali_mem_os_allocator.timed_shrinker, MALI_OS_MEMORY_POOL_TRIM_JIFFIES);
+ }
+ return free_pages_nr;
+}
+
+/**
+* Put a page without returning it to the page pool.
+*/
+_mali_osk_errcode_t mali_mem_os_put_page(struct page *page)
+{
+ MALI_DEBUG_ASSERT_POINTER(page);
+ if (1 == page_count(page)) {
+ atomic_sub(1, &mali_mem_os_allocator.allocated_pages);
+ dma_unmap_page(&mali_platform_device->dev, page_private(page),
+ _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
+ ClearPagePrivate(page);
+ }
+ put_page(page);
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_mem_os_resize_pages(mali_mem_os_mem *mem_from, mali_mem_os_mem *mem_to, u32 start_page, u32 page_count)
+{
+ struct mali_page_node *m_page, *m_tmp;
+ u32 i = 0;
+
+ MALI_DEBUG_ASSERT_POINTER(mem_from);
+ MALI_DEBUG_ASSERT_POINTER(mem_to);
+
+ if (mem_from->count < start_page + page_count) {
+ return _MALI_OSK_ERR_INVALID_ARGS;
+ }
+
+ list_for_each_entry_safe(m_page, m_tmp, &mem_from->pages, list) {
+ if (i >= start_page && i < start_page + page_count) {
+ list_move_tail(&m_page->list, &mem_to->pages);
+ mem_from->count--;
+ mem_to->count++;
+ }
+ i++;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+
+int mali_mem_os_alloc_pages(mali_mem_os_mem *os_mem, u32 size)
+{
+ struct page *new_page;
+ LIST_HEAD(pages_list);
+ size_t page_count = PAGE_ALIGN(size) / _MALI_OSK_MALI_PAGE_SIZE;
+ size_t remaining = page_count;
+ struct mali_page_node *m_page, *m_tmp;
+ u32 i;
+
+ MALI_DEBUG_ASSERT_POINTER(os_mem);
+
+ if (atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE + size > mali_mem_os_allocator.allocation_limit) {
+ MALI_DEBUG_PRINT(2, ("Mali Mem: Unable to allocate %u bytes. Currently allocated: %lu, max limit %lu\n",
+ size,
+ atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE,
+ mali_mem_os_allocator.allocation_limit));
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&os_mem->pages);
+ os_mem->count = page_count;
+
+ /* Grab pages from pool. */
+ {
+ size_t pool_pages;
+ spin_lock(&mali_mem_os_allocator.pool_lock);
+ pool_pages = min(remaining, mali_mem_os_allocator.pool_count);
+ for (i = pool_pages; i > 0; i--) {
+ BUG_ON(list_empty(&mali_mem_os_allocator.pool_pages));
+ list_move(mali_mem_os_allocator.pool_pages.next, &pages_list);
+ }
+ mali_mem_os_allocator.pool_count -= pool_pages;
+ remaining -= pool_pages;
+ spin_unlock(&mali_mem_os_allocator.pool_lock);
+ }
+
+ /* Process pages from pool. */
+ i = 0;
+ list_for_each_entry_safe(m_page, m_tmp, &pages_list, list) {
+ BUG_ON(NULL == m_page);
+
+ list_move_tail(&m_page->list, &os_mem->pages);
+ }
+
+ /* Allocate new pages, if needed. */
+ for (i = 0; i < remaining; i++) {
+ dma_addr_t dma_addr;
+ gfp_t flags = __GFP_ZERO | __GFP_NORETRY | __GFP_NOWARN | __GFP_COLD;
+ int err;
+
+#if defined(CONFIG_ARM) && !defined(CONFIG_ARM_LPAE)
+ flags |= GFP_HIGHUSER;
+#else
+#ifdef CONFIG_ZONE_DMA32
+ flags |= GFP_DMA32;
+#else
+#ifdef CONFIG_ZONE_DMA
+ flags |= GFP_DMA;
+#else
+		/* The Utgard GPU can only address memory below 4 GB, but this
+		 * kernel configuration provides no zone for allocating below 4 GB.
+		 */
+ MALI_DEBUG_ASSERT(0);
+#endif
+#endif
+#endif
+
+ new_page = alloc_page(flags);
+
+ if (unlikely(NULL == new_page)) {
+ /* Calculate the number of pages actually allocated, and free them. */
+ os_mem->count = (page_count - remaining) + i;
+ atomic_add(os_mem->count, &mali_mem_os_allocator.allocated_pages);
+ mali_mem_os_free(&os_mem->pages, os_mem->count, MALI_FALSE);
+ return -ENOMEM;
+ }
+
+ /* Ensure page is flushed from CPU caches. */
+ dma_addr = dma_map_page(&mali_platform_device->dev, new_page,
+ 0, _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
+
+ err = dma_mapping_error(&mali_platform_device->dev, dma_addr);
+ if (unlikely(err)) {
+ MALI_DEBUG_PRINT_ERROR(("OS Mem: Failed to DMA map page %p: %u",
+ new_page, err));
+ __free_page(new_page);
+ os_mem->count = (page_count - remaining) + i;
+ atomic_add(os_mem->count, &mali_mem_os_allocator.allocated_pages);
+ mali_mem_os_free(&os_mem->pages, os_mem->count, MALI_FALSE);
+ return -EFAULT;
+ }
+
+		/* Store the page's DMA address in its page-private field. */
+ SetPagePrivate(new_page);
+ set_page_private(new_page, dma_addr);
+
+ m_page = _mali_page_node_allocate(MALI_PAGE_NODE_OS);
+ if (unlikely(NULL == m_page)) {
+ MALI_PRINT_ERROR(("OS Mem: Can't allocate mali_page node! \n"));
+ dma_unmap_page(&mali_platform_device->dev, page_private(new_page),
+ _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
+ ClearPagePrivate(new_page);
+ __free_page(new_page);
+ os_mem->count = (page_count - remaining) + i;
+ atomic_add(os_mem->count, &mali_mem_os_allocator.allocated_pages);
+ mali_mem_os_free(&os_mem->pages, os_mem->count, MALI_FALSE);
+ return -EFAULT;
+ }
+ m_page->page = new_page;
+
+ list_add_tail(&m_page->list, &os_mem->pages);
+ }
+
+ atomic_add(page_count, &mali_mem_os_allocator.allocated_pages);
+
+ if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES > mali_mem_os_allocator.pool_count) {
+ MALI_DEBUG_PRINT(4, ("OS Mem: Stopping pool trim timer, only %u pages on pool\n", mali_mem_os_allocator.pool_count));
+ cancel_delayed_work(&mali_mem_os_allocator.timed_shrinker);
+ }
+
+ return 0;
+}
+
+
+_mali_osk_errcode_t mali_mem_os_mali_map(mali_mem_os_mem *os_mem, struct mali_session_data *session, u32 vaddr, u32 start_page, u32 mapping_pgae_num, u32 props)
+{
+ struct mali_page_directory *pagedir = session->page_directory;
+ struct mali_page_node *m_page;
+ u32 virt;
+ u32 prop = props;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_ASSERT_POINTER(os_mem);
+
+ MALI_DEBUG_ASSERT(start_page <= os_mem->count);
+ MALI_DEBUG_ASSERT((start_page + mapping_pgae_num) <= os_mem->count);
+
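+	/* Two strategies: when the requested range ends at the allocation's
+	 * last page, walk the page list backwards from the tail so that only
+	 * the pages being mapped are visited; otherwise walk forwards from
+	 * the head, skipping the first start_page nodes. */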
+ if ((start_page + mapping_pgae_num) == os_mem->count) {
+
+ virt = vaddr + MALI_MMU_PAGE_SIZE * (start_page + mapping_pgae_num);
+
+ list_for_each_entry_reverse(m_page, &os_mem->pages, list) {
+
+ virt -= MALI_MMU_PAGE_SIZE;
+ if (mapping_pgae_num > 0) {
+ dma_addr_t phys = page_private(m_page->page);
+#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT)
+ /* Verify that the "physical" address is 32-bit and
+ * usable for Mali, when on a system with bus addresses
+ * wider than 32-bit. */
+ MALI_DEBUG_ASSERT(0 == (phys >> 32));
+#endif
+ mali_mmu_pagedir_update(pagedir, virt, (mali_dma_addr)phys, MALI_MMU_PAGE_SIZE, prop);
+ } else {
+ break;
+ }
+ mapping_pgae_num--;
+ }
+
+ } else {
+ u32 i = 0;
+ virt = vaddr;
+ list_for_each_entry(m_page, &os_mem->pages, list) {
+
+ if (i >= start_page) {
+ dma_addr_t phys = page_private(m_page->page);
+
+#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT)
+ /* Verify that the "physical" address is 32-bit and
+ * usable for Mali, when on a system with bus addresses
+ * wider than 32-bit. */
+ MALI_DEBUG_ASSERT(0 == (phys >> 32));
+#endif
+ mali_mmu_pagedir_update(pagedir, virt, (mali_dma_addr)phys, MALI_MMU_PAGE_SIZE, prop);
+ }
+ i++;
+ virt += MALI_MMU_PAGE_SIZE;
+ }
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+
+void mali_mem_os_mali_unmap(mali_mem_allocation *alloc)
+{
+ struct mali_session_data *session;
+ MALI_DEBUG_ASSERT_POINTER(alloc);
+ session = alloc->session;
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ mali_session_memory_lock(session);
+ mali_mem_mali_map_free(session, alloc->psize, alloc->mali_vma_node.vm_node.start,
+ alloc->flags);
+ mali_session_memory_unlock(session);
+}
+
+int mali_mem_os_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma)
+{
+ mali_mem_os_mem *os_mem = &mem_bkend->os_mem;
+ struct mali_page_node *m_page;
+ struct page *page;
+ int ret;
+ unsigned long addr = vma->vm_start;
+ MALI_DEBUG_ASSERT(MALI_MEM_OS == mem_bkend->type);
+
+ list_for_each_entry(m_page, &os_mem->pages, list) {
+ /* We should use vm_insert_page, but it does a dcache
+ * flush which makes it way slower than remap_pfn_range or vm_insert_pfn.
+ ret = vm_insert_page(vma, addr, page);
+ */
+ page = m_page->page;
+ ret = vm_insert_pfn(vma, addr, page_to_pfn(page));
+
+ if (unlikely(0 != ret)) {
+ return -EFAULT;
+ }
+ addr += _MALI_OSK_MALI_PAGE_SIZE;
+ }
+
+ return 0;
+}
+
+_mali_osk_errcode_t mali_mem_os_resize_cpu_map_locked(mali_mem_backend *mem_bkend, struct vm_area_struct *vma, unsigned long start_vaddr, u32 mappig_size)
+{
+ mali_mem_os_mem *os_mem = &mem_bkend->os_mem;
+ struct mali_page_node *m_page;
+ int ret;
+ int offset;
+ int mapping_page_num;
+	int count;
+
+ unsigned long vstart = vma->vm_start;
+ count = 0;
+ MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_OS);
+ MALI_DEBUG_ASSERT(0 == start_vaddr % _MALI_OSK_MALI_PAGE_SIZE);
+ MALI_DEBUG_ASSERT(0 == vstart % _MALI_OSK_MALI_PAGE_SIZE);
+ offset = (start_vaddr - vstart) / _MALI_OSK_MALI_PAGE_SIZE;
+ MALI_DEBUG_ASSERT(offset <= os_mem->count);
+ mapping_page_num = mappig_size / _MALI_OSK_MALI_PAGE_SIZE;
+ MALI_DEBUG_ASSERT((offset + mapping_page_num) <= os_mem->count);
+
+ if ((offset + mapping_page_num) == os_mem->count) {
+
+ unsigned long vm_end = start_vaddr + mappig_size;
+
+ list_for_each_entry_reverse(m_page, &os_mem->pages, list) {
+
+ vm_end -= _MALI_OSK_MALI_PAGE_SIZE;
+ if (mapping_page_num > 0) {
+ ret = vm_insert_pfn(vma, vm_end, page_to_pfn(m_page->page));
+
+ if (unlikely(0 != ret)) {
+				/* vm_insert_pfn() returns -EBUSY if the page is already mapped, which is fine here. */
+ if (-EBUSY == ret) {
+ break;
+ } else {
+ MALI_DEBUG_PRINT(1, ("OS Mem: mali_mem_os_resize_cpu_map_locked failed, ret = %d, offset is %d,page_count is %d\n",
+ ret, offset + mapping_page_num, os_mem->count));
+ }
+ return _MALI_OSK_ERR_FAULT;
+ }
+ } else {
+ break;
+ }
+ mapping_page_num--;
+
+ }
+ } else {
+
+ list_for_each_entry(m_page, &os_mem->pages, list) {
+ if (count >= offset) {
+
+ ret = vm_insert_pfn(vma, vstart, page_to_pfn(m_page->page));
+
+ if (unlikely(0 != ret)) {
+				/* vm_insert_pfn() returns -EBUSY if the page is already mapped, which is fine here. */
+ if (-EBUSY == ret) {
+ break;
+ } else {
+ MALI_DEBUG_PRINT(1, ("OS Mem: mali_mem_os_resize_cpu_map_locked failed, ret = %d, count is %d, offset is %d,page_count is %d\n",
+ ret, count, offset, os_mem->count));
+ }
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+ count++;
+ vstart += _MALI_OSK_MALI_PAGE_SIZE;
+ }
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+u32 mali_mem_os_release(mali_mem_backend *mem_bkend)
+{
+
+ mali_mem_allocation *alloc;
+ u32 free_pages_nr = 0;
+ MALI_DEBUG_ASSERT_POINTER(mem_bkend);
+ MALI_DEBUG_ASSERT(MALI_MEM_OS == mem_bkend->type);
+
+ alloc = mem_bkend->mali_allocation;
+ MALI_DEBUG_ASSERT_POINTER(alloc);
+
+ /* Unmap the memory from the mali virtual address space. */
+ mali_mem_os_mali_unmap(alloc);
+ mutex_lock(&mem_bkend->mutex);
+ /* Free pages */
+ if (MALI_MEM_BACKEND_FLAG_COWED & mem_bkend->flags) {
+ free_pages_nr = mali_mem_os_free(&mem_bkend->os_mem.pages, mem_bkend->os_mem.count, MALI_TRUE);
+ } else {
+ free_pages_nr = mali_mem_os_free(&mem_bkend->os_mem.pages, mem_bkend->os_mem.count, MALI_FALSE);
+ }
+ mutex_unlock(&mem_bkend->mutex);
+
+ MALI_DEBUG_PRINT(4, ("OS Mem free : allocated size = 0x%x, free size = 0x%x\n", mem_bkend->os_mem.count * _MALI_OSK_MALI_PAGE_SIZE,
+ free_pages_nr * _MALI_OSK_MALI_PAGE_SIZE));
+
+ mem_bkend->os_mem.count = 0;
+ return free_pages_nr;
+}
+
+
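+/*
+ * A small static cache of DMA-coherent, write-combined pages used for GPU
+ * page tables; it avoids a dma_alloc/dma_free round trip for every MMU
+ * page-table page.
+ */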
+#define MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE 128
+static struct {
+ struct {
+ mali_dma_addr phys;
+ mali_io_address mapping;
+ } page[MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE];
+ size_t count;
+ spinlock_t lock;
+} mali_mem_page_table_page_pool = {
+ .count = 0,
+ .lock = __SPIN_LOCK_UNLOCKED(pool_lock),
+};
+
+_mali_osk_errcode_t mali_mem_os_get_table_page(mali_dma_addr *phys, mali_io_address *mapping)
+{
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_NOMEM;
+ dma_addr_t tmp_phys;
+
+ spin_lock(&mali_mem_page_table_page_pool.lock);
+ if (0 < mali_mem_page_table_page_pool.count) {
+ u32 i = --mali_mem_page_table_page_pool.count;
+ *phys = mali_mem_page_table_page_pool.page[i].phys;
+ *mapping = mali_mem_page_table_page_pool.page[i].mapping;
+
+ ret = _MALI_OSK_ERR_OK;
+ }
+ spin_unlock(&mali_mem_page_table_page_pool.lock);
+
+ if (_MALI_OSK_ERR_OK != ret) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+ *mapping = dma_alloc_attrs(&mali_platform_device->dev,
+ _MALI_OSK_MALI_PAGE_SIZE, &tmp_phys,
+ GFP_KERNEL, &dma_attrs_wc);
+#else
+ *mapping = dma_alloc_writecombine(&mali_platform_device->dev,
+ _MALI_OSK_MALI_PAGE_SIZE, &tmp_phys, GFP_KERNEL);
+#endif
+ if (NULL != *mapping) {
+ ret = _MALI_OSK_ERR_OK;
+
+#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT)
+ /* Verify that the "physical" address is 32-bit and
+ * usable for Mali, when on a system with bus addresses
+ * wider than 32-bit. */
+ MALI_DEBUG_ASSERT(0 == (tmp_phys >> 32));
+#endif
+
+ *phys = (mali_dma_addr)tmp_phys;
+ }
+ }
+
+ return ret;
+}
+
+void mali_mem_os_release_table_page(mali_dma_addr phys, void *virt)
+{
+ spin_lock(&mali_mem_page_table_page_pool.lock);
+ if (MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE > mali_mem_page_table_page_pool.count) {
+ u32 i = mali_mem_page_table_page_pool.count;
+ mali_mem_page_table_page_pool.page[i].phys = phys;
+ mali_mem_page_table_page_pool.page[i].mapping = virt;
+
+ ++mali_mem_page_table_page_pool.count;
+
+ spin_unlock(&mali_mem_page_table_page_pool.lock);
+ } else {
+ spin_unlock(&mali_mem_page_table_page_pool.lock);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+ dma_free_attrs(&mali_platform_device->dev,
+ _MALI_OSK_MALI_PAGE_SIZE, virt, phys,
+ &dma_attrs_wc);
+#else
+ dma_free_writecombine(&mali_platform_device->dev,
+ _MALI_OSK_MALI_PAGE_SIZE, virt, phys);
+#endif
+ }
+}
+
+void mali_mem_os_free_page_node(struct mali_page_node *m_page)
+{
+ struct page *page = m_page->page;
+ MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_OS);
+
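+	/* If this is the last reference, tear down the DMA mapping before
+	 * the page is handed back to the kernel. */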
+ if (1 == page_count(page)) {
+ dma_unmap_page(&mali_platform_device->dev, page_private(page),
+ _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
+ ClearPagePrivate(page);
+ }
+ __free_page(page);
+ m_page->page = NULL;
+ list_del(&m_page->list);
+ kfree(m_page);
+}
+
+/* The maximum number of page table pool pages to free in one go. */
+#define MALI_MEM_OS_CHUNK_TO_FREE 64UL
+
+/* Free a certain number of pages from the page table page pool.
+ * The pool lock must be held when calling the function, and the lock will be
+ * released before returning.
+ */
+static void mali_mem_os_page_table_pool_free(size_t nr_to_free)
+{
+ mali_dma_addr phys_arr[MALI_MEM_OS_CHUNK_TO_FREE];
+ void *virt_arr[MALI_MEM_OS_CHUNK_TO_FREE];
+ u32 i;
+
+ MALI_DEBUG_ASSERT(nr_to_free <= MALI_MEM_OS_CHUNK_TO_FREE);
+
+ /* Remove nr_to_free pages from the pool and store them locally on stack. */
+ for (i = 0; i < nr_to_free; i++) {
+ u32 pool_index = mali_mem_page_table_page_pool.count - i - 1;
+
+ phys_arr[i] = mali_mem_page_table_page_pool.page[pool_index].phys;
+ virt_arr[i] = mali_mem_page_table_page_pool.page[pool_index].mapping;
+ }
+
+ mali_mem_page_table_page_pool.count -= nr_to_free;
+
+ spin_unlock(&mali_mem_page_table_page_pool.lock);
+
+ /* After releasing the spinlock: free the pages we removed from the pool. */
+ for (i = 0; i < nr_to_free; i++) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+ dma_free_attrs(&mali_platform_device->dev, _MALI_OSK_MALI_PAGE_SIZE,
+ virt_arr[i], (dma_addr_t)phys_arr[i], &dma_attrs_wc);
+#else
+ dma_free_writecombine(&mali_platform_device->dev,
+ _MALI_OSK_MALI_PAGE_SIZE,
+ virt_arr[i], (dma_addr_t)phys_arr[i]);
+#endif
+ }
+}
+
+static void mali_mem_os_trim_page_table_page_pool(void)
+{
+ size_t nr_to_free = 0;
+ size_t nr_to_keep;
+
+ /* Keep 2 page table pages for each 1024 pages in the page cache. */
+ nr_to_keep = mali_mem_os_allocator.pool_count / 512;
+	/* And a minimum of eight pages, to accommodate new sessions. */
+ nr_to_keep += 8;
+
+ if (0 == spin_trylock(&mali_mem_page_table_page_pool.lock)) return;
+
+ if (nr_to_keep < mali_mem_page_table_page_pool.count) {
+ nr_to_free = mali_mem_page_table_page_pool.count - nr_to_keep;
+ nr_to_free = min((size_t)MALI_MEM_OS_CHUNK_TO_FREE, nr_to_free);
+ }
+
+ /* Pool lock will be released by the callee. */
+ mali_mem_os_page_table_pool_free(nr_to_free);
+}
+
+static unsigned long mali_mem_os_shrink_count(struct shrinker *shrinker, struct shrink_control *sc)
+{
+ return mali_mem_os_allocator.pool_count;
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
+static int mali_mem_os_shrink(int nr_to_scan, gfp_t gfp_mask)
+#else
+static int mali_mem_os_shrink(struct shrinker *shrinker, int nr_to_scan, gfp_t gfp_mask)
+#endif /* Linux < 2.6.35 */
+#else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+static int mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc)
+#else
+static unsigned long mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc)
+#endif /* Linux < 3.12.0 */
+#endif /* Linux < 3.0.0 */
+{
+ struct mali_page_node *m_page, *m_tmp;
+ unsigned long flags;
+ struct list_head *le, pages;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
+ int nr = nr_to_scan;
+#else
+ int nr = sc->nr_to_scan;
+#endif
+
+ if (0 == nr) {
+ return mali_mem_os_shrink_count(shrinker, sc);
+ }
+
+ if (0 == spin_trylock_irqsave(&mali_mem_os_allocator.pool_lock, flags)) {
+ /* Not able to lock. */
+ return -1;
+ }
+
+ if (0 == mali_mem_os_allocator.pool_count) {
+		/* No pages available */
+ spin_unlock_irqrestore(&mali_mem_os_allocator.pool_lock, flags);
+ return 0;
+ }
+
+ /* Release from general page pool */
+ nr = min((size_t)nr, mali_mem_os_allocator.pool_count);
+ mali_mem_os_allocator.pool_count -= nr;
+ list_for_each(le, &mali_mem_os_allocator.pool_pages) {
+ --nr;
+ if (0 == nr) break;
+ }
+ list_cut_position(&pages, &mali_mem_os_allocator.pool_pages, le);
+ spin_unlock_irqrestore(&mali_mem_os_allocator.pool_lock, flags);
+
+ list_for_each_entry_safe(m_page, m_tmp, &pages, list) {
+ mali_mem_os_free_page_node(m_page);
+ }
+
+ if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES > mali_mem_os_allocator.pool_count) {
+		/* Pool is back below the threshold, stop the trim timer */
+ MALI_DEBUG_PRINT(5, ("Stopping timer, only %u pages on pool\n", mali_mem_os_allocator.pool_count));
+ cancel_delayed_work(&mali_mem_os_allocator.timed_shrinker);
+ }
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+ return mali_mem_os_shrink_count(shrinker, sc);
+#else
+ return nr;
+#endif
+}
+
+static void mali_mem_os_trim_pool(struct work_struct *data)
+{
+ struct mali_page_node *m_page, *m_tmp;
+ struct list_head *le;
+ LIST_HEAD(pages);
+ size_t nr_to_free;
+
+ MALI_IGNORE(data);
+
+ MALI_DEBUG_PRINT(3, ("OS Mem: Trimming pool %u\n", mali_mem_os_allocator.pool_count));
+
+ /* Release from general page pool */
+ spin_lock(&mali_mem_os_allocator.pool_lock);
+ if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES < mali_mem_os_allocator.pool_count) {
+ size_t count = mali_mem_os_allocator.pool_count - MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES;
+ const size_t min_to_free = min(64, MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES);
+
+		/* Free half of the pages above the static limit, but at least min_to_free (normally 64 pages, i.e. 256 KB). */
+ nr_to_free = max(count / 2, min_to_free);
+
+ mali_mem_os_allocator.pool_count -= nr_to_free;
+ list_for_each(le, &mali_mem_os_allocator.pool_pages) {
+ --nr_to_free;
+ if (0 == nr_to_free) break;
+ }
+ list_cut_position(&pages, &mali_mem_os_allocator.pool_pages, le);
+ }
+ spin_unlock(&mali_mem_os_allocator.pool_lock);
+
+ list_for_each_entry_safe(m_page, m_tmp, &pages, list) {
+ mali_mem_os_free_page_node(m_page);
+ }
+
+ /* Release some pages from page table page pool */
+ mali_mem_os_trim_page_table_page_pool();
+
+ if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES < mali_mem_os_allocator.pool_count) {
+ MALI_DEBUG_PRINT(4, ("OS Mem: Starting pool trim timer %u\n", mali_mem_os_allocator.pool_count));
+ queue_delayed_work(mali_mem_os_allocator.wq, &mali_mem_os_allocator.timed_shrinker, MALI_OS_MEMORY_POOL_TRIM_JIFFIES);
+ }
+}
+
+_mali_osk_errcode_t mali_mem_os_init(void)
+{
+ mali_mem_os_allocator.wq = alloc_workqueue("mali-mem", WQ_UNBOUND, 1);
+ if (NULL == mali_mem_os_allocator.wq) {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &dma_attrs_wc);
+#endif
+
+ register_shrinker(&mali_mem_os_allocator.shrinker);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_mem_os_term(void)
+{
+ struct mali_page_node *m_page, *m_tmp;
+ unregister_shrinker(&mali_mem_os_allocator.shrinker);
+ cancel_delayed_work_sync(&mali_mem_os_allocator.timed_shrinker);
+
+ if (NULL != mali_mem_os_allocator.wq) {
+ destroy_workqueue(mali_mem_os_allocator.wq);
+ mali_mem_os_allocator.wq = NULL;
+ }
+
+ spin_lock(&mali_mem_os_allocator.pool_lock);
+ list_for_each_entry_safe(m_page, m_tmp, &mali_mem_os_allocator.pool_pages, list) {
+ mali_mem_os_free_page_node(m_page);
+
+ --mali_mem_os_allocator.pool_count;
+ }
+ BUG_ON(mali_mem_os_allocator.pool_count);
+ spin_unlock(&mali_mem_os_allocator.pool_lock);
+
+ /* Release from page table page pool */
+ do {
+ u32 nr_to_free;
+
+ spin_lock(&mali_mem_page_table_page_pool.lock);
+
+ nr_to_free = min((size_t)MALI_MEM_OS_CHUNK_TO_FREE, mali_mem_page_table_page_pool.count);
+
+ /* Pool lock will be released by the callee. */
+ mali_mem_os_page_table_pool_free(nr_to_free);
+ } while (0 != mali_mem_page_table_page_pool.count);
+}
+
+_mali_osk_errcode_t mali_memory_core_resource_os_memory(u32 size)
+{
+ mali_mem_os_allocator.allocation_limit = size;
+
+ MALI_SUCCESS;
+}
+
+u32 mali_mem_os_stat(void)
+{
+ return atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE;
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_os_alloc.h b/drivers/gpu/arm/utgard/linux/mali_memory_os_alloc.h
new file mode 100644
index 000000000000..f9ead166c455
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_os_alloc.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_OS_ALLOC_H__
+#define __MALI_MEMORY_OS_ALLOC_H__
+
+#include "mali_osk.h"
+#include "mali_memory_types.h"
+
+
+/** @brief Release Mali OS memory
+ *
+ * The session memory_lock must be held when calling this function.
+ *
+ * @param mem_bkend Pointer to the mali_mem_backend to release
+ */
+u32 mali_mem_os_release(mali_mem_backend *mem_bkend);
+
+_mali_osk_errcode_t mali_mem_os_get_table_page(mali_dma_addr *phys, mali_io_address *mapping);
+
+void mali_mem_os_release_table_page(mali_dma_addr phys, void *virt);
+
+_mali_osk_errcode_t mali_mem_os_init(void);
+
+void mali_mem_os_term(void);
+
+u32 mali_mem_os_stat(void);
+
+void mali_mem_os_free_page_node(struct mali_page_node *m_page);
+
+int mali_mem_os_alloc_pages(mali_mem_os_mem *os_mem, u32 size);
+
+u32 mali_mem_os_free(struct list_head *os_pages, u32 pages_count, mali_bool cow_flag);
+
+_mali_osk_errcode_t mali_mem_os_put_page(struct page *page);
+
+_mali_osk_errcode_t mali_mem_os_resize_pages(mali_mem_os_mem *mem_from, mali_mem_os_mem *mem_to, u32 start_page, u32 page_count);
+
+_mali_osk_errcode_t mali_mem_os_mali_map(mali_mem_os_mem *os_mem, struct mali_session_data *session, u32 vaddr, u32 start_page, u32 mapping_pgae_num, u32 props);
+
+void mali_mem_os_mali_unmap(mali_mem_allocation *alloc);
+
+int mali_mem_os_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma);
+
+_mali_osk_errcode_t mali_mem_os_resize_cpu_map_locked(mali_mem_backend *mem_bkend, struct vm_area_struct *vma, unsigned long start_vaddr, u32 mappig_size);
+
+#endif /* __MALI_MEMORY_OS_ALLOC_H__ */
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_swap_alloc.c b/drivers/gpu/arm/utgard/linux/mali_memory_swap_alloc.c
new file mode 100644
index 000000000000..a46eb198c98f
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_swap_alloc.c
@@ -0,0 +1,942 @@
+/*
+ * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/fs.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/sched.h>
+#include <linux/idr.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/shmem_fs.h>
+#include <linux/file.h>
+#include <linux/swap.h>
+#include <linux/pagemap.h>
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_memory.h"
+#include "mali_memory_manager.h"
+#include "mali_memory_virtual.h"
+#include "mali_memory_cow.h"
+#include "mali_ukk.h"
+#include "mali_kernel_utilization.h"
+#include "mali_memory_swap_alloc.h"
+
+
+static struct _mali_osk_bitmap idx_mgr;
+static struct file *global_swap_file;
+static struct address_space *global_swap_space;
+static _mali_osk_wq_work_t *mali_mem_swap_out_workq = NULL;
+static u32 mem_backend_swapped_pool_size;
+#ifdef MALI_MEM_SWAP_TRACKING
+static u32 mem_backend_swapped_unlock_size;
+#endif
+/* Lock order: mem_backend_swapped_pool_lock > each memory backend's mutex lock.
+ * This lock is used to protect mem_backend_swapped_pool_size and mem_backend_swapped_pool. */
+static struct mutex mem_backend_swapped_pool_lock;
+static struct list_head mem_backend_swapped_pool;
+
+extern struct mali_mem_os_allocator mali_mem_os_allocator;
+
+#define MALI_SWAP_LOW_MEM_DEFAULT_VALUE (60*1024*1024)
+#define MALI_SWAP_INVALIDATE_MALI_ADDRESS (0)       /* Used to mark the given memory cookie as invalid. */
+#define MALI_SWAP_GLOBAL_SWAP_FILE_SIZE (0xFFFFFFFF)
+#define MALI_SWAP_GLOBAL_SWAP_FILE_INDEX ((MALI_SWAP_GLOBAL_SWAP_FILE_SIZE) >> PAGE_CACHE_SHIFT)
+#define MALI_SWAP_GLOBAL_SWAP_FILE_INDEX_RESERVE (1 << 15) /* Reserved for CoW nonlinear swap backend memory, the space size is 128MB. */
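+/* The reserved index window backs the CoWed-swap workaround described in
+ * _mali_ukk_mem_cow(): such backends all use start_idx 0, so real page
+ * indices must never be allocated from this window. */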
+
+unsigned int mali_mem_swap_out_threshold_value = MALI_SWAP_LOW_MEM_DEFAULT_VALUE;
+
+/**
+ * There are two situations in which the swapped pool is shrunk: when GPU
+ * utilization is low, which suggests the GPU will not touch many swappable
+ * backends in the near term, and when newly added swappable backends push
+ * the total pool size beyond the swapped-pool threshold.
+ */
+typedef enum {
+ MALI_MEM_SWAP_SHRINK_WITH_LOW_UTILIZATION = 100,
+ MALI_MEM_SWAP_SHRINK_FOR_ADDING_NEW_BACKENDS = 257,
+} _mali_mem_swap_pool_shrink_type_t;
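+/*
+ * The enum values double as GPU utilization thresholds in
+ * mali_mem_swap_swapped_bkend_pool_shrink(). Assuming utilization is
+ * reported on a 0..256 scale (256 == 100%), 257 makes the utilization
+ * check pass unconditionally when shrinking for newly added backends,
+ * while 100 demands genuinely low GPU load.
+ */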
+
+static void mali_mem_swap_swapped_bkend_pool_check_for_low_utilization(void *arg);
+
+_mali_osk_errcode_t mali_mem_swap_init(void)
+{
+ gfp_t flags = __GFP_NORETRY | __GFP_NOWARN;
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_bitmap_init(&idx_mgr, MALI_SWAP_GLOBAL_SWAP_FILE_INDEX, MALI_SWAP_GLOBAL_SWAP_FILE_INDEX_RESERVE)) {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ global_swap_file = shmem_file_setup("mali_swap", MALI_SWAP_GLOBAL_SWAP_FILE_SIZE, VM_NORESERVE);
+ if (IS_ERR(global_swap_file)) {
+ _mali_osk_bitmap_term(&idx_mgr);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ global_swap_space = global_swap_file->f_path.dentry->d_inode->i_mapping;
+
+ mali_mem_swap_out_workq = _mali_osk_wq_create_work(mali_mem_swap_swapped_bkend_pool_check_for_low_utilization, NULL);
+ if (NULL == mali_mem_swap_out_workq) {
+ _mali_osk_bitmap_term(&idx_mgr);
+ fput(global_swap_file);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+#if defined(CONFIG_ARM) && !defined(CONFIG_ARM_LPAE)
+ flags |= GFP_HIGHUSER;
+#else
+#ifdef CONFIG_ZONE_DMA32
+ flags |= GFP_DMA32;
+#else
+#ifdef CONFIG_ZONE_DMA
+ flags |= GFP_DMA;
+#else
+	/* The Utgard GPU can only address memory below 4 GB, but this
+	 * kernel configuration provides no zone for allocating below 4 GB.
+	 */
+ MALI_DEBUG_ASSERT(0);
+#endif
+#endif
+#endif
+
+	/* When shmem_read_mapping_page() is used to allocate or swap in a page,
+	 * any newly allocated page uses these GFP flags. */
+ mapping_set_gfp_mask(global_swap_space, flags);
+
+ mem_backend_swapped_pool_size = 0;
+#ifdef MALI_MEM_SWAP_TRACKING
+ mem_backend_swapped_unlock_size = 0;
+#endif
+ mutex_init(&mem_backend_swapped_pool_lock);
+ INIT_LIST_HEAD(&mem_backend_swapped_pool);
+
+ MALI_DEBUG_PRINT(2, ("Mali SWAP: Swap out threshold vaule is %uM\n", mali_mem_swap_out_threshold_value >> 20));
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_mem_swap_term(void)
+{
+ _mali_osk_bitmap_term(&idx_mgr);
+
+ fput(global_swap_file);
+
+ _mali_osk_wq_delete_work(mali_mem_swap_out_workq);
+
+ MALI_DEBUG_ASSERT(list_empty(&mem_backend_swapped_pool));
+ MALI_DEBUG_ASSERT(0 == mem_backend_swapped_pool_size);
+
+ return;
+}
+
+struct file *mali_mem_swap_get_global_swap_file(void)
+{
+ return global_swap_file;
+}
+
+/* Check whether a swappable backend is in the swapped pool. */
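+/* (This relies on list_del_init() being used wherever a backend leaves the
+ * pool, so an empty, self-linked list node reliably means "not pooled".) */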
+static mali_bool mali_memory_swap_backend_in_swapped_pool(mali_mem_backend *mem_bkend)
+{
+ MALI_DEBUG_ASSERT_POINTER(mem_bkend);
+
+ return !list_empty(&mem_bkend->list);
+}
+
+void mali_memory_swap_list_backend_delete(mali_mem_backend *mem_bkend)
+{
+ MALI_DEBUG_ASSERT_POINTER(mem_bkend);
+
+ mutex_lock(&mem_backend_swapped_pool_lock);
+ mutex_lock(&mem_bkend->mutex);
+
+ if (MALI_FALSE == mali_memory_swap_backend_in_swapped_pool(mem_bkend)) {
+ mutex_unlock(&mem_bkend->mutex);
+ mutex_unlock(&mem_backend_swapped_pool_lock);
+ return;
+ }
+
+ MALI_DEBUG_ASSERT(!list_empty(&mem_bkend->list));
+
+ list_del_init(&mem_bkend->list);
+
+ mutex_unlock(&mem_bkend->mutex);
+
+ mem_backend_swapped_pool_size -= mem_bkend->size;
+
+ mutex_unlock(&mem_backend_swapped_pool_lock);
+}
+
+static void mali_mem_swap_out_page_node(mali_page_node *page_node)
+{
+ MALI_DEBUG_ASSERT(page_node);
+
+ dma_unmap_page(&mali_platform_device->dev, page_node->swap_it->dma_addr,
+ _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
+ set_page_dirty(page_node->swap_it->page);
+ page_cache_release(page_node->swap_it->page);
+}
+
+void mali_mem_swap_unlock_single_mem_backend(mali_mem_backend *mem_bkend)
+{
+ mali_page_node *m_page;
+
+ MALI_DEBUG_ASSERT(1 == mutex_is_locked(&mem_bkend->mutex));
+
+ if (MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN == (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN)) {
+ return;
+ }
+
+ mem_bkend->flags |= MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN;
+
+ list_for_each_entry(m_page, &mem_bkend->swap_mem.pages, list) {
+ mali_mem_swap_out_page_node(m_page);
+ }
+
+ return;
+}
+
+static void mali_mem_swap_unlock_partial_locked_mem_backend(mali_mem_backend *mem_bkend, mali_page_node *page_node)
+{
+ mali_page_node *m_page;
+
+ MALI_DEBUG_ASSERT(1 == mutex_is_locked(&mem_bkend->mutex));
+
+ list_for_each_entry(m_page, &mem_bkend->swap_mem.pages, list) {
+ if (m_page == page_node) {
+ break;
+ }
+ mali_mem_swap_out_page_node(m_page);
+ }
+}
+
+static void mali_mem_swap_swapped_bkend_pool_shrink(_mali_mem_swap_pool_shrink_type_t shrink_type)
+{
+ mali_mem_backend *bkend, *tmp_bkend;
+ long system_free_size;
+ u32 last_gpu_utilization, gpu_utilization_threshold_value, temp_swap_out_threshold_value;
+
+ MALI_DEBUG_ASSERT(1 == mutex_is_locked(&mem_backend_swapped_pool_lock));
+
+ if (MALI_MEM_SWAP_SHRINK_WITH_LOW_UTILIZATION == shrink_type) {
+		/**
+		 * When system memory is very low and, at the same time, GPU load is
+		 * so low that high performance is not needed, we can unlock more
+		 * swap memory backends from the swapped backends pool, shrinking it
+		 * down toward a quarter of the usual swap-out threshold.
+		 */
+ gpu_utilization_threshold_value = MALI_MEM_SWAP_SHRINK_WITH_LOW_UTILIZATION;
+ temp_swap_out_threshold_value = (mali_mem_swap_out_threshold_value >> 2);
+ } else {
+		/* When adding swappable memory backends to the swapped pool, the
+		 * driver must not hold on to too many of them, yet performance still
+		 * matters. Swapping backends out is therefore a balance, governed by
+		 * the following conditions:
+		 * 1. The total size of the global swapped backend pool exceeds the
+		 *    defined threshold value.
+		 * 2. The system-wide free memory size is below the defined threshold.
+		 * 3. Note that GPU utilization is deliberately not considered here.
+		 */
+ gpu_utilization_threshold_value = MALI_MEM_SWAP_SHRINK_FOR_ADDING_NEW_BACKENDS;
+ temp_swap_out_threshold_value = mali_mem_swap_out_threshold_value;
+ }
+
+	/* Get the amount of free system memory, in bytes. */
+ system_free_size = global_page_state(NR_FREE_PAGES) * PAGE_SIZE;
+ last_gpu_utilization = _mali_ukk_utilization_gp_pp();
+
+ if ((last_gpu_utilization < gpu_utilization_threshold_value)
+ && (system_free_size < mali_mem_swap_out_threshold_value)
+ && (mem_backend_swapped_pool_size > temp_swap_out_threshold_value)) {
+ list_for_each_entry_safe(bkend, tmp_bkend, &mem_backend_swapped_pool, list) {
+ if (mem_backend_swapped_pool_size <= temp_swap_out_threshold_value) {
+ break;
+ }
+
+ mutex_lock(&bkend->mutex);
+
+ /* check if backend is in use. */
+ if (0 < bkend->using_count) {
+ mutex_unlock(&bkend->mutex);
+ continue;
+ }
+
+ mali_mem_swap_unlock_single_mem_backend(bkend);
+ list_del_init(&bkend->list);
+ mem_backend_swapped_pool_size -= bkend->size;
+#ifdef MALI_MEM_SWAP_TRACKING
+ mem_backend_swapped_unlock_size += bkend->size;
+#endif
+ mutex_unlock(&bkend->mutex);
+ }
+ }
+
+ return;
+}
+
+static void mali_mem_swap_swapped_bkend_pool_check_for_low_utilization(void *arg)
+{
+ MALI_IGNORE(arg);
+
+ mutex_lock(&mem_backend_swapped_pool_lock);
+
+ mali_mem_swap_swapped_bkend_pool_shrink(MALI_MEM_SWAP_SHRINK_WITH_LOW_UTILIZATION);
+
+ mutex_unlock(&mem_backend_swapped_pool_lock);
+}
+
+/**
+ * After a PP job finishes, every swappable memory backend used by that job
+ * is moved to the tail of the global swapped pool; if the total size of
+ * swappable memory then exceeds the threshold value, the swapped pool is
+ * shrunk starting from the head of the list.
+ */
+void mali_memory_swap_list_backend_add(mali_mem_backend *mem_bkend)
+{
+ mutex_lock(&mem_backend_swapped_pool_lock);
+ mutex_lock(&mem_bkend->mutex);
+
+ if (mali_memory_swap_backend_in_swapped_pool(mem_bkend)) {
+ MALI_DEBUG_ASSERT(!list_empty(&mem_bkend->list));
+
+ list_del_init(&mem_bkend->list);
+ list_add_tail(&mem_bkend->list, &mem_backend_swapped_pool);
+ mutex_unlock(&mem_bkend->mutex);
+ mutex_unlock(&mem_backend_swapped_pool_lock);
+ return;
+ }
+
+ list_add_tail(&mem_bkend->list, &mem_backend_swapped_pool);
+
+ mutex_unlock(&mem_bkend->mutex);
+ mem_backend_swapped_pool_size += mem_bkend->size;
+
+ mali_mem_swap_swapped_bkend_pool_shrink(MALI_MEM_SWAP_SHRINK_FOR_ADDING_NEW_BACKENDS);
+
+ mutex_unlock(&mem_backend_swapped_pool_lock);
+ return;
+}
+
+
+u32 mali_mem_swap_idx_alloc(void)
+{
+ return _mali_osk_bitmap_alloc(&idx_mgr);
+}
+
+void mali_mem_swap_idx_free(u32 idx)
+{
+ _mali_osk_bitmap_free(&idx_mgr, idx);
+}
+
+static u32 mali_mem_swap_idx_range_alloc(u32 count)
+{
+ u32 index;
+
+ index = _mali_osk_bitmap_alloc_range(&idx_mgr, count);
+
+ return index;
+}
+
+static void mali_mem_swap_idx_range_free(u32 idx, int num)
+{
+ _mali_osk_bitmap_free_range(&idx_mgr, idx, num);
+}
+
+struct mali_swap_item *mali_mem_swap_alloc_swap_item(void)
+{
+ mali_swap_item *swap_item;
+
+ swap_item = kzalloc(sizeof(mali_swap_item), GFP_KERNEL);
+
+ if (NULL == swap_item) {
+ return NULL;
+ }
+
+ atomic_set(&swap_item->ref_count, 1);
+ swap_item->page = NULL;
+ atomic_add(1, &mali_mem_os_allocator.allocated_pages);
+
+ return swap_item;
+}
+
+void mali_mem_swap_free_swap_item(mali_swap_item *swap_item)
+{
+ struct inode *file_node;
+ long long start, end;
+
+ /* If this swap item is shared, we just reduce the reference counter. */
+ if (0 == atomic_dec_return(&swap_item->ref_count)) {
+ file_node = global_swap_file->f_path.dentry->d_inode;
+ start = swap_item->idx;
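+		/* Convert the page index to a byte offset; the shift by 12 assumes
+		 * 4 KB pages (PAGE_SHIFT == 12). */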
+ start = start << 12;
+ end = start + PAGE_SIZE;
+
+ shmem_truncate_range(file_node, start, (end - 1));
+
+ mali_mem_swap_idx_free(swap_item->idx);
+
+ atomic_sub(1, &mali_mem_os_allocator.allocated_pages);
+
+ kfree(swap_item);
+ }
+}
+
+/* Allocate a new swap item, used both for new memory allocations and for CoW pages on write. */
+struct mali_page_node *_mali_mem_swap_page_node_allocate(void)
+{
+ struct mali_page_node *m_page;
+
+ m_page = _mali_page_node_allocate(MALI_PAGE_NODE_SWAP);
+
+ if (NULL == m_page) {
+ return NULL;
+ }
+
+ m_page->swap_it = mali_mem_swap_alloc_swap_item();
+
+ if (NULL == m_page->swap_it) {
+ kfree(m_page);
+ return NULL;
+ }
+
+ return m_page;
+}
+
+_mali_osk_errcode_t _mali_mem_swap_put_page_node(struct mali_page_node *m_page)
+{
+
+ mali_mem_swap_free_swap_item(m_page->swap_it);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void _mali_mem_swap_page_node_free(struct mali_page_node *m_page)
+{
+ _mali_mem_swap_put_page_node(m_page);
+
+ kfree(m_page);
+
+ return;
+}
+
+u32 mali_mem_swap_free(mali_mem_swap *swap_mem)
+{
+ struct mali_page_node *m_page, *m_tmp;
+ u32 free_pages_nr = 0;
+
+ MALI_DEBUG_ASSERT_POINTER(swap_mem);
+
+ list_for_each_entry_safe(m_page, m_tmp, &swap_mem->pages, list) {
+ MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_SWAP);
+
+		/* Free the page node and drop its swap item reference; when the
+		 * ref count is 1 the swap item itself is freed as well. */
+ list_del(&m_page->list);
+ if (1 == _mali_page_node_get_ref_count(m_page)) {
+ free_pages_nr++;
+ }
+
+ _mali_mem_swap_page_node_free(m_page);
+ }
+
+ return free_pages_nr;
+}
+
+static u32 mali_mem_swap_cow_free(mali_mem_cow *cow_mem)
+{
+ struct mali_page_node *m_page, *m_tmp;
+ u32 free_pages_nr = 0;
+
+ MALI_DEBUG_ASSERT_POINTER(cow_mem);
+
+ list_for_each_entry_safe(m_page, m_tmp, &cow_mem->pages, list) {
+ MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_SWAP);
+
+		/* Free the page node and drop its swap item reference; when the
+		 * ref count is 1 the swap item itself is freed as well. */
+ list_del(&m_page->list);
+ if (1 == _mali_page_node_get_ref_count(m_page)) {
+ free_pages_nr++;
+ }
+
+ _mali_mem_swap_page_node_free(m_page);
+ }
+
+ return free_pages_nr;
+}
+
+u32 mali_mem_swap_release(mali_mem_backend *mem_bkend, mali_bool is_mali_mapped)
+{
+ mali_mem_allocation *alloc;
+ u32 free_pages_nr = 0;
+
+ MALI_DEBUG_ASSERT_POINTER(mem_bkend);
+ alloc = mem_bkend->mali_allocation;
+ MALI_DEBUG_ASSERT_POINTER(alloc);
+
+ if (is_mali_mapped) {
+ mali_mem_swap_mali_unmap(alloc);
+ }
+
+ mali_memory_swap_list_backend_delete(mem_bkend);
+
+ mutex_lock(&mem_bkend->mutex);
+ /* Make sure the given memory backend has been unlocked on the Mali side
+ * before this memory block is freed. */
+ mali_mem_swap_unlock_single_mem_backend(mem_bkend);
+ mutex_unlock(&mem_bkend->mutex);
+
+ if (MALI_MEM_SWAP == mem_bkend->type) {
+ free_pages_nr = mali_mem_swap_free(&mem_bkend->swap_mem);
+ } else {
+ free_pages_nr = mali_mem_swap_cow_free(&mem_bkend->cow_mem);
+ }
+
+ return free_pages_nr;
+}
+
+mali_bool mali_mem_swap_in_page_node(struct mali_page_node *page_node)
+{
+ MALI_DEBUG_ASSERT(NULL != page_node);
+
+ page_node->swap_it->page = shmem_read_mapping_page(global_swap_space, page_node->swap_it->idx);
+
+ if (IS_ERR(page_node->swap_it->page)) {
+ MALI_DEBUG_PRINT_ERROR(("SWAP Mem: failed to swap in page with index: %d.\n", page_node->swap_it->idx));
+ return MALI_FALSE;
+ }
+
+ /* Ensure page is flushed from CPU caches. */
+ page_node->swap_it->dma_addr = dma_map_page(&mali_platform_device->dev, page_node->swap_it->page,
+ 0, _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
+
+ return MALI_TRUE;
+}
+
+int mali_mem_swap_alloc_pages(mali_mem_swap *swap_mem, u32 size, u32 *bkend_idx)
+{
+ size_t page_count = PAGE_ALIGN(size) / PAGE_SIZE;
+ struct mali_page_node *m_page;
+ long system_free_size;
+ u32 i, index;
+ mali_bool ret;
+
+ MALI_DEBUG_ASSERT(NULL != swap_mem);
+ MALI_DEBUG_ASSERT(NULL != bkend_idx);
+ MALI_DEBUG_ASSERT(page_count <= MALI_SWAP_GLOBAL_SWAP_FILE_INDEX_RESERVE);
+
+ if (atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE + size > mali_mem_os_allocator.allocation_limit) {
+ MALI_DEBUG_PRINT(2, ("Mali Mem: Unable to allocate %u bytes. Currently allocated: %lu, max limit %lu\n",
+ size,
+ atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE,
+ mali_mem_os_allocator.allocation_limit));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ INIT_LIST_HEAD(&swap_mem->pages);
+ swap_mem->count = page_count;
+ index = mali_mem_swap_idx_range_alloc(page_count);
+
+ if (_MALI_OSK_BITMAP_INVALIDATE_INDEX == index) {
+ MALI_PRINT_ERROR(("Mali Swap: Failed to allocate continuous index for swappable Mali memory."));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ for (i = 0; i < page_count; i++) {
+ m_page = _mali_mem_swap_page_node_allocate();
+
+ if (NULL == m_page) {
+ MALI_DEBUG_PRINT_ERROR(("SWAP Mem: Failed to allocate mali page node."));
+ swap_mem->count = i;
+
+ mali_mem_swap_free(swap_mem);
+ mali_mem_swap_idx_range_free(index + i, page_count - i);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ m_page->swap_it->idx = index + i;
+
+ ret = mali_mem_swap_in_page_node(m_page);
+
+ if (MALI_FALSE == ret) {
+ MALI_DEBUG_PRINT_ERROR(("SWAP Mem: Allocate new page from SHMEM file failed."));
+ _mali_mem_swap_page_node_free(m_page);
+ mali_mem_swap_idx_range_free(index + i + 1, page_count - i - 1);
+
+ swap_mem->count = i;
+ mali_mem_swap_free(swap_mem);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ list_add_tail(&m_page->list, &swap_mem->pages);
+ }
+
+ system_free_size = global_page_state(NR_FREE_PAGES) * PAGE_SIZE;
+
+ if ((system_free_size < mali_mem_swap_out_threshold_value)
+ && (mem_backend_swapped_pool_size > (mali_mem_swap_out_threshold_value >> 2))
+ && mali_utilization_enabled()) {
+ _mali_osk_wq_schedule_work(mali_mem_swap_out_workq);
+ }
+
+ *bkend_idx = index;
+ return 0;
+}
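The work-queue trigger at the end of the function fires only when free system memory is below the swap-out threshold, the swapped pool holds more than a quarter of that threshold, and GPU utilization tracking is enabled. A standalone sketch of the heuristic, with all byte values hypothetical:

    #include <stdbool.h>
    #include <stdio.h>

    static bool should_schedule_swap_out(long long system_free, long long pool_size,
                                         long long threshold, bool utilization_enabled)
    {
        /* mirrors the three-part condition in mali_mem_swap_alloc_pages() */
        return system_free < threshold &&
               pool_size > (threshold >> 2) &&
               utilization_enabled;
    }

    int main(void)
    {
        /* 16 MiB free, 24 MiB pooled, 64 MiB threshold -> schedule swap-out */
        printf("%d\n", should_schedule_swap_out(16LL << 20, 24LL << 20, 64LL << 20, true));
        return 0;
    }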
+
+void mali_mem_swap_mali_unmap(mali_mem_allocation *alloc)
+{
+ struct mali_session_data *session;
+
+ MALI_DEBUG_ASSERT_POINTER(alloc);
+ session = alloc->session;
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ mali_session_memory_lock(session);
+ mali_mem_mali_map_free(session, alloc->psize, alloc->mali_vma_node.vm_node.start,
+ alloc->flags);
+ mali_session_memory_unlock(session);
+}
+
+
+/* Insert these shmem-backed pages into the Mali page table. */
+_mali_osk_errcode_t mali_mem_swap_mali_map(mali_mem_swap *swap_mem, struct mali_session_data *session, u32 vaddr, u32 props)
+{
+ struct mali_page_directory *pagedir = session->page_directory;
+ struct mali_page_node *m_page;
+ dma_addr_t phys;
+ u32 virt = vaddr;
+ u32 prop = props;
+
+ list_for_each_entry(m_page, &swap_mem->pages, list) {
+ MALI_DEBUG_ASSERT(NULL != m_page->swap_it->page);
+ phys = m_page->swap_it->dma_addr;
+
+ mali_mmu_pagedir_update(pagedir, virt, phys, MALI_MMU_PAGE_SIZE, prop);
+ virt += MALI_MMU_PAGE_SIZE;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+int mali_mem_swap_in_pages(struct mali_pp_job *job)
+{
+ u32 num_memory_cookies;
+ struct mali_session_data *session;
+ struct mali_vma_node *mali_vma_node = NULL;
+ mali_mem_allocation *mali_alloc = NULL;
+ mali_mem_backend *mem_bkend = NULL;
+ struct mali_page_node *m_page;
+ mali_bool swap_in_success = MALI_TRUE;
+ int i;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ num_memory_cookies = mali_pp_job_num_memory_cookies(job);
+ session = mali_pp_job_get_session(job);
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ for (i = 0; i < num_memory_cookies; i++) {
+
+ u32 mali_addr = mali_pp_job_get_memory_cookie(job, i);
+
+ mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0);
+ if (NULL == mali_vma_node) {
+ job->memory_cookies[i] = MALI_SWAP_INVALIDATE_MALI_ADDRESS;
+ swap_in_success = MALI_FALSE;
+ MALI_PRINT_ERROR(("SWAP Mem: failed to find mali_vma_node through Mali address: 0x%08x.\n", mali_addr));
+ continue;
+ }
+
+ mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
+ MALI_DEBUG_ASSERT(NULL != mali_alloc);
+
+ if (MALI_MEM_SWAP != mali_alloc->type &&
+ MALI_MEM_COW != mali_alloc->type) {
+ continue;
+ }
+
+ /* Get backend memory & Map on GPU */
+ mutex_lock(&mali_idr_mutex);
+ mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle);
+ mutex_unlock(&mali_idr_mutex);
+ MALI_DEBUG_ASSERT(NULL != mem_bkend);
+
+ /* We need not hold the backend's lock here; this check is race safe. */
+ if ((MALI_MEM_COW == mem_bkend->type) &&
+ (!(mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED))) {
+ continue;
+ }
+
+ mutex_lock(&mem_bkend->mutex);
+
+ /* When swap_in_success is MALI_FALSE, this job has a memory backend that could not be
+ * swapped in and will be aborted by the Mali scheduler. Here we just mark as invalid
+ * those memory cookies that should not be swapped out when the job is deleted. */
+ if (MALI_FALSE == swap_in_success) {
+ job->memory_cookies[i] = MALI_SWAP_INVALIDATE_MALI_ADDRESS;
+ mutex_unlock(&mem_bkend->mutex);
+ continue;
+ }
+
+ /* Before swapping in, check whether this memory backend has already been swapped in by recently flushed jobs. */
+ ++mem_bkend->using_count;
+
+ if (1 < mem_bkend->using_count) {
+ MALI_DEBUG_ASSERT(MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN != (MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN & mem_bkend->flags));
+ mutex_unlock(&mem_bkend->mutex);
+ continue;
+ }
+
+ if (MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN != (MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN & mem_bkend->flags)) {
+ mutex_unlock(&mem_bkend->mutex);
+ continue;
+ }
+
+ list_for_each_entry(m_page, &mem_bkend->swap_mem.pages, list) {
+ if (MALI_FALSE == mali_mem_swap_in_page_node(m_page)) {
+ /* Not enough memory to swap in this page, so release the pages that have
+ * already been swapped in and mark this PP job as failed. */
+ mali_mem_swap_unlock_partial_locked_mem_backend(mem_bkend, m_page);
+ swap_in_success = MALI_FALSE;
+ break;
+ }
+ }
+
+ if (swap_in_success) {
+#ifdef MALI_MEM_SWAP_TRACKING
+ mem_backend_swapped_unlock_size -= mem_bkend->size;
+#endif
+ _mali_osk_mutex_wait(session->memory_lock);
+ mali_mem_swap_mali_map(&mem_bkend->swap_mem, session, mali_alloc->mali_mapping.addr, mali_alloc->mali_mapping.properties);
+ _mali_osk_mutex_signal(session->memory_lock);
+
+ /* Clear the unlock flag in the backend flags to mark this backend as swapped in. */
+ mem_bkend->flags &= ~(MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN);
+ mutex_unlock(&mem_bkend->mutex);
+ } else {
+ --mem_bkend->using_count;
+ /* Mark this backend as not swapped in; it need not be processed any further. */
+ job->memory_cookies[i] = MALI_SWAP_INVALIDATE_MALI_ADDRESS;
+ mutex_unlock(&mem_bkend->mutex);
+ }
+ }
+
+ job->swap_status = swap_in_success ? MALI_SWAP_IN_SUCC : MALI_SWAP_IN_FAIL;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+int mali_mem_swap_out_pages(struct mali_pp_job *job)
+{
+ u32 num_memory_cookies;
+ struct mali_session_data *session;
+ struct mali_vma_node *mali_vma_node = NULL;
+ mali_mem_allocation *mali_alloc = NULL;
+ mali_mem_backend *mem_bkend = NULL;
+ int i;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ num_memory_cookies = mali_pp_job_num_memory_cookies(job);
+ session = mali_pp_job_get_session(job);
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ for (i = 0; i < num_memory_cookies; i++) {
+ u32 mali_addr = mali_pp_job_get_memory_cookie(job, i);
+
+ if (MALI_SWAP_INVALIDATE_MALI_ADDRESS == mali_addr) {
+ continue;
+ }
+
+ mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0);
+
+ if (NULL == mali_vma_node) {
+ MALI_PRINT_ERROR(("SWAP Mem: failed to find mali_vma_node through Mali address: 0x%08x.\n", mali_addr));
+ continue;
+ }
+
+ mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
+ MALI_DEBUG_ASSERT(NULL != mali_alloc);
+
+ if (MALI_MEM_SWAP != mali_alloc->type &&
+ MALI_MEM_COW != mali_alloc->type) {
+ continue;
+ }
+
+ mutex_lock(&mali_idr_mutex);
+ mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle);
+ mutex_unlock(&mali_idr_mutex);
+ MALI_DEBUG_ASSERT(NULL != mem_bkend);
+
+ /* We need not hold the backend's lock here; this check is race safe. */
+ if ((MALI_MEM_COW == mem_bkend->type) &&
+ (!(mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED))) {
+ continue;
+ }
+
+ mutex_lock(&mem_bkend->mutex);
+
+ MALI_DEBUG_ASSERT(0 < mem_bkend->using_count);
+
+ /* Decrementing using_count means fewer PP jobs are using this memory backend;
+ * once the count reaches zero, no PP job is using it and it can be put on the swap-out list. */
+ --mem_bkend->using_count;
+
+ if (0 < mem_bkend->using_count) {
+ mutex_unlock(&mem_bkend->mutex);
+ continue;
+ }
+ mutex_unlock(&mem_bkend->mutex);
+
+ mali_memory_swap_list_backend_add(mem_bkend);
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
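mali_mem_swap_in_pages() increments using_count for each backend a job uses and mali_mem_swap_out_pages() decrements it; only the last user puts the backend on the global swap-out list. A standalone sketch of that pairing (types and names simplified for illustration):

    #include <stdio.h>

    struct backend {
        int using_count;
        int on_swap_list;
    };

    static void job_swap_in(struct backend *b)
    {
        b->using_count++;            /* one more PP job uses this backend */
    }

    static void job_swap_out(struct backend *b)
    {
        if (--b->using_count == 0)
            b->on_swap_list = 1;     /* stands in for mali_memory_swap_list_backend_add() */
    }

    int main(void)
    {
        struct backend b = { 0, 0 };

        job_swap_in(&b);
        job_swap_in(&b);             /* two PP jobs share the backend */
        job_swap_out(&b);            /* still in use: not listed */
        job_swap_out(&b);            /* last user gone: listed for swap-out */
        printf("on_swap_list=%d\n", b.on_swap_list);
        return 0;
    }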
+
+int mali_mem_swap_allocate_page_on_demand(mali_mem_backend *mem_bkend, u32 offset, struct page **pagep)
+{
+ struct mali_page_node *m_page, *found_node = NULL;
+ struct page *found_page;
+ mali_mem_swap *swap = NULL;
+ mali_mem_cow *cow = NULL;
+ dma_addr_t dma_addr;
+ u32 i = 0;
+
+ if (MALI_MEM_SWAP == mem_bkend->type) {
+ swap = &mem_bkend->swap_mem;
+ list_for_each_entry(m_page, &swap->pages, list) {
+ if (i == offset) {
+ found_node = m_page;
+ break;
+ }
+ i++;
+ }
+ } else {
+ MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type);
+ MALI_DEBUG_ASSERT(MALI_MEM_BACKEND_FLAG_SWAP_COWED == (MALI_MEM_BACKEND_FLAG_SWAP_COWED & mem_bkend->flags));
+
+ cow = &mem_bkend->cow_mem;
+ list_for_each_entry(m_page, &cow->pages, list) {
+ if (i == offset) {
+ found_node = m_page;
+ break;
+ }
+ i++;
+ }
+ }
+
+ if (NULL == found_node) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ found_page = shmem_read_mapping_page(global_swap_space, found_node->swap_it->idx);
+
+ if (!IS_ERR(found_page)) {
+ lock_page(found_page);
+ dma_addr = dma_map_page(&mali_platform_device->dev, found_page,
+ 0, _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
+ dma_unmap_page(&mali_platform_device->dev, dma_addr,
+ _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
+
+ *pagep = found_page;
+ } else {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+int mali_mem_swap_cow_page_on_demand(mali_mem_backend *mem_bkend, u32 offset, struct page **pagep)
+{
+ struct mali_page_node *m_page, *found_node = NULL, *new_node = NULL;
+ mali_mem_cow *cow = NULL;
+ u32 i = 0;
+
+ MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type);
+ MALI_DEBUG_ASSERT(MALI_MEM_BACKEND_FLAG_SWAP_COWED == (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED));
+ MALI_DEBUG_ASSERT(MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN == (MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN & mem_bkend->flags));
+ MALI_DEBUG_ASSERT(!mali_memory_swap_backend_in_swapped_pool(mem_bkend));
+
+ cow = &mem_bkend->cow_mem;
+ list_for_each_entry(m_page, &cow->pages, list) {
+ if (i == offset) {
+ found_node = m_page;
+ break;
+ }
+ i++;
+ }
+
+ if (NULL == found_node) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ new_node = _mali_mem_swap_page_node_allocate();
+
+ if (NULL == new_node) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ new_node->swap_it->idx = mali_mem_swap_idx_alloc();
+
+ if (_MALI_OSK_BITMAP_INVALIDATE_INDEX == new_node->swap_it->idx) {
+ MALI_DEBUG_PRINT(1, ("Failed to allocate swap index in swap CoW on demand.\n"));
+ kfree(new_node->swap_it);
+ kfree(new_node);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ if (MALI_FALSE == mali_mem_swap_in_page_node(new_node)) {
+ _mali_mem_swap_page_node_free(new_node);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Swap in the found node so its contents can be copied in kernel space. */
+ if (MALI_FALSE == mali_mem_swap_in_page_node(found_node)) {
+ mali_mem_swap_out_page_node(new_node);
+ _mali_mem_swap_page_node_free(new_node);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ _mali_mem_cow_copy_page(found_node, new_node);
+
+ list_replace(&found_node->list, &new_node->list);
+
+ if (1 != _mali_page_node_get_ref_count(found_node)) {
+ atomic_add(1, &mem_bkend->mali_allocation->session->mali_mem_allocated_pages);
+ if (atomic_read(&mem_bkend->mali_allocation->session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > mem_bkend->mali_allocation->session->max_mali_mem_allocated_size) {
+ mem_bkend->mali_allocation->session->max_mali_mem_allocated_size = atomic_read(&mem_bkend->mali_allocation->session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
+ }
+ mem_bkend->cow_mem.change_pages_nr++;
+ }
+
+ mali_mem_swap_out_page_node(found_node);
+ _mali_mem_swap_page_node_free(found_node);
+
+ /* dma_map_page was called for this page when the new page node was swapped in, so unmap it before handing the page to the CPU. */
+ dma_unmap_page(&mali_platform_device->dev, new_node->swap_it->dma_addr,
+ _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
+
+ lock_page(new_node->swap_it->page);
+
+ *pagep = new_node->swap_it->page;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+#ifdef MALI_MEM_SWAP_TRACKING
+void mali_mem_swap_tracking(u32 *swap_pool_size, u32 *unlock_size)
+{
+ *swap_pool_size = mem_backend_swapped_pool_size;
+ *unlock_size = mem_backend_swapped_unlock_size;
+}
+#endif
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_swap_alloc.h b/drivers/gpu/arm/utgard/linux/mali_memory_swap_alloc.h
new file mode 100644
index 000000000000..a393ecce3a00
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_swap_alloc.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_SWAP_ALLOC_H__
+#define __MALI_MEMORY_SWAP_ALLOC_H__
+
+#include "mali_osk.h"
+#include "mali_session.h"
+
+#include "mali_memory_types.h"
+#include "mali_pp_job.h"
+
+/**
+ * Initialize memory swapping module.
+ */
+_mali_osk_errcode_t mali_mem_swap_init(void);
+
+void mali_mem_swap_term(void);
+
+/**
+ * Return the global shared memory file to other modules.
+ */
+struct file *mali_mem_swap_get_global_swap_file(void);
+
+/**
+ * Unlock the given memory backend so that its pages can be swapped out by the kernel.
+ */
+void mali_mem_swap_unlock_single_mem_backend(mali_mem_backend *mem_bkend);
+
+/**
+ * Remove the given memory backend from global swap list.
+ */
+void mali_memory_swap_list_backend_delete(mali_mem_backend *mem_bkend);
+
+/**
+ * Add the given memory backend to global swap list.
+ */
+void mali_memory_swap_list_backend_add(mali_mem_backend *mem_bkend);
+
+/**
+ * Allocate one index from the bitmap; it is used as a page index into the global swap file.
+ */
+u32 mali_mem_swap_idx_alloc(void);
+
+void mali_mem_swap_idx_free(u32 idx);
+
+/**
+ * Allocate a new swap item without page index.
+ */
+struct mali_swap_item *mali_mem_swap_alloc_swap_item(void);
+
+/**
+ * Free a swap item, truncate the corresponding space in the page cache, and free the page index.
+ */
+void mali_mem_swap_free_swap_item(mali_swap_item *swap_item);
+
+/**
+ * Allocate a page node with swap item.
+ */
+struct mali_page_node *_mali_mem_swap_page_node_allocate(void);
+
+/**
+ * Drop a reference on the given page node's swap item; the swap item is freed when its count reaches zero.
+ */
+_mali_osk_errcode_t _mali_mem_swap_put_page_node(struct mali_page_node *m_page);
+
+void _mali_mem_swap_page_node_free(struct mali_page_node *m_page);
+
+/**
+ * Free a swappable memory backend.
+ */
+u32 mali_mem_swap_free(mali_mem_swap *swap_mem);
+
+/**
+ * Unmap and free.
+ */
+u32 mali_mem_swap_release(mali_mem_backend *mem_bkend, mali_bool is_mali_mapped);
+
+/**
+ * Read in a page from the global swap file at the pre-allocated page index.
+ */
+mali_bool mali_mem_swap_in_page_node(struct mali_page_node *page_node);
+
+int mali_mem_swap_alloc_pages(mali_mem_swap *swap_mem, u32 size, u32 *bkend_idx);
+
+_mali_osk_errcode_t mali_mem_swap_mali_map(mali_mem_swap *swap_mem, struct mali_session_data *session, u32 vaddr, u32 props);
+
+void mali_mem_swap_mali_unmap(mali_mem_allocation *alloc);
+
+/**
+ * When a PP job is created, swap in all of the memory backends needed by that job.
+ */
+int mali_mem_swap_in_pages(struct mali_pp_job *job);
+
+/**
+ * Put all of the memory backends used by this PP job onto the global swap list.
+ */
+int mali_mem_swap_out_pages(struct mali_pp_job *job);
+
+/**
+ * Called from the page-fault handler to service CPU reads and writes.
+ */
+int mali_mem_swap_allocate_page_on_demand(mali_mem_backend *mem_bkend, u32 offset, struct page **pagep);
+
+/**
+ * Process CoW on demand for a swappable memory backend.
+ */
+int mali_mem_swap_cow_page_on_demand(mali_mem_backend *mem_bkend, u32 offset, struct page **pagep);
+
+#ifdef MALI_MEM_SWAP_TRACKING
+void mali_mem_swap_tracking(u32 *swap_pool_size, u32 *unlock_size);
+#endif
+#endif /* __MALI_MEMORY_SWAP_ALLOC_H__ */
+
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_types.h b/drivers/gpu/arm/utgard/linux/mali_memory_types.h
new file mode 100644
index 000000000000..82af8fed66c9
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_types.h
@@ -0,0 +1,208 @@
+/*
+ * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_TYPES_H__
+#define __MALI_MEMORY_TYPES_H__
+
+#include <linux/mm.h>
+
+#if defined(CONFIG_MALI400_UMP)
+#include "ump_kernel_interface.h"
+#endif
+
+typedef u32 mali_address_t;
+
+typedef enum mali_mem_type {
+ MALI_MEM_OS,
+ MALI_MEM_EXTERNAL,
+ MALI_MEM_SWAP,
+ MALI_MEM_DMA_BUF,
+ MALI_MEM_UMP,
+ MALI_MEM_BLOCK,
+ MALI_MEM_COW,
+ MALI_MEM_TYPE_MAX,
+} mali_mem_type;
+
+typedef struct mali_block_item {
+ /* For the block type, the block physical address is always page-size
+ * aligned, so the low 12 bits are used for the reference count.
+ */
+ unsigned long phy_addr;
+} mali_block_item;
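A standalone sketch of the packing that the comment above describes: a page-aligned physical address has its low 12 bits clear, so those bits can carry a small reference count (the address and count values here are made up):

    #include <stdio.h>

    #define BLOCK_REF_MASK 0xFFFUL

    int main(void)
    {
        unsigned long phy = 0x82340000UL;    /* page aligned: low 12 bits are zero */
        unsigned long packed = phy | 3UL;    /* stash a ref count of 3 */

        printf("addr=0x%lx refs=%lu\n", packed & ~BLOCK_REF_MASK, packed & BLOCK_REF_MASK);
        return 0;
    }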
+
+/**
+ * idx is used to locate the given page in the address space of swap file.
+ * ref_count is used to mark how many memory backends are using this item.
+ */
+typedef struct mali_swap_item {
+ u32 idx;
+ atomic_t ref_count;
+ struct page *page;
+ dma_addr_t dma_addr;
+} mali_swap_item;
+
+typedef enum mali_page_node_type {
+ MALI_PAGE_NODE_OS,
+ MALI_PAGE_NODE_BLOCK,
+ MALI_PAGE_NODE_SWAP,
+} mali_page_node_type;
+
+typedef struct mali_page_node {
+ struct list_head list;
+ union {
+ struct page *page;
+ mali_block_item *blk_it; /*pointer to block item*/
+ mali_swap_item *swap_it;
+ };
+
+ u32 type;
+} mali_page_node;
+
+typedef struct mali_mem_os_mem {
+ struct list_head pages;
+ u32 count;
+} mali_mem_os_mem;
+
+typedef struct mali_mem_dma_buf {
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+ struct mali_dma_buf_attachment *attachment;
+#endif
+} mali_mem_dma_buf;
+
+typedef struct mali_mem_external {
+ dma_addr_t phys;
+ u32 size;
+} mali_mem_external;
+
+typedef struct mali_mem_ump {
+#if defined(CONFIG_MALI400_UMP)
+ ump_dd_handle handle;
+#endif
+} mali_mem_ump;
+
+typedef struct block_allocator_allocation {
+ /* The list will be released in reverse order */
+ struct block_info *last_allocated;
+ u32 mapping_length;
+ struct block_allocator *info;
+} block_allocator_allocation;
+
+typedef struct mali_mem_block_mem {
+ struct list_head pfns;
+ u32 count;
+} mali_mem_block_mem;
+
+typedef struct mali_mem_virt_mali_mapping {
+ mali_address_t addr; /* Virtual Mali address */
+ u32 properties; /* MMU Permissions + cache, must match MMU HW */
+} mali_mem_virt_mali_mapping;
+
+typedef struct mali_mem_virt_cpu_mapping {
+ void __user *addr;
+ struct vm_area_struct *vma;
+} mali_mem_virt_cpu_mapping;
+
+#define MALI_MEM_ALLOCATION_VALID_MAGIC 0xdeda110c
+#define MALI_MEM_ALLOCATION_FREED_MAGIC 0x10101010
+
+typedef struct mali_mm_node {
+ /* Mali GPU vaddr start; u32 because the MMU only supports 32-bit addresses */
+ uint32_t start; /* GPU vaddr */
+ uint32_t size; /* GPU allocation virtual size */
+ unsigned allocated : 1;
+} mali_mm_node;
+
+typedef struct mali_vma_node {
+ struct mali_mm_node vm_node;
+ struct rb_node vm_rb;
+} mali_vma_node;
+
+
+typedef struct mali_mem_allocation {
+ MALI_DEBUG_CODE(u32 magic);
+ mali_mem_type type; /**< Type of memory */
+ u32 flags; /**< Flags for this allocation */
+
+ struct mali_session_data *session; /**< Pointer to session that owns the allocation */
+
+ mali_mem_virt_cpu_mapping cpu_mapping; /**< CPU mapping */
+ mali_mem_virt_mali_mapping mali_mapping; /**< Mali mapping */
+
+ /* add for new memory system */
+ struct mali_vma_node mali_vma_node;
+ u32 vsize; /* virtual size*/
+ u32 psize; /* physical backend memory size*/
+ struct list_head list;
+ s32 backend_handle; /* idr for mem_backend */
+ _mali_osk_atomic_t mem_alloc_refcount;
+} mali_mem_allocation;
+
+struct mali_mem_os_allocator {
+ spinlock_t pool_lock;
+ struct list_head pool_pages;
+ size_t pool_count;
+
+ atomic_t allocated_pages;
+ size_t allocation_limit;
+
+ struct shrinker shrinker;
+ struct delayed_work timed_shrinker;
+ struct workqueue_struct *wq;
+};
+
+/* COW backend memory type */
+typedef struct mali_mem_cow {
+ struct list_head pages; /**< All pages for this CoW backend allocation,
+ including newly allocated pages for the modified range */
+ u32 count; /**< number of pages */
+ s32 change_pages_nr;
+} mali_mem_cow;
+
+typedef struct mali_mem_swap {
+ struct list_head pages;
+ u32 count;
+} mali_mem_swap;
+
+#define MALI_MEM_BACKEND_FLAG_COWED (0x1) /* CoW has happened on this backend */
+#define MALI_MEM_BACKEND_FLAG_COW_CPU_NO_WRITE (0x2) /* This is a CoW backend, mapped so the CPU is not allowed to write */
+#define MALI_MEM_BACKEND_FLAG_SWAP_COWED (0x4) /* Marks that the given backend was CoWed from swappable memory. */
+/* Marks this backend as not swapped in by the Mali driver; before using it,
+ * we must swap it in and set up the corresponding page table entries. */
+#define MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN (0x8)
+#define MALI_MEM_BACKEND_FLAG_NOT_BINDED (0x1 << 5) /* This backend is not backed by physical memory; used for deferred binding */
+#define MALI_MEM_BACKEND_FLAG_BINDED (0x1 << 6) /* This backend is backed by physical memory; used for deferred binding */
+
+typedef struct mali_mem_backend {
+ mali_mem_type type; /**< Type of backend memory */
+ u32 flags; /**< Flags for this allocation */
+ u32 size;
+ /* Union selected by type. */
+ union {
+ mali_mem_os_mem os_mem; /**< MALI_MEM_OS */
+ mali_mem_external ext_mem; /**< MALI_MEM_EXTERNAL */
+ mali_mem_dma_buf dma_buf; /**< MALI_MEM_DMA_BUF */
+ mali_mem_ump ump_mem; /**< MALI_MEM_UMP */
+ mali_mem_block_mem block_mem; /**< MALI_MEM_BLOCK */
+ mali_mem_cow cow_mem;
+ mali_mem_swap swap_mem;
+ };
+ mali_mem_allocation *mali_allocation;
+ struct mutex mutex;
+ mali_mem_type cow_type;
+
+ struct list_head list; /**< Used to link swappable memory backend to the global swappable list */
+ int using_count; /**< Mark how many PP jobs are using this memory backend */
+ u32 start_idx; /**< If the corresponding vma of this backend is linear, this value will be used to set vma->vm_pgoff */
+} mali_mem_backend;
+
+#define MALI_MEM_FLAG_MALI_GUARD_PAGE (_MALI_MAP_EXTERNAL_MAP_GUARD_PAGE)
+#define MALI_MEM_FLAG_DONT_CPU_MAP (1 << 1)
+#define MALI_MEM_FLAG_CAN_RESIZE (_MALI_MEMORY_ALLOCATE_RESIZEABLE)
+#endif /* __MALI_MEMORY_TYPES__ */
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_ump.c b/drivers/gpu/arm/utgard/linux/mali_memory_ump.c
new file mode 100644
index 000000000000..bacadc85e47e
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_ump.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2012-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_kernel_linux.h"
+#include "mali_memory.h"
+#include "ump_kernel_interface.h"
+
+static int mali_mem_ump_map(mali_mem_backend *mem_backend)
+{
+ ump_dd_handle ump_mem;
+ mali_mem_allocation *alloc;
+ struct mali_session_data *session;
+ u32 nr_blocks;
+ u32 i;
+ ump_dd_physical_block *ump_blocks;
+ struct mali_page_directory *pagedir;
+ u32 offset = 0;
+ _mali_osk_errcode_t err;
+
+ MALI_DEBUG_ASSERT_POINTER(mem_backend);
+ MALI_DEBUG_ASSERT(MALI_MEM_UMP == mem_backend->type);
+
+ alloc = mem_backend->mali_allocation;
+ MALI_DEBUG_ASSERT_POINTER(alloc);
+
+ session = alloc->session;
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ ump_mem = mem_backend->ump_mem.handle;
+ MALI_DEBUG_ASSERT(UMP_DD_HANDLE_INVALID != ump_mem);
+
+ nr_blocks = ump_dd_phys_block_count_get(ump_mem);
+ if (nr_blocks == 0) {
+ MALI_DEBUG_PRINT(1, ("No block count\n"));
+ return -EINVAL;
+ }
+
+ ump_blocks = _mali_osk_malloc(sizeof(*ump_blocks) * nr_blocks);
+ if (NULL == ump_blocks) {
+ return -ENOMEM;
+ }
+
+ if (UMP_DD_INVALID == ump_dd_phys_blocks_get(ump_mem, ump_blocks, nr_blocks)) {
+ _mali_osk_free(ump_blocks);
+ return -EFAULT;
+ }
+
+ pagedir = session->page_directory;
+
+ mali_session_memory_lock(session);
+
+ err = mali_mem_mali_map_prepare(alloc);
+ if (_MALI_OSK_ERR_OK != err) {
+ MALI_DEBUG_PRINT(1, ("Mapping of UMP memory failed\n"));
+
+ _mali_osk_free(ump_blocks);
+ mali_session_memory_unlock(session);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < nr_blocks; ++i) {
+ u32 virt = alloc->mali_vma_node.vm_node.start + offset;
+
+ MALI_DEBUG_PRINT(7, ("Mapping in 0x%08x size %d\n", ump_blocks[i].addr, ump_blocks[i].size));
+
+ mali_mmu_pagedir_update(pagedir, virt, ump_blocks[i].addr,
+ ump_blocks[i].size, MALI_MMU_FLAGS_DEFAULT);
+
+ offset += ump_blocks[i].size;
+ }
+
+ if (alloc->flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) {
+ u32 virt = alloc->mali_vma_node.vm_node.start + offset;
+
+ /* Map in an extra virtual guard page at the end of the VMA */
+ MALI_DEBUG_PRINT(6, ("Mapping in extra guard page\n"));
+
+ mali_mmu_pagedir_update(pagedir, virt, ump_blocks[0].addr, _MALI_OSK_MALI_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT);
+
+ offset += _MALI_OSK_MALI_PAGE_SIZE;
+ }
+ mali_session_memory_unlock(session);
+ _mali_osk_free(ump_blocks);
+ return 0;
+}
+
+static void mali_mem_ump_unmap(mali_mem_allocation *alloc)
+{
+ struct mali_session_data *session;
+ MALI_DEBUG_ASSERT_POINTER(alloc);
+ session = alloc->session;
+ MALI_DEBUG_ASSERT_POINTER(session);
+ mali_session_memory_lock(session);
+ mali_mem_mali_map_free(session, alloc->psize, alloc->mali_vma_node.vm_node.start,
+ alloc->flags);
+ mali_session_memory_unlock(session);
+}
+
+int mali_mem_bind_ump_buf(mali_mem_allocation *alloc, mali_mem_backend *mem_backend, u32 secure_id, u32 flags)
+{
+ ump_dd_handle ump_mem;
+ int ret;
+ MALI_DEBUG_ASSERT_POINTER(alloc);
+ MALI_DEBUG_ASSERT_POINTER(mem_backend);
+ MALI_DEBUG_ASSERT(MALI_MEM_UMP == mem_backend->type);
+
+ MALI_DEBUG_PRINT(3,
+ ("Requested to map ump memory with secure id %d into virtual memory 0x%08X, size 0x%08X\n",
+ secure_id, alloc->mali_vma_node.vm_node.start, alloc->mali_vma_node.vm_node.size));
+
+ ump_mem = ump_dd_handle_create_from_secure_id(secure_id);
+ if (UMP_DD_HANDLE_INVALID == ump_mem) MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ alloc->flags |= MALI_MEM_FLAG_DONT_CPU_MAP;
+ if (flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) {
+ alloc->flags |= MALI_MEM_FLAG_MALI_GUARD_PAGE;
+ }
+
+ mem_backend->ump_mem.handle = ump_mem;
+
+ ret = mali_mem_ump_map(mem_backend);
+ if (0 != ret) {
+ ump_dd_reference_release(ump_mem);
+ return _MALI_OSK_ERR_FAULT;
+ }
+ MALI_DEBUG_PRINT(3, ("Returning from UMP bind\n"));
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_mem_unbind_ump_buf(mali_mem_backend *mem_backend)
+{
+ ump_dd_handle ump_mem;
+ mali_mem_allocation *alloc;
+ MALI_DEBUG_ASSERT_POINTER(mem_backend);
+ MALI_DEBUG_ASSERT(MALI_MEM_UMP == mem_backend->type);
+ ump_mem = mem_backend->ump_mem.handle;
+ MALI_DEBUG_ASSERT(UMP_DD_HANDLE_INVALID != ump_mem);
+
+ alloc = mem_backend->mali_allocation;
+ MALI_DEBUG_ASSERT_POINTER(alloc);
+ mali_mem_ump_unmap(alloc);
+ ump_dd_reference_release(ump_mem);
+}
+
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_ump.h b/drivers/gpu/arm/utgard/linux/mali_memory_ump.h
new file mode 100644
index 000000000000..4adb70df884e
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_ump.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2011-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_UMP_BUF_H__
+#define __MALI_MEMORY_UMP_BUF_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "mali_uk_types.h"
+#include "mali_osk.h"
+#include "mali_memory.h"
+
+int mali_mem_bind_ump_buf(mali_mem_allocation *alloc, mali_mem_backend *mem_backend, u32 secure_id, u32 flags);
+void mali_mem_unbind_ump_buf(mali_mem_backend *mem_backend);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_MEMORY_UMP_BUF_H__ */
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_util.c b/drivers/gpu/arm/utgard/linux/mali_memory_util.c
new file mode 100644
index 000000000000..bcca43c09f0b
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_util.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/fs.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_kernel_linux.h"
+#include "mali_scheduler.h"
+
+#include "mali_memory.h"
+#include "mali_memory_os_alloc.h"
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+#include "mali_memory_dma_buf.h"
+#endif
+#if defined(CONFIG_MALI400_UMP)
+#include "mali_memory_ump.h"
+#endif
+#include "mali_memory_external.h"
+#include "mali_memory_manager.h"
+#include "mali_memory_virtual.h"
+#include "mali_memory_cow.h"
+#include "mali_memory_block_alloc.h"
+#include "mali_memory_swap_alloc.h"
+
+/**
+* _mali_free_allocation_mem() - Free a memory allocation.
+*/
+static u32 _mali_free_allocation_mem(mali_mem_allocation *mali_alloc)
+{
+ mali_mem_backend *mem_bkend = NULL;
+ u32 free_pages_nr = 0;
+
+ struct mali_session_data *session = mali_alloc->session;
+ MALI_DEBUG_PRINT(4, (" _mali_free_allocation_mem, psize =0x%x! \n", mali_alloc->psize));
+ if (0 == mali_alloc->psize)
+ goto out;
+
+ /* Get backend memory & Map on CPU */
+ mutex_lock(&mali_idr_mutex);
+ mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle);
+ mutex_unlock(&mali_idr_mutex);
+ MALI_DEBUG_ASSERT(NULL != mem_bkend);
+
+ switch (mem_bkend->type) {
+ case MALI_MEM_OS:
+ free_pages_nr = mali_mem_os_release(mem_bkend);
+ atomic_sub(free_pages_nr, &session->mali_mem_allocated_pages);
+ break;
+ case MALI_MEM_UMP:
+#if defined(CONFIG_MALI400_UMP)
+ mali_mem_unbind_ump_buf(mem_bkend);
+ atomic_sub(mem_bkend->size / MALI_MMU_PAGE_SIZE, &session->mali_mem_array[mem_bkend->type]);
+#else
+ MALI_DEBUG_PRINT(2, ("UMP not supported\n"));
+#endif
+ break;
+ case MALI_MEM_DMA_BUF:
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+ mali_mem_unbind_dma_buf(mem_bkend);
+ atomic_sub(mem_bkend->size / MALI_MMU_PAGE_SIZE, &session->mali_mem_array[mem_bkend->type]);
+#else
+ MALI_DEBUG_PRINT(2, ("DMA not supported\n"));
+#endif
+ break;
+ case MALI_MEM_EXTERNAL:
+ mali_mem_unbind_ext_buf(mem_bkend);
+ atomic_sub(mem_bkend->size / MALI_MMU_PAGE_SIZE, &session->mali_mem_array[mem_bkend->type]);
+ break;
+
+ case MALI_MEM_BLOCK:
+ free_pages_nr = mali_mem_block_release(mem_bkend);
+ atomic_sub(free_pages_nr, &session->mali_mem_allocated_pages);
+ break;
+
+ case MALI_MEM_COW:
+ if (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED) {
+ free_pages_nr = mali_mem_swap_release(mem_bkend, MALI_TRUE);
+ } else {
+ free_pages_nr = mali_mem_cow_release(mem_bkend, MALI_TRUE);
+ }
+ atomic_sub(free_pages_nr, &session->mali_mem_allocated_pages);
+ break;
+ case MALI_MEM_SWAP:
+ free_pages_nr = mali_mem_swap_release(mem_bkend, MALI_TRUE);
+ atomic_sub(free_pages_nr, &session->mali_mem_allocated_pages);
+ atomic_sub(free_pages_nr, &session->mali_mem_array[mem_bkend->type]);
+ break;
+ default:
+ MALI_DEBUG_PRINT(1, ("mem type %d is not in the mali_mem_type enum.\n", mem_bkend->type));
+ break;
+ }
+
+ /* Remove backend memory index */
+ mutex_lock(&mali_idr_mutex);
+ idr_remove(&mali_backend_idr, mali_alloc->backend_handle);
+ mutex_unlock(&mali_idr_mutex);
+ kfree(mem_bkend);
+out:
+ /* remove memory allocation */
+ mali_vma_offset_remove(&session->allocation_mgr, &mali_alloc->mali_vma_node);
+ mali_mem_allocation_struct_destory(mali_alloc);
+ return free_pages_nr;
+}
+
+/**
+* Drop a reference on the allocation; the memory is freed when the refcount reaches zero.
+*/
+u32 mali_allocation_unref(struct mali_mem_allocation **alloc)
+{
+ u32 free_pages_nr = 0;
+ mali_mem_allocation *mali_alloc = *alloc;
+ *alloc = NULL;
+ if (0 == _mali_osk_atomic_dec_return(&mali_alloc->mem_alloc_refcount)) {
+ free_pages_nr = _mali_free_allocation_mem(mali_alloc);
+ }
+ return free_pages_nr;
+}
+
+void mali_allocation_ref(struct mali_mem_allocation *alloc)
+{
+ _mali_osk_atomic_inc(&alloc->mem_alloc_refcount);
+}
+
+void mali_free_session_allocations(struct mali_session_data *session)
+{
+ struct mali_mem_allocation *entry, *next;
+
+ MALI_DEBUG_PRINT(4, (" mali_free_session_allocations! \n"));
+
+ list_for_each_entry_safe(entry, next, &session->allocation_mgr.head, list) {
+ mali_allocation_unref(&entry);
+ }
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_util.h b/drivers/gpu/arm/utgard/linux/mali_memory_util.h
new file mode 100644
index 000000000000..0d686979d7a8
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_util.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_UTIL_H__
+#define __MALI_MEMORY_UTIL_H__
+
+u32 mali_allocation_unref(struct mali_mem_allocation **alloc);
+
+void mali_allocation_ref(struct mali_mem_allocation *alloc);
+
+void mali_free_session_allocations(struct mali_session_data *session);
+
+#endif
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_virtual.c b/drivers/gpu/arm/utgard/linux/mali_memory_virtual.c
new file mode 100644
index 000000000000..35f8f90a444d
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_virtual.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/fs.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_kernel_linux.h"
+#include "mali_scheduler.h"
+#include "mali_memory_os_alloc.h"
+#include "mali_memory_manager.h"
+#include "mali_memory_virtual.h"
+
+
+/**
+* Internal helper to link a node into the rb-tree.
+*/
+static inline void _mali_vma_offset_add_rb(struct mali_allocation_manager *mgr,
+ struct mali_vma_node *node)
+{
+ struct rb_node **iter = &mgr->allocation_mgr_rb.rb_node;
+ struct rb_node *parent = NULL;
+ struct mali_vma_node *iter_node;
+
+ while (likely(*iter)) {
+ parent = *iter;
+ iter_node = rb_entry(*iter, struct mali_vma_node, vm_rb);
+
+ if (node->vm_node.start < iter_node->vm_node.start)
+ iter = &(*iter)->rb_left;
+ else if (node->vm_node.start > iter_node->vm_node.start)
+ iter = &(*iter)->rb_right;
+ else
+ MALI_DEBUG_ASSERT(0);
+ }
+
+ rb_link_node(&node->vm_rb, parent, iter);
+ rb_insert_color(&node->vm_rb, &mgr->allocation_mgr_rb);
+}
+
+/**
+ * mali_vma_offset_add() - Add offset node to RB Tree
+ */
+int mali_vma_offset_add(struct mali_allocation_manager *mgr,
+ struct mali_vma_node *node)
+{
+ int ret = 0;
+ write_lock(&mgr->vm_lock);
+
+ if (node->vm_node.allocated) {
+ goto out;
+ }
+
+ _mali_vma_offset_add_rb(mgr, node);
+ /* set to allocated */
+ node->vm_node.allocated = 1;
+
+out:
+ write_unlock(&mgr->vm_lock);
+ return ret;
+}
+
+/**
+ * mali_vma_offset_remove() - Remove offset node from RB tree
+ */
+void mali_vma_offset_remove(struct mali_allocation_manager *mgr,
+ struct mali_vma_node *node)
+{
+ write_lock(&mgr->vm_lock);
+
+ if (node->vm_node.allocated) {
+ rb_erase(&node->vm_rb, &mgr->allocation_mgr_rb);
+ memset(&node->vm_node, 0, sizeof(node->vm_node));
+ }
+ write_unlock(&mgr->vm_lock);
+}
+
+/**
+* mali_vma_offset_search - Search the node in RB tree
+*/
+struct mali_vma_node *mali_vma_offset_search(struct mali_allocation_manager *mgr,
+ unsigned long start, unsigned long pages)
+{
+ struct mali_vma_node *node, *best;
+ struct rb_node *iter;
+ unsigned long offset;
+ read_lock(&mgr->vm_lock);
+
+ iter = mgr->allocation_mgr_rb.rb_node;
+ best = NULL;
+
+ while (likely(iter)) {
+ node = rb_entry(iter, struct mali_vma_node, vm_rb);
+ offset = node->vm_node.start;
+ if (start >= offset) {
+ iter = iter->rb_right;
+ best = node;
+ if (start == offset)
+ break;
+ } else {
+ iter = iter->rb_left;
+ }
+ }
+
+ if (best) {
+ offset = best->vm_node.start + best->vm_node.size;
+ if (offset <= start + pages)
+ best = NULL;
+ }
+ read_unlock(&mgr->vm_lock);
+
+ return best;
+}
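The search above keeps the candidate with the greatest start <= the queried address, then rejects it if the address lies past its end. A standalone sketch of the same predecessor search, with the pages argument taken as zero and a sorted array standing in for the rb-tree:

    #include <stdio.h>

    struct node {
        unsigned long start, size;
    };

    static const struct node *vma_search(const struct node *v, int n, unsigned long addr)
    {
        const struct node *best = NULL;
        int lo = 0, hi = n - 1;

        while (lo <= hi) {
            int mid = (lo + hi) / 2;

            if (addr >= v[mid].start) {
                best = &v[mid];   /* best candidate so far */
                lo = mid + 1;
            } else {
                hi = mid - 1;
            }
        }

        /* reject when addr is at or past the end of the candidate */
        if (best && best->start + best->size <= addr)
            best = NULL;
        return best;
    }

    int main(void)
    {
        struct node v[] = { { 0x1000, 0x2000 }, { 0x8000, 0x1000 } };

        printf("%s\n", vma_search(v, 2, 0x2000) ? "hit" : "miss");   /* hit  */
        printf("%s\n", vma_search(v, 2, 0x4000) ? "hit" : "miss");   /* miss */
        return 0;
    }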
+
diff --git a/drivers/gpu/arm/utgard/linux/mali_memory_virtual.h b/drivers/gpu/arm/utgard/linux/mali_memory_virtual.h
new file mode 100644
index 000000000000..3e2d48f44e28
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_memory_virtual.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef __MALI_GPU_VMEM_H__
+#define __MALI_GPU_VMEM_H__
+
+#include "mali_osk.h"
+#include "mali_session.h"
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include "mali_memory_types.h"
+#include "mali_memory_os_alloc.h"
+#include "mali_memory_manager.h"
+
+
+
+int mali_vma_offset_add(struct mali_allocation_manager *mgr,
+ struct mali_vma_node *node);
+
+void mali_vma_offset_remove(struct mali_allocation_manager *mgr,
+ struct mali_vma_node *node);
+
+struct mali_vma_node *mali_vma_offset_search(struct mali_allocation_manager *mgr,
+ unsigned long start, unsigned long pages);
+
+#endif
diff --git a/drivers/gpu/arm/utgard/linux/mali_osk_atomics.c b/drivers/gpu/arm/utgard/linux/mali_osk_atomics.c
new file mode 100644
index 000000000000..ba630e2730cf
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_osk_atomics.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2010, 2013-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_atomics.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <asm/atomic.h>
+#include "mali_kernel_common.h"
+
+void _mali_osk_atomic_dec(_mali_osk_atomic_t *atom)
+{
+ atomic_dec((atomic_t *)&atom->u.val);
+}
+
+u32 _mali_osk_atomic_dec_return(_mali_osk_atomic_t *atom)
+{
+ return atomic_dec_return((atomic_t *)&atom->u.val);
+}
+
+void _mali_osk_atomic_inc(_mali_osk_atomic_t *atom)
+{
+ atomic_inc((atomic_t *)&atom->u.val);
+}
+
+u32 _mali_osk_atomic_inc_return(_mali_osk_atomic_t *atom)
+{
+ return atomic_inc_return((atomic_t *)&atom->u.val);
+}
+
+void _mali_osk_atomic_init(_mali_osk_atomic_t *atom, u32 val)
+{
+ MALI_DEBUG_ASSERT_POINTER(atom);
+ atomic_set((atomic_t *)&atom->u.val, val);
+}
+
+u32 _mali_osk_atomic_read(_mali_osk_atomic_t *atom)
+{
+ return atomic_read((atomic_t *)&atom->u.val);
+}
+
+void _mali_osk_atomic_term(_mali_osk_atomic_t *atom)
+{
+ MALI_IGNORE(atom);
+}
+
+u32 _mali_osk_atomic_xchg(_mali_osk_atomic_t *atom, u32 val)
+{
+ return atomic_xchg((atomic_t *)&atom->u.val, val);
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_osk_bitmap.c b/drivers/gpu/arm/utgard/linux/mali_osk_bitmap.c
new file mode 100644
index 000000000000..01ca38235b20
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_osk_bitmap.c
@@ -0,0 +1,152 @@
+/*
+ * Copyright (C) 2010, 2013-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_bitmap.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/bitmap.h>
+#include <linux/vmalloc.h>
+#include "common/mali_kernel_common.h"
+#include "mali_osk_types.h"
+#include "mali_osk.h"
+
+u32 _mali_osk_bitmap_alloc(struct _mali_osk_bitmap *bitmap)
+{
+ u32 obj;
+
+ MALI_DEBUG_ASSERT_POINTER(bitmap);
+
+ _mali_osk_spinlock_lock(bitmap->lock);
+
+ obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->reserve);
+
+ if (obj < bitmap->max) {
+ set_bit(obj, bitmap->table);
+ } else {
+ obj = -1;
+ }
+
+ if (obj != -1)
+ --bitmap->avail;
+ _mali_osk_spinlock_unlock(bitmap->lock);
+
+ return obj;
+}
+
+void _mali_osk_bitmap_free(struct _mali_osk_bitmap *bitmap, u32 obj)
+{
+ MALI_DEBUG_ASSERT_POINTER(bitmap);
+
+ _mali_osk_bitmap_free_range(bitmap, obj, 1);
+}
+
+u32 _mali_osk_bitmap_alloc_range(struct _mali_osk_bitmap *bitmap, int cnt)
+{
+ u32 obj;
+
+ MALI_DEBUG_ASSERT_POINTER(bitmap);
+
+ if (0 >= cnt) {
+ return -1;
+ }
+
+ if (1 == cnt) {
+ return _mali_osk_bitmap_alloc(bitmap);
+ }
+
+ _mali_osk_spinlock_lock(bitmap->lock);
+ obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
+ bitmap->last, cnt, 0);
+
+ if (obj >= bitmap->max) {
+ obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
+ bitmap->reserve, cnt, 0);
+ }
+
+ if (obj < bitmap->max) {
+ bitmap_set(bitmap->table, obj, cnt);
+
+ bitmap->last = (obj + cnt);
+ if (bitmap->last >= bitmap->max) {
+ bitmap->last = bitmap->reserve;
+ }
+ } else {
+ obj = -1;
+ }
+
+ if (obj != -1) {
+ bitmap->avail -= cnt;
+ }
+
+ _mali_osk_spinlock_unlock(bitmap->lock);
+
+ return obj;
+}
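The range allocator above is a next-fit scan: it first searches from `last`, then retries once from `reserve` before giving up. A standalone sketch of the same policy over a byte-per-bit table (all sizes are made up):

    #include <stdio.h>
    #include <string.h>

    #define MAX 16

    static char table[MAX];              /* 0 = free, 1 = taken */
    static int last, reserve;

    static int alloc_range(int cnt)
    {
        int starts[2], s, i, j;

        starts[0] = last;                /* next-fit: resume where we left off */
        starts[1] = reserve;             /* wrap around once */

        for (j = 0; j < 2; j++) {
            for (s = starts[j]; s + cnt <= MAX; s++) {
                for (i = 0; i < cnt && !table[s + i]; i++)
                    ;
                if (i == cnt) {
                    memset(table + s, 1, cnt);
                    last = (s + cnt >= MAX) ? reserve : s + cnt;
                    return s;
                }
            }
        }
        return -1;                       /* no contiguous run found */
    }

    int main(void)
    {
        int a, b;

        reserve = last = 2;              /* indices below 2 stay reserved */
        a = alloc_range(3);
        b = alloc_range(3);
        printf("%d %d\n", a, b);         /* 2 then 5 */
        return 0;
    }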
+
+u32 _mali_osk_bitmap_avail(struct _mali_osk_bitmap *bitmap)
+{
+ MALI_DEBUG_ASSERT_POINTER(bitmap);
+
+ return bitmap->avail;
+}
+
+void _mali_osk_bitmap_free_range(struct _mali_osk_bitmap *bitmap, u32 obj, int cnt)
+{
+ MALI_DEBUG_ASSERT_POINTER(bitmap);
+
+ _mali_osk_spinlock_lock(bitmap->lock);
+ bitmap_clear(bitmap->table, obj, cnt);
+ bitmap->last = min(bitmap->last, obj);
+
+ bitmap->avail += cnt;
+ _mali_osk_spinlock_unlock(bitmap->lock);
+}
+
+int _mali_osk_bitmap_init(struct _mali_osk_bitmap *bitmap, u32 num, u32 reserve)
+{
+ MALI_DEBUG_ASSERT_POINTER(bitmap);
+ MALI_DEBUG_ASSERT(reserve <= num);
+
+ bitmap->reserve = reserve;
+ bitmap->last = reserve;
+ bitmap->max = num;
+ bitmap->avail = num - reserve;
+ bitmap->lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_UNORDERED, _MALI_OSK_LOCK_ORDER_FIRST);
+ if (!bitmap->lock) {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+ bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) *
+ sizeof(long), GFP_KERNEL);
+ if (!bitmap->table) {
+ _mali_osk_spinlock_term(bitmap->lock);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
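The table above is sized with BITS_TO_LONGS, which rounds the bit count up to whole longs. A standalone sketch of that arithmetic (the macro here mirrors the kernel's definition; the bit count is hypothetical):

    #include <stdio.h>

    #define BITS_PER_LONG (8 * sizeof(long))
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    int main(void)
    {
        /* e.g. a 4100-bit map needs 65 longs (520 bytes) on a 64-bit kernel */
        printf("%lu longs, %lu bytes\n",
               (unsigned long)BITS_TO_LONGS(4100),
               (unsigned long)(BITS_TO_LONGS(4100) * sizeof(long)));
        return 0;
    }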
+
+void _mali_osk_bitmap_term(struct _mali_osk_bitmap *bitmap)
+{
+ MALI_DEBUG_ASSERT_POINTER(bitmap);
+
+ if (NULL != bitmap->lock) {
+ _mali_osk_spinlock_term(bitmap->lock);
+ }
+
+ if (NULL != bitmap->table) {
+ kfree(bitmap->table);
+ }
+}
+
diff --git a/drivers/gpu/arm/utgard/linux/mali_osk_irq.c b/drivers/gpu/arm/utgard/linux/mali_osk_irq.c
new file mode 100644
index 000000000000..539832d9125a
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_osk_irq.c
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_irq.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/slab.h> /* For memory allocation */
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+typedef struct _mali_osk_irq_t_struct {
+ u32 irqnum;
+ void *data;
+ _mali_osk_irq_uhandler_t uhandler;
+} mali_osk_irq_object_t;
+
+typedef irqreturn_t (*irq_handler_func_t)(int, void *, struct pt_regs *);
+static irqreturn_t irq_handler_upper_half(int port_name, void *dev_id); /* , struct pt_regs *regs*/
+
+#if defined(DEBUG)
+
+struct test_interrupt_data {
+ _mali_osk_irq_ack_t ack_func;
+ void *probe_data;
+ mali_bool interrupt_received;
+ wait_queue_head_t wq;
+};
+
+static irqreturn_t test_interrupt_upper_half(int port_name, void *dev_id)
+{
+ irqreturn_t ret = IRQ_NONE;
+ struct test_interrupt_data *data = (struct test_interrupt_data *)dev_id;
+
+ if (_MALI_OSK_ERR_OK == data->ack_func(data->probe_data)) {
+ data->interrupt_received = MALI_TRUE;
+ wake_up(&data->wq);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static _mali_osk_errcode_t test_interrupt(u32 irqnum,
+ _mali_osk_irq_trigger_t trigger_func,
+ _mali_osk_irq_ack_t ack_func,
+ void *probe_data,
+ const char *description)
+{
+ unsigned long irq_flags = 0;
+ struct test_interrupt_data data = {
+ .ack_func = ack_func,
+ .probe_data = probe_data,
+ .interrupt_received = MALI_FALSE,
+ };
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ irq_flags |= IRQF_SHARED;
+#endif /* defined(CONFIG_MALI_SHARED_INTERRUPTS) */
+
+ if (0 != request_irq(irqnum, test_interrupt_upper_half, irq_flags, description, &data)) {
+ MALI_DEBUG_PRINT(2, ("Unable to install test IRQ handler for core '%s'\n", description));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ init_waitqueue_head(&data.wq);
+
+ trigger_func(probe_data);
+ wait_event_timeout(data.wq, data.interrupt_received, 100);
+
+ free_irq(irqnum, &data);
+
+ if (data.interrupt_received) {
+ MALI_DEBUG_PRINT(3, ("%s: Interrupt test OK\n", description));
+ return _MALI_OSK_ERR_OK;
+ } else {
+ MALI_PRINT_ERROR(("%s: Failed interrupt test on %u\n", description, irqnum));
+ return _MALI_OSK_ERR_FAULT;
+ }
+}
+
+#endif /* defined(DEBUG) */
+
+_mali_osk_irq_t *_mali_osk_irq_init(u32 irqnum, _mali_osk_irq_uhandler_t uhandler, void *int_data, _mali_osk_irq_trigger_t trigger_func, _mali_osk_irq_ack_t ack_func, void *probe_data, const char *description)
+{
+ mali_osk_irq_object_t *irq_object;
+ unsigned long irq_flags = 0;
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ irq_flags |= IRQF_SHARED;
+#endif /* defined(CONFIG_MALI_SHARED_INTERRUPTS) */
+
+ irq_object = kmalloc(sizeof(mali_osk_irq_object_t), GFP_KERNEL);
+ if (NULL == irq_object) {
+ return NULL;
+ }
+
+ if (-1 == irqnum) {
+ /* Probe for IRQ */
+ if ((NULL != trigger_func) && (NULL != ack_func)) {
+ unsigned long probe_count = 3;
+ _mali_osk_errcode_t err;
+ int irq;
+
+ MALI_DEBUG_PRINT(2, ("Probing for irq\n"));
+
+ do {
+ unsigned long mask;
+
+ mask = probe_irq_on();
+ trigger_func(probe_data);
+
+ _mali_osk_time_ubusydelay(5);
+
+ irq = probe_irq_off(mask);
+ err = ack_func(probe_data);
+ } while (irq < 0 && (err == _MALI_OSK_ERR_OK) && probe_count--);
+
+ if (irq < 0 || (_MALI_OSK_ERR_OK != err)) irqnum = -1;
+ else irqnum = irq;
+ } else irqnum = -1; /* no probe functions, fault */
+
+ if (-1 != irqnum) {
+ /* found an irq */
+ MALI_DEBUG_PRINT(2, ("Found irq %d\n", irqnum));
+ } else {
+ MALI_DEBUG_PRINT(2, ("Probe for irq failed\n"));
+ }
+ }
+
+ irq_object->irqnum = irqnum;
+ irq_object->uhandler = uhandler;
+ irq_object->data = int_data;
+
+ if (-1 == irqnum) {
+ MALI_DEBUG_PRINT(2, ("No IRQ for core '%s' found during probe\n", description));
+ kfree(irq_object);
+ return NULL;
+ }
+
+#if defined(DEBUG)
+ /* Verify that the configured interrupt settings are working */
+ if (_MALI_OSK_ERR_OK != test_interrupt(irqnum, trigger_func, ack_func, probe_data, description)) {
+ MALI_DEBUG_PRINT(2, ("Test of IRQ(%d) handler for core '%s' failed\n", irqnum, description));
+ kfree(irq_object);
+ return NULL;
+ }
+#endif
+
+ if (0 != request_irq(irqnum, irq_handler_upper_half, irq_flags, description, irq_object)) {
+ MALI_DEBUG_PRINT(2, ("Unable to install IRQ handler for core '%s'\n", description));
+ kfree(irq_object);
+ return NULL;
+ }
+
+ return irq_object;
+}
+
+void _mali_osk_irq_term(_mali_osk_irq_t *irq)
+{
+ mali_osk_irq_object_t *irq_object = (mali_osk_irq_object_t *)irq;
+ free_irq(irq_object->irqnum, irq_object);
+ kfree(irq_object);
+}
+
+
+/** This function is called directly in interrupt context by the OS just after
+ * the CPU receives the hw-irq from the Mali GPU, or from another device on the
+ * same IRQ channel. One such handler is registered for each Mali core, so when
+ * an interrupt arrives this function is called as many times as there are
+ * registered cores. Each call therefore checks exactly one core, identified by
+ * the \a dev_id argument. If a pending interrupt is detected on the given core,
+ * the interrupt is masked out by setting the core's IRQ_MASK register to zero,
+ * and mali_core_irq_handler_bottom_half is scheduled to run as a high-priority
+ * work queue job.
+ */
+static irqreturn_t irq_handler_upper_half(int port_name, void *dev_id) /* , struct pt_regs *regs*/
+{
+ irqreturn_t ret = IRQ_NONE;
+ mali_osk_irq_object_t *irq_object = (mali_osk_irq_object_t *)dev_id;
+
+ if (_MALI_OSK_ERR_OK == irq_object->uhandler(irq_object->data)) {
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_osk_locks.c b/drivers/gpu/arm/utgard/linux/mali_osk_locks.c
new file mode 100644
index 000000000000..50c0a9d23819
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_osk_locks.c
@@ -0,0 +1,287 @@
+/*
+ * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_locks.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk_locks.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+
+
+#ifdef DEBUG
+#ifdef LOCK_ORDER_CHECKING
+static DEFINE_SPINLOCK(lock_tracking_lock);
+static mali_bool add_lock_to_log_and_check(struct _mali_osk_lock_debug_s *lock, uint32_t tid);
+static void remove_lock_from_log(struct _mali_osk_lock_debug_s *lock, uint32_t tid);
+static const char *const lock_order_to_string(_mali_osk_lock_order_t order);
+#endif /* LOCK_ORDER_CHECKING */
+
+void _mali_osk_locks_debug_init(struct _mali_osk_lock_debug_s *checker, _mali_osk_lock_flags_t flags, _mali_osk_lock_order_t order)
+{
+ checker->orig_flags = flags;
+ checker->owner = 0;
+
+#ifdef LOCK_ORDER_CHECKING
+ checker->order = order;
+ checker->next = NULL;
+#endif
+}
+
+void _mali_osk_locks_debug_add(struct _mali_osk_lock_debug_s *checker)
+{
+ checker->owner = _mali_osk_get_tid();
+
+#ifdef LOCK_ORDER_CHECKING
+ if (!(checker->orig_flags & _MALI_OSK_LOCKFLAG_UNORDERED)) {
+ if (!add_lock_to_log_and_check(checker, _mali_osk_get_tid())) {
+ printk(KERN_ERR "%d: ERROR lock %p taken while holding a lock of a higher order.\n",
+ _mali_osk_get_tid(), checker);
+ dump_stack();
+ }
+ }
+#endif
+}
+
+void _mali_osk_locks_debug_remove(struct _mali_osk_lock_debug_s *checker)
+{
+
+#ifdef LOCK_ORDER_CHECKING
+ if (!(checker->orig_flags & _MALI_OSK_LOCKFLAG_UNORDERED)) {
+ remove_lock_from_log(checker, _mali_osk_get_tid());
+ }
+#endif
+ checker->owner = 0;
+}
+
+
+#ifdef LOCK_ORDER_CHECKING
+/* Lock order checking
+ * -------------------
+ *
+ * To ensure that the lock ordering scheme defined by _mali_osk_lock_order_t is strictly adhered to, the
+ * following function will, together with a linked list and some extra members in _mali_osk_lock_debug_s,
+ * make sure that a lock that is taken has a higher order than the current highest-order lock a
+ * thread holds.
+ *
+ * This is done in the following manner:
+ * - A linked list keeps track of locks held by a thread.
+ * - A `next' pointer is added to each lock. This is used to chain the locks together.
+ * - When taking a lock, `add_lock_to_log_and_check' makes sure that taking
+ *   the given lock is legal. It will follow the linked list to find the last
+ *   lock taken by this thread. If the last lock's order was lower than the
+ *   lock that is to be taken, it appends the new lock to the list and returns
+ *   true; if not, it returns false. This return value is assert()'ed on in
+ *   _mali_osk_lock_wait().
+ */
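+/* Example (assuming the _mali_osk_lock_order_t values increase in the order
+ * listed in lock_order_to_string() below): a thread may take
+ * _MALI_OSK_LOCK_ORDER_SESSIONS and then _MALI_OSK_LOCK_ORDER_SCHEDULER,
+ * but taking them in the opposite order fails the check and logs an error. */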
+
+static struct _mali_osk_lock_debug_s *lock_lookup_list;
+
+static void dump_lock_tracking_list(void)
+{
+ struct _mali_osk_lock_debug_s *l;
+ u32 n = 1;
+
+ /* print list for debugging purposes */
+ l = lock_lookup_list;
+
+ while (NULL != l) {
+ printk(" [lock: %p, tid_owner: %d, order: %d] ->", l, l->owner, l->order);
+ l = l->next;
+ MALI_DEBUG_ASSERT(n++ < 100);
+ }
+ printk(" NULL\n");
+}
+
+static int tracking_list_length(void)
+{
+ struct _mali_osk_lock_debug_s *l;
+ u32 n = 0;
+ l = lock_lookup_list;
+
+ while (NULL != l) {
+ l = l->next;
+ n++;
+ MALI_DEBUG_ASSERT(n < 100);
+ }
+ return n;
+}
+
+static mali_bool add_lock_to_log_and_check(struct _mali_osk_lock_debug_s *lock, uint32_t tid)
+{
+ mali_bool ret = MALI_FALSE;
+ _mali_osk_lock_order_t highest_order_for_tid = _MALI_OSK_LOCK_ORDER_FIRST;
+ struct _mali_osk_lock_debug_s *l;
+ unsigned long local_lock_flag;
+ u32 len;
+
+ spin_lock_irqsave(&lock_tracking_lock, local_lock_flag);
+ len = tracking_list_length();
+
+ l = lock_lookup_list;
+ if (NULL == l) { /* This is the first lock taken by this thread -- record and return true */
+ lock_lookup_list = lock;
+ spin_unlock_irqrestore(&lock_tracking_lock, local_lock_flag);
+ return MALI_TRUE;
+ } else {
+ /* Traverse the locks taken and find the lock of the highest order.
+ * Since several threads may hold locks, each lock's owner must be
+ * checked so that locks not owned by this thread can be ignored. */
+ for (;;) {
+ MALI_DEBUG_ASSERT_POINTER(l);
+ if (tid == l->owner && l->order >= highest_order_for_tid) {
+ highest_order_for_tid = l->order;
+ }
+
+ if (NULL != l->next) {
+ l = l->next;
+ } else {
+ break;
+ }
+ }
+
+ l->next = lock;
+ lock->next = NULL;
+ }
+
+ /* We have now found the highest order lock currently held by this thread and can see if it is
+ * legal to take the requested lock. */
+ ret = highest_order_for_tid < lock->order;
+
+ if (!ret) {
+ printk(KERN_ERR "Took lock of order %d (%s) while holding lock of order %d (%s)\n",
+ lock->order, lock_order_to_string(lock->order),
+ highest_order_for_tid, lock_order_to_string(highest_order_for_tid));
+ dump_lock_tracking_list();
+ }
+
+ if (len + 1 != tracking_list_length()) {
+ printk(KERN_ERR "************ lock: %p\n", lock);
+ printk(KERN_ERR "************ before: %d *** after: %d ****\n", len, tracking_list_length());
+ dump_lock_tracking_list();
+ MALI_DEBUG_ASSERT_POINTER(NULL);
+ }
+
+ spin_unlock_irqrestore(&lock_tracking_lock, local_lock_flag);
+ return ret;
+}
+
+static void remove_lock_from_log(struct _mali_osk_lock_debug_s *lock, uint32_t tid)
+{
+ struct _mali_osk_lock_debug_s *curr;
+ struct _mali_osk_lock_debug_s *prev = NULL;
+ unsigned long local_lock_flag;
+ u32 len;
+ u32 n = 0;
+
+ spin_lock_irqsave(&lock_tracking_lock, local_lock_flag);
+ len = tracking_list_length();
+ curr = lock_lookup_list;
+
+ if (NULL == curr) {
+ printk(KERN_ERR "Error: Lock tracking list was empty on call to remove_lock_from_log\n");
+ dump_lock_tracking_list();
+ }
+
+ MALI_DEBUG_ASSERT_POINTER(curr);
+
+
+ while (lock != curr) {
+ prev = curr;
+
+ MALI_DEBUG_ASSERT_POINTER(curr);
+ curr = curr->next;
+ MALI_DEBUG_ASSERT(n++ < 100);
+ }
+
+ if (NULL == prev) {
+ lock_lookup_list = curr->next;
+ } else {
+ MALI_DEBUG_ASSERT_POINTER(curr);
+ MALI_DEBUG_ASSERT_POINTER(prev);
+ prev->next = curr->next;
+ }
+
+ lock->next = NULL;
+
+ if (len - 1 != tracking_list_length()) {
+ printk(KERN_ERR "************ lock: %p\n", lock);
+ printk(KERN_ERR "************ before: %d *** after: %d ****\n", len, tracking_list_length());
+ dump_lock_tracking_list();
+ MALI_DEBUG_ASSERT_POINTER(NULL);
+ }
+
+ spin_unlock_irqrestore(&lock_tracking_lock, local_lock_flag);
+}
+
+static const char *lock_order_to_string(_mali_osk_lock_order_t order)
+{
+ switch (order) {
+ case _MALI_OSK_LOCK_ORDER_SESSIONS:
+ return "_MALI_OSK_LOCK_ORDER_SESSIONS";
+ case _MALI_OSK_LOCK_ORDER_MEM_SESSION:
+ return "_MALI_OSK_LOCK_ORDER_MEM_SESSION";
+ case _MALI_OSK_LOCK_ORDER_MEM_INFO:
+ return "_MALI_OSK_LOCK_ORDER_MEM_INFO";
+ case _MALI_OSK_LOCK_ORDER_MEM_PT_CACHE:
+ return "_MALI_OSK_LOCK_ORDER_MEM_PT_CACHE";
+ case _MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP:
+ return "_MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP";
+ case _MALI_OSK_LOCK_ORDER_PM_EXECUTION:
+ return "_MALI_OSK_LOCK_ORDER_PM_EXECUTION";
+ case _MALI_OSK_LOCK_ORDER_EXECUTOR:
+ return "_MALI_OSK_LOCK_ORDER_EXECUTOR";
+ case _MALI_OSK_LOCK_ORDER_TIMELINE_SYSTEM:
+ return "_MALI_OSK_LOCK_ORDER_TIMELINE_SYSTEM";
+ case _MALI_OSK_LOCK_ORDER_SCHEDULER:
+ return "_MALI_OSK_LOCK_ORDER_SCHEDULER";
+ case _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED:
+ return "_MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED";
+ case _MALI_OSK_LOCK_ORDER_DMA_COMMAND:
+ return "_MALI_OSK_LOCK_ORDER_DMA_COMMAND";
+ case _MALI_OSK_LOCK_ORDER_PROFILING:
+ return "_MALI_OSK_LOCK_ORDER_PROFILING";
+ case _MALI_OSK_LOCK_ORDER_L2:
+ return "_MALI_OSK_LOCK_ORDER_L2";
+ case _MALI_OSK_LOCK_ORDER_L2_COMMAND:
+ return "_MALI_OSK_LOCK_ORDER_L2_COMMAND";
+ case _MALI_OSK_LOCK_ORDER_UTILIZATION:
+ return "_MALI_OSK_LOCK_ORDER_UTILIZATION";
+ case _MALI_OSK_LOCK_ORDER_SESSION_PENDING_JOBS:
+ return "_MALI_OSK_LOCK_ORDER_SESSION_PENDING_JOBS";
+ case _MALI_OSK_LOCK_ORDER_PM_STATE:
+ return "_MALI_OSK_LOCK_ORDER_PM_STATE";
+ default:
+ return "<UNKNOWN_LOCK_ORDER>";
+ }
+}
+#endif /* LOCK_ORDER_CHECKING */
+#endif /* DEBUG */
diff --git a/drivers/gpu/arm/utgard/linux/mali_osk_locks.h b/drivers/gpu/arm/utgard/linux/mali_osk_locks.h
new file mode 100644
index 000000000000..aa32a81e7496
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_osk_locks.h
@@ -0,0 +1,326 @@
+/*
+ * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_locks.h
+ * Defines OS abstraction of lock and mutex
+ */
+#ifndef _MALI_OSK_LOCKS_H
+#define _MALI_OSK_LOCKS_H
+
+#include <linux/spinlock.h>
+#include <linux/rwsem.h>
+#include <linux/mutex.h>
+
+#include <linux/slab.h>
+
+#include "mali_osk_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ /* When DEBUG is enabled, this struct is used to track a lock's owner, flags and order for checking */
+#ifdef DEBUG
+ struct _mali_osk_lock_debug_s {
+ u32 owner;
+ _mali_osk_lock_flags_t orig_flags;
+ _mali_osk_lock_order_t order;
+ struct _mali_osk_lock_debug_s *next;
+ };
+#endif
+
+ /* Abstraction of spinlock_t */
+ struct _mali_osk_spinlock_s {
+#ifdef DEBUG
+ struct _mali_osk_lock_debug_s checker;
+#endif
+ spinlock_t spinlock;
+ };
+
+ /* Abstraction of spinlock_t plus a flags field used to store the IRQ state saved when locking */
+ struct _mali_osk_spinlock_irq_s {
+#ifdef DEBUG
+ struct _mali_osk_lock_debug_s checker;
+#endif
+
+ spinlock_t spinlock;
+ unsigned long flags;
+ };
+
+ /* Abstraction of rw_semaphore in OS */
+ struct _mali_osk_mutex_rw_s {
+#ifdef DEBUG
+ struct _mali_osk_lock_debug_s checker;
+ _mali_osk_lock_mode_t mode;
+#endif
+
+ struct rw_semaphore rw_sema;
+ };
+
+ /* Mutex and mutex_interruptible functions share the same osk mutex struct */
+ struct _mali_osk_mutex_s {
+#ifdef DEBUG
+ struct _mali_osk_lock_debug_s checker;
+#endif
+ struct mutex mutex;
+ };
+
+#ifdef DEBUG
+ /** @brief _mali_osk_locks_debug_init/add/remove() are declared when DEBUG is enabled and
+ * defined in mali_osk_locks.c. When LOCK_ORDER_CHECKING is enabled, calling these functions
+ * on every init/lock/unlock of a lock or mutex lets us track the lock order for a given tid. */
+ void _mali_osk_locks_debug_init(struct _mali_osk_lock_debug_s *checker, _mali_osk_lock_flags_t flags, _mali_osk_lock_order_t order);
+ void _mali_osk_locks_debug_add(struct _mali_osk_lock_debug_s *checker);
+ void _mali_osk_locks_debug_remove(struct _mali_osk_lock_debug_s *checker);
+
+ /** @brief This function can return a given lock's owner when DEBUG is enabled. */
+ static inline u32 _mali_osk_lock_get_owner(struct _mali_osk_lock_debug_s *lock)
+ {
+ return lock->owner;
+ }
+#else
+#define _mali_osk_locks_debug_init(x, y, z) do {} while (0)
+#define _mali_osk_locks_debug_add(x) do {} while (0)
+#define _mali_osk_locks_debug_remove(x) do {} while (0)
+#endif
+
+ /** @brief Before using _mali_osk_spinlock_lock(), this init function must be called to allocate and initialize the spinlock. */
+ static inline _mali_osk_spinlock_t *_mali_osk_spinlock_init(_mali_osk_lock_flags_t flags, _mali_osk_lock_order_t order)
+ {
+ _mali_osk_spinlock_t *lock = NULL;
+
+ lock = kmalloc(sizeof(_mali_osk_spinlock_t), GFP_KERNEL);
+ if (NULL == lock) {
+ return NULL;
+ }
+ spin_lock_init(&lock->spinlock);
+ _mali_osk_locks_debug_init((struct _mali_osk_lock_debug_s *)lock, flags, order);
+ return lock;
+ }
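+
+ /* Illustrative usage (the order value is an example; pick the order
+ * matching the data the lock protects):
+ *
+ * _mali_osk_spinlock_t *l = _mali_osk_spinlock_init(
+ * _MALI_OSK_LOCKFLAG_UNORDERED, _MALI_OSK_LOCK_ORDER_FIRST);
+ * _mali_osk_spinlock_lock(l);
+ * ... critical section ...
+ * _mali_osk_spinlock_unlock(l);
+ * _mali_osk_spinlock_term(l);
+ */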
+
+ /** @brief Lock a spinlock */
+ static inline void _mali_osk_spinlock_lock(_mali_osk_spinlock_t *lock)
+ {
+ BUG_ON(NULL == lock);
+ spin_lock(&lock->spinlock);
+ _mali_osk_locks_debug_add((struct _mali_osk_lock_debug_s *)lock);
+ }
+
+ /** @brief Unlock a spinlock */
+ static inline void _mali_osk_spinlock_unlock(_mali_osk_spinlock_t *lock)
+ {
+ BUG_ON(NULL == lock);
+ _mali_osk_locks_debug_remove((struct _mali_osk_lock_debug_s *)lock);
+ spin_unlock(&lock->spinlock);
+ }
+
+ /** @brief Free the memory block that the lock argument points to; its type must be
+ * _mali_osk_spinlock_t *. */
+ static inline void _mali_osk_spinlock_term(_mali_osk_spinlock_t *lock)
+ {
+ /* Parameter validation */
+ BUG_ON(NULL == lock);
+
+ /* Linux requires no explicit termination of spinlocks, semaphores, or rw_semaphores */
+ kfree(lock);
+ }
+
+ /** @brief Before _mali_osk_spinlock_irq_lock/unlock/term() is called, this init function must be
+ * called to initialize the spinlock and flags in struct _mali_osk_spinlock_irq_t. */
+ static inline _mali_osk_spinlock_irq_t *_mali_osk_spinlock_irq_init(_mali_osk_lock_flags_t flags, _mali_osk_lock_order_t order)
+ {
+ _mali_osk_spinlock_irq_t *lock = NULL;
+ lock = kmalloc(sizeof(_mali_osk_spinlock_irq_t), GFP_KERNEL);
+
+ if (NULL == lock) {
+ return NULL;
+ }
+
+ lock->flags = 0;
+ spin_lock_init(&lock->spinlock);
+ _mali_osk_locks_debug_init((struct _mali_osk_lock_debug_s *)lock, flags, order);
+ return lock;
+ }
+
+ /** @brief Lock the spinlock and save the current IRQ flags state */
+ static inline void _mali_osk_spinlock_irq_lock(_mali_osk_spinlock_irq_t *lock)
+ {
+ unsigned long tmp_flags;
+
+ BUG_ON(NULL == lock);
+ spin_lock_irqsave(&lock->spinlock, tmp_flags);
+ lock->flags = tmp_flags;
+ _mali_osk_locks_debug_add((struct _mali_osk_lock_debug_s *)lock);
+ }
+
+ /** @brief Unlock the spinlock and restore the saved IRQ flags state */
+ static inline void _mali_osk_spinlock_irq_unlock(_mali_osk_spinlock_irq_t *lock)
+ {
+ BUG_ON(NULL == lock);
+ _mali_osk_locks_debug_remove((struct _mali_osk_lock_debug_s *)lock);
+ spin_unlock_irqrestore(&lock->spinlock, lock->flags);
+ }
+
+ /** @brief Free the memory block that the lock argument points to; its type must be
+ * _mali_osk_spinlock_irq_t *. */
+ static inline void _mali_osk_spinlock_irq_term(_mali_osk_spinlock_irq_t *lock)
+ {
+ /* Parameter validation */
+ BUG_ON(NULL == lock);
+
+ /* Linux requires no explicit termination of spinlocks, semaphores, or rw_semaphores */
+ kfree(lock);
+ }
+
+ /** @brief Before _mali_osk_mutex_rw_wait/signal/term() is called, _mali_osk_mutex_rw_init()
+ * must be called to kmalloc the lock and initialize its members. */
+ static inline _mali_osk_mutex_rw_t *_mali_osk_mutex_rw_init(_mali_osk_lock_flags_t flags, _mali_osk_lock_order_t order)
+ {
+ _mali_osk_mutex_rw_t *lock = NULL;
+
+ lock = kmalloc(sizeof(_mali_osk_mutex_rw_t), GFP_KERNEL);
+
+ if (NULL == lock) {
+ return NULL;
+ }
+
+ init_rwsem(&lock->rw_sema);
+ _mali_osk_locks_debug_init((struct _mali_osk_lock_debug_s *)lock, flags, order);
+ return lock;
+ }
+
+ /** @brief When calling _mali_osk_mutex_rw_wait/signal(), the mode argument must be
+ * either _MALI_OSK_LOCKMODE_RO or _MALI_OSK_LOCKMODE_RW */
+ static inline void _mali_osk_mutex_rw_wait(_mali_osk_mutex_rw_t *lock, _mali_osk_lock_mode_t mode)
+ {
+ BUG_ON(NULL == lock);
+ BUG_ON(!(_MALI_OSK_LOCKMODE_RO == mode || _MALI_OSK_LOCKMODE_RW == mode));
+
+ if (mode == _MALI_OSK_LOCKMODE_RO) {
+ down_read(&lock->rw_sema);
+ } else {
+ down_write(&lock->rw_sema);
+ }
+
+#ifdef DEBUG
+ /* Record whether the lock was taken RO or RW. */
+ lock->mode = mode;
+ _mali_osk_locks_debug_add((struct _mali_osk_lock_debug_s *)lock);
+#endif
+ }
+
+ /** @brief Release lock->rw_sema with up_read()/up_write() according to the mode argument. */
+ static inline void _mali_osk_mutex_rw_signal(_mali_osk_mutex_rw_t *lock, _mali_osk_lock_mode_t mode)
+ {
+ BUG_ON(NULL == lock);
+ BUG_ON(!(_MALI_OSK_LOCKMODE_RO == mode || _MALI_OSK_LOCKMODE_RW == mode));
+#ifdef DEBUG
+ /* make sure the thread releasing the lock actually was the owner */
+ if (mode == _MALI_OSK_LOCKMODE_RW) {
+ _mali_osk_locks_debug_remove((struct _mali_osk_lock_debug_s *)lock);
+ /* This lock now has no owner */
+ lock->checker.owner = 0;
+ }
+#endif
+
+ if (mode == _MALI_OSK_LOCKMODE_RO) {
+ up_read(&lock->rw_sema);
+ } else {
+ up_write(&lock->rw_sema);
+ }
+ }
+
+ /** @brief Free the memory block that the lock argument points to; its type must be
+ * _mali_osk_mutex_rw_t *. */
+ static inline void _mali_osk_mutex_rw_term(_mali_osk_mutex_rw_t *lock)
+ {
+ /* Parameter validation */
+ BUG_ON(NULL == lock);
+
+ /* Linux requires no explicit termination of spinlocks, semaphores, or rw_semaphores */
+ kfree(lock);
+ }
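+
+ /* Illustrative usage: concurrent readers take the semaphore RO, a single
+ * writer takes it RW:
+ *
+ * _mali_osk_mutex_rw_wait(lock, _MALI_OSK_LOCKMODE_RO);
+ * ... read shared state ...
+ * _mali_osk_mutex_rw_signal(lock, _MALI_OSK_LOCKMODE_RO);
+ */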
+
+ /** @brief Mutex and mutex_interruptible share the same init and term functions, because they use the
+ * same osk mutex struct; the difference between them is which locking function is used */
+ static inline _mali_osk_mutex_t *_mali_osk_mutex_init(_mali_osk_lock_flags_t flags, _mali_osk_lock_order_t order)
+ {
+ _mali_osk_mutex_t *lock = NULL;
+
+ lock = kmalloc(sizeof(_mali_osk_mutex_t), GFP_KERNEL);
+
+ if (NULL == lock) {
+ return NULL;
+ }
+ mutex_init(&lock->mutex);
+
+ _mali_osk_locks_debug_init((struct _mali_osk_lock_debug_s *)lock, flags, order);
+ return lock;
+ }
+
+ /** @brief Lock lock->mutex with mutex_lock_interruptible() */
+ static inline _mali_osk_errcode_t _mali_osk_mutex_wait_interruptible(_mali_osk_mutex_t *lock)
+ {
+ BUG_ON(NULL == lock);
+
+ if (mutex_lock_interruptible(&lock->mutex)) {
+ printk(KERN_WARNING "Mali: Can not lock mutex\n");
+ /* The mutex was not acquired, so do not record it as held. */
+ return _MALI_OSK_ERR_RESTARTSYSCALL;
+ }
+
+ _mali_osk_locks_debug_add((struct _mali_osk_lock_debug_s *)lock);
+ return _MALI_OSK_ERR_OK;
+ }
+
+ /** @brief Unlock the lock->mutex which is locked with mutex_lock_interruptible() function. */
+ static inline void _mali_osk_mutex_signal_interruptible(_mali_osk_mutex_t *lock)
+ {
+ BUG_ON(NULL == lock);
+ _mali_osk_locks_debug_remove((struct _mali_osk_lock_debug_s *)lock);
+ mutex_unlock(&lock->mutex);
+ }
+
+ /** @brief Lock lock->mutex with plain mutex_lock(), which cannot be interrupted. */
+ static inline void _mali_osk_mutex_wait(_mali_osk_mutex_t *lock)
+ {
+ BUG_ON(NULL == lock);
+ mutex_lock(&lock->mutex);
+ _mali_osk_locks_debug_add((struct _mali_osk_lock_debug_s *)lock);
+ }
+
+ /** @brief Unlock the lock->mutex which is locked with mutex_lock() function. */
+ static inline void _mali_osk_mutex_signal(_mali_osk_mutex_t *lock)
+ {
+ BUG_ON(NULL == lock);
+ _mali_osk_locks_debug_remove((struct _mali_osk_lock_debug_s *)lock);
+ mutex_unlock(&lock->mutex);
+ }
+
+ /** @brief Free the memory block that the lock argument points to. */
+ static inline void _mali_osk_mutex_term(_mali_osk_mutex_t *lock)
+ {
+ /* Parameter validation */
+ BUG_ON(NULL == lock);
+
+ /* Linux requires no explicit termination of spinlocks, semaphores, or rw_semaphores */
+ kfree(lock);
+ }
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/drivers/gpu/arm/utgard/linux/mali_osk_low_level_mem.c b/drivers/gpu/arm/utgard/linux/mali_osk_low_level_mem.c
new file mode 100644
index 000000000000..c14e872a7d63
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_osk_low_level_mem.c
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_low_level_mem.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <asm/io.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_ukk.h"
+
+void _mali_osk_mem_barrier(void)
+{
+ mb();
+}
+
+void _mali_osk_write_mem_barrier(void)
+{
+ wmb();
+}
+
+mali_io_address _mali_osk_mem_mapioregion(uintptr_t phys, u32 size, const char *description)
+{
+ return (mali_io_address)ioremap_nocache(phys, size);
+}
+
+void _mali_osk_mem_unmapioregion(uintptr_t phys, u32 size, mali_io_address virt)
+{
+ iounmap((void *)virt);
+}
+
+_mali_osk_errcode_t inline _mali_osk_mem_reqregion(uintptr_t phys, u32 size, const char *description)
+{
+#if MALI_LICENSE_IS_GPL
+ return _MALI_OSK_ERR_OK; /* For the GPL driver, the mem regions for the registered resources are requested automatically */
+#else
+ return ((NULL == request_mem_region(phys, size, description)) ? _MALI_OSK_ERR_NOMEM : _MALI_OSK_ERR_OK);
+#endif
+}
+
+void inline _mali_osk_mem_unreqregion(uintptr_t phys, u32 size)
+{
+#if !MALI_LICENSE_IS_GPL
+ release_mem_region(phys, size);
+#endif
+}
+
+void inline _mali_osk_mem_iowrite32_relaxed(volatile mali_io_address addr, u32 offset, u32 val)
+{
+ __raw_writel(cpu_to_le32(val), ((u8 *)addr) + offset);
+}
+
+u32 inline _mali_osk_mem_ioread32(volatile mali_io_address addr, u32 offset)
+{
+ return ioread32(((u8 *)addr) + offset);
+}
+
+void inline _mali_osk_mem_iowrite32(volatile mali_io_address addr, u32 offset, u32 val)
+{
+ iowrite32(val, ((u8 *)addr) + offset);
+}
+
+void _mali_osk_cache_flushall(void)
+{
+ /** @note Cached memory is not currently supported in this implementation */
+}
+
+void _mali_osk_cache_ensure_uncached_range_flushed(void *uncached_mapping, u32 offset, u32 size)
+{
+ _mali_osk_write_mem_barrier();
+}
+
+u32 _mali_osk_mem_write_safe(void __user *dest, const void __user *src, u32 size)
+{
+#define MALI_MEM_SAFE_COPY_BLOCK_SIZE 4096
+ u32 retval = 0;
+ void *temp_buf;
+
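+ /* Both src and dest are user-space pointers, so the copy is staged
+ * through a bounded kernel bounce buffer, one block at a time. */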
+ temp_buf = kmalloc(MALI_MEM_SAFE_COPY_BLOCK_SIZE, GFP_KERNEL);
+ if (NULL != temp_buf) {
+ u32 bytes_left_to_copy = size;
+ u32 i;
+ for (i = 0; i < size; i += MALI_MEM_SAFE_COPY_BLOCK_SIZE) {
+ u32 size_to_copy;
+ u32 size_copied;
+ u32 bytes_left;
+
+ if (bytes_left_to_copy > MALI_MEM_SAFE_COPY_BLOCK_SIZE) {
+ size_to_copy = MALI_MEM_SAFE_COPY_BLOCK_SIZE;
+ } else {
+ size_to_copy = bytes_left_to_copy;
+ }
+
+ bytes_left = copy_from_user(temp_buf, ((char *)src) + i, size_to_copy);
+ size_copied = size_to_copy - bytes_left;
+
+ bytes_left = copy_to_user(((char *)dest) + i, temp_buf, size_copied);
+ size_copied -= bytes_left;
+
+ bytes_left_to_copy -= size_copied;
+ retval += size_copied;
+
+ if (size_copied != size_to_copy) {
+ break; /* Early out, we were not able to copy this entire block */
+ }
+ }
+
+ kfree(temp_buf);
+ }
+
+ return retval;
+}
+
+_mali_osk_errcode_t _mali_ukk_mem_write_safe(_mali_uk_mem_write_safe_s *args)
+{
+ void __user *src;
+ void __user *dst;
+ struct mali_session_data *session;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+
+ session = (struct mali_session_data *)(uintptr_t)args->ctx;
+
+ if (NULL == session) {
+ return _MALI_OSK_ERR_INVALID_ARGS;
+ }
+
+ src = (void __user *)(uintptr_t)args->src;
+ dst = (void __user *)(uintptr_t)args->dest;
+
+ /* Return number of bytes actually copied */
+ args->size = _mali_osk_mem_write_safe(dst, src, args->size);
+ return _MALI_OSK_ERR_OK;
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_osk_mali.c b/drivers/gpu/arm/utgard/linux/mali_osk_mali.c
new file mode 100644
index 000000000000..a3749d8057b4
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_osk_mali.c
@@ -0,0 +1,390 @@
+/*
+ * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_mali.c
+ * Implementation of the OS abstraction layer which is specific for the Mali kernel device driver
+ */
+#include <linux/kernel.h>
+#include <asm/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/mali/mali_utgard.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include "mali_osk_mali.h"
+#include "mali_kernel_common.h" /* MALI_xxx macros */
+#include "mali_osk.h" /* kernel side OS functions */
+#include "mali_kernel_linux.h"
+
+
+#if defined(CONFIG_MALI_DT) && !defined(CONFIG_MALI_PLAT_SPECIFIC_DT)
+
+#define MALI_OSK_INVALID_RESOURCE_ADDRESS 0xFFFFFFFF
+
+/**
+ * Define the max number of resource we could have.
+ */
+#define MALI_OSK_MAX_RESOURCE_NUMBER 27
+
+/**
+ * Define the maximum number of resources with interrupts; they are
+ * the first 20 elements of the mali_osk_resource_bank array.
+ */
+#define MALI_OSK_RESOURCE_WITH_IRQ_NUMBER 20
+
+/**
+ * PP core start and end locations in the mali_osk_resource_bank array.
+ */
+#define MALI_OSK_RESOURCE_PP_LOCATION_START 2
+#define MALI_OSK_RESOURCE_PP_LOCATION_END 17
+
+/**
+ * L2 cache start and end locations in the mali_osk_resource_bank array.
+ */
+#define MALI_OSK_RESOURCE_L2_LOCATION_START 20
+#define MALI_OSK_RESOURCE_L2_LOCATION_END 22
+
+/**
+ * DMA unit location.
+ */
+#define MALI_OSK_RESOURCE_DMA_LOCATION 26
+
+static _mali_osk_resource_t mali_osk_resource_bank[MALI_OSK_MAX_RESOURCE_NUMBER] = {
+ {.description = "Mali_GP", .base = MALI_OFFSET_GP, .irq_name = "IRQGP",},
+ {.description = "Mali_GP_MMU", .base = MALI_OFFSET_GP_MMU, .irq_name = "IRQGPMMU",},
+ {.description = "Mali_PP0", .base = MALI_OFFSET_PP0, .irq_name = "IRQPP0",},
+ {.description = "Mali_PP0_MMU", .base = MALI_OFFSET_PP0_MMU, .irq_name = "IRQPPMMU0",},
+ {.description = "Mali_PP1", .base = MALI_OFFSET_PP1, .irq_name = "IRQPP1",},
+ {.description = "Mali_PP1_MMU", .base = MALI_OFFSET_PP1_MMU, .irq_name = "IRQPPMMU1",},
+ {.description = "Mali_PP2", .base = MALI_OFFSET_PP2, .irq_name = "IRQPP2",},
+ {.description = "Mali_PP2_MMU", .base = MALI_OFFSET_PP2_MMU, .irq_name = "IRQPPMMU2",},
+ {.description = "Mali_PP3", .base = MALI_OFFSET_PP3, .irq_name = "IRQPP3",},
+ {.description = "Mali_PP3_MMU", .base = MALI_OFFSET_PP3_MMU, .irq_name = "IRQPPMMU3",},
+ {.description = "Mali_PP4", .base = MALI_OFFSET_PP4, .irq_name = "IRQPP4",},
+ {.description = "Mali_PP4_MMU", .base = MALI_OFFSET_PP4_MMU, .irq_name = "IRQPPMMU4",},
+ {.description = "Mali_PP5", .base = MALI_OFFSET_PP5, .irq_name = "IRQPP5",},
+ {.description = "Mali_PP5_MMU", .base = MALI_OFFSET_PP5_MMU, .irq_name = "IRQPPMMU5",},
+ {.description = "Mali_PP6", .base = MALI_OFFSET_PP6, .irq_name = "IRQPP6",},
+ {.description = "Mali_PP6_MMU", .base = MALI_OFFSET_PP6_MMU, .irq_name = "IRQPPMMU6",},
+ {.description = "Mali_PP7", .base = MALI_OFFSET_PP7, .irq_name = "IRQPP7",},
+ {.description = "Mali_PP7_MMU", .base = MALI_OFFSET_PP7_MMU, .irq_name = "IRQPPMMU",},
+ {.description = "Mali_PP_Broadcast", .base = MALI_OFFSET_PP_BCAST, .irq_name = "IRQPP",},
+ {.description = "Mali_PMU", .base = MALI_OFFSET_PMU, .irq_name = "IRQPMU",},
+ {.description = "Mali_L2", .base = MALI_OFFSET_L2_RESOURCE0,},
+ {.description = "Mali_L2", .base = MALI_OFFSET_L2_RESOURCE1,},
+ {.description = "Mali_L2", .base = MALI_OFFSET_L2_RESOURCE2,},
+ {.description = "Mali_PP_MMU_Broadcast", .base = MALI_OFFSET_PP_BCAST_MMU,},
+ {.description = "Mali_Broadcast", .base = MALI_OFFSET_BCAST,},
+ {.description = "Mali_DLBU", .base = MALI_OFFSET_DLBU,},
+ {.description = "Mali_DMA", .base = MALI_OFFSET_DMA,},
+};
+
+static int _mali_osk_get_compatible_name(const char **out_string)
+{
+ struct device_node *node = mali_platform_device->dev.of_node;
+
+ MALI_DEBUG_ASSERT(NULL != node);
+
+ return of_property_read_string(node, "compatible", out_string);
+}
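+
+/*
+ * Illustrative device tree fragment this probe path expects (node name and
+ * addresses are examples only, not taken from a real board file):
+ *
+ * gpu@f4000000 {
+ * compatible = "arm,mali-450";
+ * reg = <0xf4000000 0x10000>;
+ * interrupt-names = "IRQGP", "IRQGPMMU", "IRQPP0", ...;
+ * };
+ */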
+
+_mali_osk_errcode_t _mali_osk_resource_initialize(void)
+{
+ mali_bool mali_is_450 = MALI_FALSE, mali_is_470 = MALI_FALSE;
+ int i, pp_core_num = 0, l2_core_num = 0;
+ struct resource *res;
+ const char *compatible_name = NULL;
+
+ if (0 == _mali_osk_get_compatible_name(&compatible_name)) {
+ if (0 == strncmp(compatible_name, "arm,mali-450", strlen("arm,mali-450"))) {
+ mali_is_450 = MALI_TRUE;
+ MALI_DEBUG_PRINT(2, ("mali-450 device tree detected."));
+ } else if (0 == strncmp(compatible_name, "arm,mali-470", strlen("arm,mali-470"))) {
+ mali_is_470 = MALI_TRUE;
+ MALI_DEBUG_PRINT(2, ("mali-470 device tree detected."));
+ }
+ }
+
+ for (i = 0; i < MALI_OSK_RESOURCE_WITH_IRQ_NUMBER; i++) {
+ res = platform_get_resource_byname(mali_platform_device, IORESOURCE_IRQ, mali_osk_resource_bank[i].irq_name);
+ if (res) {
+ mali_osk_resource_bank[i].irq = res->start;
+ } else {
+ mali_osk_resource_bank[i].base = MALI_OSK_INVALID_RESOURCE_ADDRESS;
+ }
+ }
+
+ for (i = MALI_OSK_RESOURCE_PP_LOCATION_START; i <= MALI_OSK_RESOURCE_PP_LOCATION_END; i++) {
+ if (MALI_OSK_INVALID_RESOURCE_ADDRESS != mali_osk_resource_bank[i].base) {
+ pp_core_num++;
+ }
+ }
+
+ /* We have to divide by 2, because each PP is counted twice (pp_core and pp_mmu_core). */
+ if (0 != pp_core_num % 2) {
+ MALI_DEBUG_PRINT(2, ("The PP core count is not even; the resource table is inconsistent."));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ pp_core_num /= 2;
+
+ /*
+ * We can calculate the number of L2 cache cores from the number of
+ * PP cores and the device type (Mali-400/450/470).
+ */
+ l2_core_num = 1;
+ if (mali_is_450) {
+ if (pp_core_num > 4) {
+ l2_core_num = 3;
+ } else if (pp_core_num <= 4) {
+ l2_core_num = 2;
+ }
+ }
+
+ for (i = MALI_OSK_RESOURCE_L2_LOCATION_END; i > MALI_OSK_RESOURCE_L2_LOCATION_START + l2_core_num - 1; i--) {
+ mali_osk_resource_bank[i].base = MALI_OSK_INVALID_RESOURCE_ADDRESS;
+ }
+
+ /* If the device is neither Mali-450 nor Mali-470, remove the related resources from the resource bank. */
+ if (!(mali_is_450 || mali_is_470)) {
+ for (i = MALI_OSK_RESOURCE_L2_LOCATION_END + 1; i < MALI_OSK_MAX_RESOURCE_NUMBER; i++) {
+ mali_osk_resource_bank[i].base = MALI_OSK_INVALID_RESOURCE_ADDRESS;
+ }
+ }
+
+ if (mali_is_470)
+ mali_osk_resource_bank[MALI_OSK_RESOURCE_DMA_LOCATION].base = MALI_OSK_INVALID_RESOURCE_ADDRESS;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_osk_resource_find(u32 addr, _mali_osk_resource_t *res)
+{
+ int i;
+
+ if (NULL == mali_platform_device) {
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ }
+
+ /* Traverse all resources in the resource bank to find the matching one. */
+ for (i = 0; i < MALI_OSK_MAX_RESOURCE_NUMBER; i++) {
+ if (mali_osk_resource_bank[i].base == addr) {
+ if (NULL != res) {
+ res->base = addr + _mali_osk_resource_base_address();
+ res->description = mali_osk_resource_bank[i].description;
+ res->irq = mali_osk_resource_bank[i].irq;
+ }
+ return _MALI_OSK_ERR_OK;
+ }
+ }
+
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+}
+
+uintptr_t _mali_osk_resource_base_address(void)
+{
+ struct resource *reg_res = NULL;
+ uintptr_t ret = 0;
+
+ reg_res = platform_get_resource(mali_platform_device, IORESOURCE_MEM, 0);
+
+ if (NULL != reg_res) {
+ ret = reg_res->start;
+ }
+
+ return ret;
+}
+
+void _mali_osk_device_data_pmu_config_get(u16 *domain_config_array, int array_size)
+{
+ struct device_node *node = mali_platform_device->dev.of_node;
+ struct property *prop;
+ const __be32 *p;
+ int length = 0, i = 0;
+ u32 u;
+
+ MALI_DEBUG_PRINT(2, ("Get pmu config from device tree configuration.\n"));
+
+ MALI_DEBUG_ASSERT(NULL != node);
+
+ if (!of_get_property(node, "pmu_domain_config", &length)) {
+ return;
+ }
+
+ if (array_size != length / sizeof(u32)) {
+ MALI_PRINT_ERROR(("Wrong pmu domain config in device tree."));
+ return;
+ }
+
+ of_property_for_each_u32(node, "pmu_domain_config", prop, p, u) {
+ domain_config_array[i] = (u16)u;
+ i++;
+ }
+
+ return;
+}
+
+u32 _mali_osk_get_pmu_switch_delay(void)
+{
+ struct device_node *node = mali_platform_device->dev.of_node;
+ u32 switch_delay;
+
+ MALI_DEBUG_ASSERT(NULL != node);
+
+ if (0 == of_property_read_u32(node, "pmu_switch_delay", &switch_delay)) {
+ return switch_delay;
+ } else {
+ MALI_DEBUG_PRINT(2, ("Couldn't find pmu_switch_delay in device tree configuration.\n"));
+ }
+
+ return 0;
+}
+
+#else /* CONFIG_MALI_DT && !CONFIG_MALI_PLAT_SPECIFIC_DT */
+
+_mali_osk_errcode_t _mali_osk_resource_find(u32 addr, _mali_osk_resource_t *res)
+{
+ int i;
+ uintptr_t phys_addr;
+
+ if (NULL == mali_platform_device) {
+ /* Not connected to a device */
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ }
+
+ phys_addr = addr + _mali_osk_resource_base_address();
+ for (i = 0; i < mali_platform_device->num_resources; i++) {
+ if (IORESOURCE_MEM == resource_type(&(mali_platform_device->resource[i])) &&
+ mali_platform_device->resource[i].start == phys_addr) {
+ if (NULL != res) {
+ res->base = phys_addr;
+ res->description = mali_platform_device->resource[i].name;
+
+ /* Any (optional) IRQ resource belonging to this resource will follow */
+ if ((i + 1) < mali_platform_device->num_resources &&
+ IORESOURCE_IRQ == resource_type(&(mali_platform_device->resource[i + 1]))) {
+ res->irq = mali_platform_device->resource[i + 1].start;
+ } else {
+ res->irq = -1;
+ }
+ }
+ return _MALI_OSK_ERR_OK;
+ }
+ }
+
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+}
+
+uintptr_t _mali_osk_resource_base_address(void)
+{
+ uintptr_t lowest_addr = (uintptr_t)(0 - 1);
+ uintptr_t ret = 0;
+
+ if (NULL != mali_platform_device) {
+ int i;
+ for (i = 0; i < mali_platform_device->num_resources; i++) {
+ if (mali_platform_device->resource[i].flags & IORESOURCE_MEM &&
+ mali_platform_device->resource[i].start < lowest_addr) {
+ lowest_addr = mali_platform_device->resource[i].start;
+ ret = lowest_addr;
+ }
+ }
+ }
+
+ return ret;
+}
+
+void _mali_osk_device_data_pmu_config_get(u16 *domain_config_array, int array_size)
+{
+ _mali_osk_device_data data = { 0, };
+
+ MALI_DEBUG_PRINT(2, ("Get pmu config from platform device data.\n"));
+ if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+ /* Copy the custom customer power domain config */
+ _mali_osk_memcpy(domain_config_array, data.pmu_domain_config, sizeof(data.pmu_domain_config));
+ }
+
+ return;
+}
+
+u32 _mali_osk_get_pmu_switch_delay(void)
+{
+ _mali_osk_errcode_t err;
+ _mali_osk_device_data data = { 0, };
+
+ err = _mali_osk_device_data_get(&data);
+
+ if (_MALI_OSK_ERR_OK == err) {
+ return data.pmu_switch_delay;
+ }
+
+ return 0;
+}
+#endif /* CONFIG_MALI_DT */
+
+_mali_osk_errcode_t _mali_osk_device_data_get(_mali_osk_device_data *data)
+{
+ MALI_DEBUG_ASSERT_POINTER(data);
+
+ if (NULL != mali_platform_device) {
+ struct mali_gpu_device_data *os_data = NULL;
+
+ os_data = (struct mali_gpu_device_data *)mali_platform_device->dev.platform_data;
+ if (NULL != os_data) {
+ /* Copy data from the OS-dependent struct to the Mali-neutral struct (they are identical!) */
+ BUILD_BUG_ON(sizeof(*os_data) != sizeof(*data));
+ _mali_osk_memcpy(data, os_data, sizeof(*os_data));
+
+ return _MALI_OSK_ERR_OK;
+ }
+ }
+
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+}
+
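+/* Infer the GPU variant from which resources are present: a Mali-450 has a
+ * second L2 cache resource; a Mali-470 has a DLBU but only one L2; anything
+ * else is treated as a Mali-400. */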
+u32 _mali_osk_identify_gpu_resource(void)
+{
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_L2_RESOURCE1, NULL))
+ /* Mali 450 */
+ return 0x450;
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_DLBU, NULL))
+ /* Mali 470 */
+ return 0x470;
+
+ /* Mali 400 */
+ return 0x400;
+}
+
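+/* Returns MALI_TRUE if two or more of the device's IRQ resources share the
+ * same interrupt line; callers can use this to decide whether the IRQ
+ * handlers need to be registered as shared. */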
+mali_bool _mali_osk_shared_interrupts(void)
+{
+ u32 irqs[128];
+ u32 i, j, irq, num_irqs_found = 0;
+
+ MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
+ MALI_DEBUG_ASSERT(128 >= mali_platform_device->num_resources);
+
+ for (i = 0; i < mali_platform_device->num_resources; i++) {
+ if (IORESOURCE_IRQ & mali_platform_device->resource[i].flags) {
+ irq = mali_platform_device->resource[i].start;
+
+ for (j = 0; j < num_irqs_found; ++j) {
+ if (irq == irqs[j]) {
+ return MALI_TRUE;
+ }
+ }
+
+ irqs[num_irqs_found++] = irq;
+ }
+ }
+
+ return MALI_FALSE;
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_osk_math.c b/drivers/gpu/arm/utgard/linux/mali_osk_math.c
new file mode 100644
index 000000000000..085ce76f7665
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_osk_math.c
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2010, 2013-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_math.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <linux/bitops.h>
+
+u32 _mali_osk_clz(u32 input)
+{
+ return 32 - fls(input);
+}
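+
+/* Examples: _mali_osk_clz(0) == 32, _mali_osk_clz(1) == 31,
+ * _mali_osk_clz(0x80000000) == 0. */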
+
+u32 _mali_osk_fls(u32 input)
+{
+ return fls(input);
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_osk_memory.c b/drivers/gpu/arm/utgard/linux/mali_osk_memory.c
new file mode 100644
index 000000000000..390e613e186d
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_osk_memory.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2010-2011, 2013-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_memory.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+void inline *_mali_osk_calloc(u32 n, u32 size)
+{
+ return kcalloc(n, size, GFP_KERNEL);
+}
+
+void inline *_mali_osk_malloc(u32 size)
+{
+ return kmalloc(size, GFP_KERNEL);
+}
+
+void inline _mali_osk_free(void *ptr)
+{
+ kfree(ptr);
+}
+
+void inline *_mali_osk_valloc(u32 size)
+{
+ return vmalloc(size);
+}
+
+void inline _mali_osk_vfree(void *ptr)
+{
+ vfree(ptr);
+}
+
+void inline *_mali_osk_memcpy(void *dst, const void *src, u32 len)
+{
+ return memcpy(dst, src, len);
+}
+
+void inline *_mali_osk_memset(void *s, u32 c, u32 n)
+{
+ return memset(s, c, n);
+}
+
+mali_bool _mali_osk_mem_check_allocated(u32 max_allocated)
+{
+ /* No need to prevent an out-of-memory dialogue appearing on Linux,
+ * so we always return MALI_TRUE.
+ */
+ return MALI_TRUE;
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_osk_misc.c b/drivers/gpu/arm/utgard/linux/mali_osk_misc.c
new file mode 100644
index 000000000000..0a619e3fc27e
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_osk_misc.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_misc.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+#include <linux/kernel.h>
+#include <asm/uaccess.h>
+#include <asm/cacheflush.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/module.h>
+#include "mali_osk.h"
+
+#if !defined(CONFIG_MALI_QUIET)
+void _mali_osk_dbgmsg(const char *fmt, ...)
+{
+ va_list args;
+ va_start(args, fmt);
+ vprintk(fmt, args);
+ va_end(args);
+}
+#endif /* !defined(CONFIG_MALI_QUIET) */
+
+u32 _mali_osk_snprintf(char *buf, u32 size, const char *fmt, ...)
+{
+ int res;
+ va_list args;
+ va_start(args, fmt);
+
+ res = vscnprintf(buf, (size_t)size, fmt, args);
+
+ va_end(args);
+ return res;
+}
+
+void _mali_osk_ctxprintf(_mali_osk_print_ctx *print_ctx, const char *fmt, ...)
+{
+ va_list args;
+ char buf[512];
+
+ va_start(args, fmt);
+ vscnprintf(buf, 512, fmt, args);
+ seq_printf(print_ctx, "%s", buf);
+ va_end(args);
+}
+
+void _mali_osk_abort(void)
+{
+ /* make a simple fault by dereferencing a NULL pointer */
+ dump_stack();
+ *(int *)0 = 0;
+}
+
+void _mali_osk_break(void)
+{
+ _mali_osk_abort();
+}
+
+u32 _mali_osk_get_pid(void)
+{
+ /* Thread group ID is the process ID on Linux */
+ return (u32)current->tgid;
+}
+
+char *_mali_osk_get_comm(void)
+{
+ return (char *)current->comm;
+}
+
+
+u32 _mali_osk_get_tid(void)
+{
+ /* pid is actually identifying the thread on Linux */
+ u32 tid = current->pid;
+
+ /* If the pid is 0 the core was idle. Instead of returning 0 we return a special number
+ * identifying which core we are on. */
+ if (0 == tid) {
+ tid = -(1 + raw_smp_processor_id());
+ }
+
+ return tid;
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_osk_notification.c b/drivers/gpu/arm/utgard/linux/mali_osk_notification.c
new file mode 100644
index 000000000000..e66fe83f3557
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_osk_notification.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_notification.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+/**
+ * Declaration of the notification queue object type.
+ * Contains a linked list of notifications pending delivery to user space.
+ * It also contains a wait queue of exclusive waiters blocked in the ioctl.
+ * When a new notification is posted, a single thread is resumed.
+ */
+struct _mali_osk_notification_queue_t_struct {
+ spinlock_t mutex; /**< Mutex protecting the list */
+ wait_queue_head_t receive_queue; /**< Threads waiting for new entries to the queue */
+ struct list_head head; /**< List of notifications waiting to be picked up */
+};
+
+typedef struct _mali_osk_notification_wrapper_t_struct {
+ struct list_head list; /**< Internal linked list variable */
+ _mali_osk_notification_t data; /**< Notification data */
+} _mali_osk_notification_wrapper_t;
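+
+/* A notification and its result buffer are allocated as a single block:
+ * when present, the buffer lives immediately after the wrapper struct
+ * (see _mali_osk_notification_create() below). */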
+
+_mali_osk_notification_queue_t *_mali_osk_notification_queue_init(void)
+{
+ _mali_osk_notification_queue_t *result;
+
+ result = (_mali_osk_notification_queue_t *)kmalloc(sizeof(_mali_osk_notification_queue_t), GFP_KERNEL);
+ if (NULL == result) return NULL;
+
+ spin_lock_init(&result->mutex);
+ init_waitqueue_head(&result->receive_queue);
+ INIT_LIST_HEAD(&result->head);
+
+ return result;
+}
+
+_mali_osk_notification_t *_mali_osk_notification_create(u32 type, u32 size)
+{
+ /* OPT Recycling of notification objects */
+ _mali_osk_notification_wrapper_t *notification;
+
+ notification = (_mali_osk_notification_wrapper_t *)kmalloc(sizeof(_mali_osk_notification_wrapper_t) + size,
+ GFP_KERNEL | __GFP_HIGH | __GFP_REPEAT);
+ if (NULL == notification) {
+ MALI_DEBUG_PRINT(1, ("Failed to create a notification object\n"));
+ return NULL;
+ }
+
+ /* Init the list */
+ INIT_LIST_HEAD(&notification->list);
+
+ if (0 != size) {
+ notification->data.result_buffer = ((u8 *)notification) + sizeof(_mali_osk_notification_wrapper_t);
+ } else {
+ notification->data.result_buffer = NULL;
+ }
+
+ /* set up the non-allocating fields */
+ notification->data.notification_type = type;
+ notification->data.result_buffer_size = size;
+
+ /* all ok */
+ return &(notification->data);
+}
+
+void _mali_osk_notification_delete(_mali_osk_notification_t *object)
+{
+ _mali_osk_notification_wrapper_t *notification;
+ MALI_DEBUG_ASSERT_POINTER(object);
+
+ notification = container_of(object, _mali_osk_notification_wrapper_t, data);
+
+ /* Free the container */
+ kfree(notification);
+}
+
+void _mali_osk_notification_queue_term(_mali_osk_notification_queue_t *queue)
+{
+ _mali_osk_notification_t *result;
+ MALI_DEBUG_ASSERT_POINTER(queue);
+
+ while (_MALI_OSK_ERR_OK == _mali_osk_notification_queue_dequeue(queue, &result)) {
+ _mali_osk_notification_delete(result);
+ }
+
+ /* not much to do, just free the memory */
+ kfree(queue);
+}
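+
+/* When MALI_UPPER_HALF_SCHEDULING is enabled, notifications may be posted
+ * from interrupt context, so the queue lock below is taken with the
+ * irqsave/irqrestore spinlock variants. */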
+void _mali_osk_notification_queue_send(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t *object)
+{
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+ unsigned long irq_flags;
+#endif
+
+ _mali_osk_notification_wrapper_t *notification;
+ MALI_DEBUG_ASSERT_POINTER(queue);
+ MALI_DEBUG_ASSERT_POINTER(object);
+
+ notification = container_of(object, _mali_osk_notification_wrapper_t, data);
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+ spin_lock_irqsave(&queue->mutex, irq_flags);
+#else
+ spin_lock(&queue->mutex);
+#endif
+
+ list_add_tail(&notification->list, &queue->head);
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+ spin_unlock_irqrestore(&queue->mutex, irq_flags);
+#else
+ spin_unlock(&queue->mutex);
+#endif
+
+ /* and wake up one possible exclusive waiter */
+ wake_up(&queue->receive_queue);
+}
+
+_mali_osk_errcode_t _mali_osk_notification_queue_dequeue(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result)
+{
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+ unsigned long irq_flags;
+#endif
+
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ _mali_osk_notification_wrapper_t *wrapper_object;
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+ spin_lock_irqsave(&queue->mutex, irq_flags);
+#else
+ spin_lock(&queue->mutex);
+#endif
+
+ if (!list_empty(&queue->head)) {
+ wrapper_object = list_entry(queue->head.next, _mali_osk_notification_wrapper_t, list);
+ *result = &(wrapper_object->data);
+ list_del_init(&wrapper_object->list);
+ ret = _MALI_OSK_ERR_OK;
+ }
+
+#if defined(MALI_UPPER_HALF_SCHEDULING)
+ spin_unlock_irqrestore(&queue->mutex, irq_flags);
+#else
+ spin_unlock(&queue->mutex);
+#endif
+
+ return ret;
+}
+
+_mali_osk_errcode_t _mali_osk_notification_queue_receive(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result)
+{
+ /* check input */
+ MALI_DEBUG_ASSERT_POINTER(queue);
+ MALI_DEBUG_ASSERT_POINTER(result);
+
+ /* default result */
+ *result = NULL;
+
+ if (wait_event_interruptible(queue->receive_queue,
+ _MALI_OSK_ERR_OK == _mali_osk_notification_queue_dequeue(queue, result))) {
+ return _MALI_OSK_ERR_RESTARTSYSCALL;
+ }
+
+ return _MALI_OSK_ERR_OK; /* all ok */
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_osk_pm.c b/drivers/gpu/arm/utgard/linux/mali_osk_pm.c
new file mode 100644
index 000000000000..21180d33fe75
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_osk_pm.c
@@ -0,0 +1,83 @@
+/**
+ * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_pm.c
+ * Implementation of the callback functions from common power management
+ */
+
+#include <linux/sched.h>
+
+#ifdef CONFIG_PM_RUNTIME
+#include <linux/pm_runtime.h>
+#endif /* CONFIG_PM_RUNTIME */
+#include <linux/platform_device.h>
+#include <linux/version.h>
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_linux.h"
+
+/* Can NOT run in atomic context */
+_mali_osk_errcode_t _mali_osk_pm_dev_ref_get_sync(void)
+{
+#ifdef CONFIG_PM_RUNTIME
+ int err;
+ MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
+ err = pm_runtime_get_sync(&(mali_platform_device->dev));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
+ pm_runtime_mark_last_busy(&(mali_platform_device->dev));
+#endif
+ if (0 > err) {
+ MALI_PRINT_ERROR(("Mali OSK PM: pm_runtime_get_sync() returned error code %d\n", err));
+ return _MALI_OSK_ERR_FAULT;
+ }
+#endif
+ return _MALI_OSK_ERR_OK;
+}
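+
+/* Each successful _mali_osk_pm_dev_ref_get_sync()/_get_async() call must be
+ * balanced by one _mali_osk_pm_dev_ref_put() below. */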
+
+/* Can run in atomic context */
+_mali_osk_errcode_t _mali_osk_pm_dev_ref_get_async(void)
+{
+#ifdef CONFIG_PM_RUNTIME
+ int err;
+ MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
+ err = pm_runtime_get(&(mali_platform_device->dev));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
+ pm_runtime_mark_last_busy(&(mali_platform_device->dev));
+#endif
+ if (0 > err && -EINPROGRESS != err) {
+ MALI_PRINT_ERROR(("Mali OSK PM: pm_runtime_get() returned error code %d\n", err));
+ return _MALI_OSK_ERR_FAULT;
+ }
+#endif
+ return _MALI_OSK_ERR_OK;
+}
+
+
+/* Can run in atomic context */
+void _mali_osk_pm_dev_ref_put(void)
+{
+#ifdef CONFIG_PM_RUNTIME
+ MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
+ pm_runtime_mark_last_busy(&(mali_platform_device->dev));
+ pm_runtime_put_autosuspend(&(mali_platform_device->dev));
+#else
+ pm_runtime_put(&(mali_platform_device->dev));
+#endif
+#endif
+}
+
+void _mali_osk_pm_dev_barrier(void)
+{
+#ifdef CONFIG_PM_RUNTIME
+ pm_runtime_barrier(&(mali_platform_device->dev));
+#endif
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_osk_profiling.c b/drivers/gpu/arm/utgard/linux/mali_osk_profiling.c
new file mode 100644
index 000000000000..cc09748e7316
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_osk_profiling.c
@@ -0,0 +1,1272 @@
+/*
+ * Copyright (C) 2012-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/hrtimer.h>
+#include <linux/module.h>
+#include <linux/file.h>
+#include <linux/poll.h>
+#include <linux/anon_inodes.h>
+#include <linux/sched.h>
+
+#include <mali_profiling_gator_api.h>
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_ukk.h"
+#include "mali_uk_types.h"
+#include "mali_osk_profiling.h"
+#include "mali_linux_trace.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_l2_cache.h"
+#include "mali_user_settings_db.h"
+#include "mali_executor.h"
+#include "mali_memory_manager.h"
+
+#define MALI_PROFILING_STREAM_DATA_DEFAULT_SIZE 100
+#define MALI_PROFILING_STREAM_HOLD_TIME 1000000 /* 1 ms */
+
+#define MALI_PROFILING_STREAM_BUFFER_SIZE (1 << 12)
+#define MALI_PROFILING_STREAM_BUFFER_NUM 100
+
+/**
+ * Define the mali profiling stream struct.
+ */
+typedef struct mali_profiling_stream {
+ u8 data[MALI_PROFILING_STREAM_BUFFER_SIZE];
+ u32 used_size;
+ struct list_head list;
+} mali_profiling_stream;
+
+typedef struct mali_profiling_stream_list {
+ spinlock_t spin_lock;
+ struct list_head free_list;
+ struct list_head queue_list;
+} mali_profiling_stream_list;
+
+static const char mali_name[] = "4xx";
+static const char utgard_setup_version[] = "ANNOTATE_SETUP 1\n";
+
+static u32 profiling_sample_rate = 0;
+static u32 first_sw_counter_index = 0;
+
+static mali_bool l2_cache_counter_if_enabled = MALI_FALSE;
+static u32 num_counters_enabled = 0;
+static u32 mem_counters_enabled = 0;
+
+static _mali_osk_atomic_t stream_fd_if_used;
+
+static wait_queue_head_t stream_fd_wait_queue;
+static mali_profiling_counter *global_mali_profiling_counters = NULL;
+static u32 num_global_mali_profiling_counters = 0;
+
+static mali_profiling_stream_list *global_mali_stream_list = NULL;
+static mali_profiling_stream *mali_counter_stream = NULL;
+static mali_profiling_stream *mali_core_activity_stream = NULL;
+static u64 mali_core_activity_stream_dequeue_time = 0;
+static spinlock_t mali_activity_lock;
+static u32 mali_activity_cores_num = 0;
+static struct hrtimer profiling_sampling_timer;
+
+const char *_mali_mem_counter_descriptions[] = _MALI_MEM_COUTNER_DESCRIPTIONS;
+const char *_mali_special_counter_descriptions[] = _MALI_SPCIAL_COUNTER_DESCRIPTIONS;
+
+static u32 current_profiling_pid = 0;
+
+static void _mali_profiling_stream_list_destory(mali_profiling_stream_list *profiling_stream_list)
+{
+ mali_profiling_stream *profiling_stream, *tmp_profiling_stream;
+ MALI_DEBUG_ASSERT_POINTER(profiling_stream_list);
+
+ list_for_each_entry_safe(profiling_stream, tmp_profiling_stream, &profiling_stream_list->free_list, list) {
+ list_del(&profiling_stream->list);
+ kfree(profiling_stream);
+ }
+
+ list_for_each_entry_safe(profiling_stream, tmp_profiling_stream, &profiling_stream_list->queue_list, list) {
+ list_del(&profiling_stream->list);
+ kfree(profiling_stream);
+ }
+
+ kfree(profiling_stream_list);
+}
+
+static void _mali_profiling_global_stream_list_free(void)
+{
+ mali_profiling_stream *profiling_stream, *tmp_profiling_stream;
+ unsigned long irq_flags;
+
+ MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list);
+ spin_lock_irqsave(&global_mali_stream_list->spin_lock, irq_flags);
+ list_for_each_entry_safe(profiling_stream, tmp_profiling_stream, &global_mali_stream_list->queue_list, list) {
+ profiling_stream->used_size = 0;
+ list_move(&profiling_stream->list, &global_mali_stream_list->free_list);
+ }
+ spin_unlock_irqrestore(&global_mali_stream_list->spin_lock, irq_flags);
+}
+
+static _mali_osk_errcode_t _mali_profiling_global_stream_list_dequeue(struct list_head *stream_list, mali_profiling_stream **new_mali_profiling_stream)
+{
+ unsigned long irq_flags;
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_OK;
+ MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list);
+ MALI_DEBUG_ASSERT_POINTER(stream_list);
+
+ spin_lock_irqsave(&global_mali_stream_list->spin_lock, irq_flags);
+
+ if (!list_empty(stream_list)) {
+ *new_mali_profiling_stream = list_entry(stream_list->next, mali_profiling_stream, list);
+ list_del_init(&(*new_mali_profiling_stream)->list);
+ } else {
+ ret = _MALI_OSK_ERR_NOMEM;
+ }
+
+ spin_unlock_irqrestore(&global_mali_stream_list->spin_lock, irq_flags);
+
+ return ret;
+}
+
+static void _mali_profiling_global_stream_list_queue(struct list_head *stream_list, mali_profiling_stream *current_mali_profiling_stream)
+{
+ unsigned long irq_flags;
+ MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list);
+ MALI_DEBUG_ASSERT_POINTER(stream_list);
+
+ spin_lock_irqsave(&global_mali_stream_list->spin_lock, irq_flags);
+ list_add_tail(&current_mali_profiling_stream->list, stream_list);
+ spin_unlock_irqrestore(&global_mali_stream_list->spin_lock, irq_flags);
+}
+
+static mali_bool _mali_profiling_global_stream_queue_list_if_empty(void)
+{
+ MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list);
+ return list_empty(&global_mali_stream_list->queue_list);
+}
+
+static u32 _mali_profiling_global_stream_queue_list_next_size(void)
+{
+ unsigned long irq_flags;
+ u32 size = 0;
+ MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list);
+
+ spin_lock_irqsave(&global_mali_stream_list->spin_lock, irq_flags);
+ if (!list_empty(&global_mali_stream_list->queue_list)) {
+ mali_profiling_stream *next_mali_profiling_stream =
+ list_entry(global_mali_stream_list->queue_list.next, mali_profiling_stream, list);
+ size = next_mali_profiling_stream->used_size;
+ }
+ spin_unlock_irqrestore(&global_mali_stream_list->spin_lock, irq_flags);
+ return size;
+}
+
+/* The mali profiling stream file operations functions. */
+static ssize_t _mali_profiling_stream_read(
+ struct file *filp,
+ char __user *buffer,
+ size_t size,
+ loff_t *f_pos);
+
+static unsigned int _mali_profiling_stream_poll(struct file *filp, poll_table *wait);
+
+static int _mali_profiling_stream_release(struct inode *inode, struct file *filp);
+
+/* The timeline stream file operations structure. */
+static const struct file_operations mali_profiling_stream_fops = {
+ .release = _mali_profiling_stream_release,
+ .read = _mali_profiling_stream_read,
+ .poll = _mali_profiling_stream_poll,
+};
+
+static ssize_t _mali_profiling_stream_read(
+ struct file *filp,
+ char __user *buffer,
+ size_t size,
+ loff_t *f_pos)
+{
+ u32 copy_len = 0;
+ mali_profiling_stream *current_mali_profiling_stream;
+ u32 used_size;
+ MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list);
+
+ while (!_mali_profiling_global_stream_queue_list_if_empty()) {
+ used_size = _mali_profiling_global_stream_queue_list_next_size();
+ if (used_size <= ((u32)size - copy_len)) {
+ current_mali_profiling_stream = NULL;
+ _mali_profiling_global_stream_list_dequeue(&global_mali_stream_list->queue_list,
+ &current_mali_profiling_stream);
+ MALI_DEBUG_ASSERT_POINTER(current_mali_profiling_stream);
+ if (copy_to_user(&buffer[copy_len], current_mali_profiling_stream->data, current_mali_profiling_stream->used_size)) {
+ current_mali_profiling_stream->used_size = 0;
+ _mali_profiling_global_stream_list_queue(&global_mali_stream_list->free_list, current_mali_profiling_stream);
+ return -EFAULT;
+ }
+ copy_len += current_mali_profiling_stream->used_size;
+ current_mali_profiling_stream->used_size = 0;
+ _mali_profiling_global_stream_list_queue(&global_mali_stream_list->free_list, current_mali_profiling_stream);
+ } else {
+ break;
+ }
+ }
+ return (ssize_t)copy_len;
+}
+
+static unsigned int _mali_profiling_stream_poll(struct file *filp, poll_table *wait)
+{
+ poll_wait(filp, &stream_fd_wait_queue, wait);
+ if (!_mali_profiling_global_stream_queue_list_if_empty())
+ return POLLIN;
+ return 0;
+}
+
+static int _mali_profiling_stream_release(struct inode *inode, struct file *filp)
+{
+ _mali_osk_atomic_init(&stream_fd_if_used, 0);
+ return 0;
+}
+
+/* Helpers for packing and unpacking control packets and stream data. */
+static void _mali_profiling_set_packet_size(unsigned char *const buf, const u32 size)
+{
+ u32 i;
+
+ for (i = 0; i < sizeof(size); ++i)
+ buf[i] = (size >> 8 * i) & 0xFF;
+}
+
+static u32 _mali_profiling_get_packet_size(unsigned char *const buf)
+{
+ u32 i;
+ u32 size = 0;
+ for (i = 0; i < sizeof(size); ++i)
+ size |= (u32)buf[i] << 8 * i;
+ return size;
+}
+
+static u32 _mali_profiling_read_packet_int(unsigned char *const buf, u32 *const pos, u32 const packet_size)
+{
+ u64 int_value = 0;
+ u8 shift = 0;
+ u8 byte_value = ~0;
+
+ while ((byte_value & 0x80) != 0) {
+ MALI_DEBUG_ASSERT((*pos) < packet_size);
+ byte_value = buf[*pos];
+ *pos += 1;
+ int_value |= (u32)(byte_value & 0x7f) << shift;
+ shift += 7;
+ }
+
+ if (shift < 8 * sizeof(int_value) && (byte_value & 0x40) != 0) {
+ int_value |= -(1ULL << shift);
+ }
+
+ return (u32)int_value;
+}
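+
+/*
+ * Worked example (illustrative only): the reader above decodes the
+ * LEB128-style bytes { 0xE5, 0x0E } as 0x65 | (0x0E << 7) = 1893, and the
+ * single byte 0x7E as -2, since bit 6 of the final byte acts as the sign
+ * bit and triggers the sign extension above.
+ */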
+
+static u32 _mali_profiling_pack_int(u8 *const buf, u32 const buf_size, u32 const pos, s32 value)
+{
+ u32 add_bytes = 0;
+ int more = 1;
+ while (more) {
+ /* low order 7 bits of val */
+ char byte_value = value & 0x7f;
+ value >>= 7;
+
+ if ((value == 0 && (byte_value & 0x40) == 0) || (value == -1 && (byte_value & 0x40) != 0)) {
+ more = 0;
+ } else {
+ byte_value |= 0x80;
+ }
+
+ MALI_DEBUG_ASSERT((pos + add_bytes) < buf_size);
+ buf[pos + add_bytes] = byte_value;
+ add_bytes++;
+ }
+
+ return add_bytes;
+}
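+
+/*
+ * Sketch of the round trip (illustrative only): _mali_profiling_pack_int()
+ * with value 1893 writes { 0xE5, 0x0E } and returns 2, which
+ * _mali_profiling_read_packet_int() decodes back to 1893. Values in
+ * [-64, 63] always fit in a single byte.
+ */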
+
+static int _mali_profiling_pack_long(uint8_t *const buf, u32 const buf_size, u32 const pos, s64 val)
+{
+ int add_bytes = 0;
+ int more = 1;
+ while (more) {
+ /* low order 7 bits of x */
+ char byte_value = val & 0x7f;
+ val >>= 7;
+
+ if ((val == 0 && (byte_value & 0x40) == 0) || (val == -1 && (byte_value & 0x40) != 0)) {
+ more = 0;
+ } else {
+ byte_value |= 0x80;
+ }
+
+ MALI_DEBUG_ASSERT((pos + add_bytes) < buf_size);
+ buf[pos + add_bytes] = byte_value;
+ add_bytes++;
+ }
+
+ return add_bytes;
+}
+
+static void _mali_profiling_stream_add_counter(mali_profiling_stream *profiling_stream, s64 current_time, u32 key, u32 counter_value)
+{
+ u32 add_size = STREAM_HEADER_SIZE;
+ MALI_DEBUG_ASSERT_POINTER(profiling_stream);
+ MALI_DEBUG_ASSERT((profiling_stream->used_size) < MALI_PROFILING_STREAM_BUFFER_SIZE);
+
+ profiling_stream->data[profiling_stream->used_size] = STREAM_HEADER_COUNTER_VALUE;
+
+ add_size += _mali_profiling_pack_long(profiling_stream->data, MALI_PROFILING_STREAM_BUFFER_SIZE,
+ profiling_stream->used_size + add_size, current_time);
+ add_size += _mali_profiling_pack_int(profiling_stream->data, MALI_PROFILING_STREAM_BUFFER_SIZE,
+ profiling_stream->used_size + add_size, (s32)0);
+ add_size += _mali_profiling_pack_int(profiling_stream->data, MALI_PROFILING_STREAM_BUFFER_SIZE,
+ profiling_stream->used_size + add_size, (s32)key);
+ add_size += _mali_profiling_pack_int(profiling_stream->data, MALI_PROFILING_STREAM_BUFFER_SIZE,
+ profiling_stream->used_size + add_size, (s32)counter_value);
+
+ _mali_profiling_set_packet_size(profiling_stream->data + profiling_stream->used_size + 1,
+ add_size - STREAM_HEADER_SIZE);
+
+ profiling_stream->used_size += add_size;
+}
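+
+/*
+ * Resulting packet layout (as assembled above, assuming STREAM_HEADER_SIZE
+ * covers the 1-byte type plus the 4-byte size field): one header byte
+ * (STREAM_HEADER_COUNTER_VALUE), a little-endian payload size, then four
+ * varints: timestamp, a zero placeholder, the counter key and its value.
+ */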
+
+/* The callback function for the sampling timer. */
+static enum hrtimer_restart _mali_profiling_sampling_counters(struct hrtimer *timer)
+{
+ u32 counter_index;
+ s64 current_time;
+ MALI_DEBUG_ASSERT_POINTER(global_mali_profiling_counters);
+ MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list);
+
+ MALI_DEBUG_ASSERT(NULL == mali_counter_stream);
+ if (_MALI_OSK_ERR_OK == _mali_profiling_global_stream_list_dequeue(
+ &global_mali_stream_list->free_list, &mali_counter_stream)) {
+
+ MALI_DEBUG_ASSERT_POINTER(mali_counter_stream);
+ MALI_DEBUG_ASSERT(0 == mali_counter_stream->used_size);
+
+ /* Capture l2 cache counter values if enabled */
+ if (MALI_TRUE == l2_cache_counter_if_enabled) {
+ int i, j = 0;
+ _mali_profiling_l2_counter_values l2_counters_values;
+ _mali_profiling_get_l2_counters(&l2_counters_values);
+
+ for (i = COUNTER_L2_0_C0; i <= COUNTER_L2_2_C1; i++) {
+ if (0 == (j % 2))
+ _mali_osk_profiling_record_global_counters(i, l2_counters_values.cores[j / 2].value0);
+ else
+ _mali_osk_profiling_record_global_counters(i, l2_counters_values.cores[j / 2].value1);
+ j++;
+ }
+ }
+
+ current_time = (s64)_mali_osk_boot_time_get_ns();
+
+ /* Add all enabled counter values into stream */
+ for (counter_index = 0; counter_index < num_global_mali_profiling_counters; counter_index++) {
+ /* No need to sample these counters here. */
+ if (global_mali_profiling_counters[counter_index].enabled) {
+ if ((global_mali_profiling_counters[counter_index].counter_id >= FIRST_MEM_COUNTER &&
+ global_mali_profiling_counters[counter_index].counter_id <= LAST_MEM_COUNTER)
+ || (global_mali_profiling_counters[counter_index].counter_id == COUNTER_VP_ACTIVITY)
+ || (global_mali_profiling_counters[counter_index].counter_id == COUNTER_FP_ACTIVITY)
+ || (global_mali_profiling_counters[counter_index].counter_id == COUNTER_FILMSTRIP)) {
+
+ continue;
+ }
+
+ if (global_mali_profiling_counters[counter_index].counter_id >= COUNTER_L2_0_C0 &&
+ global_mali_profiling_counters[counter_index].counter_id <= COUNTER_L2_2_C1) {
+
+ u32 prev_val = global_mali_profiling_counters[counter_index].prev_counter_value;
+
+ _mali_profiling_stream_add_counter(mali_counter_stream, current_time, global_mali_profiling_counters[counter_index].key,
+ global_mali_profiling_counters[counter_index].current_counter_value - prev_val);
+
+ prev_val = global_mali_profiling_counters[counter_index].current_counter_value;
+
+ global_mali_profiling_counters[counter_index].prev_counter_value = prev_val;
+ } else {
+
+ if (global_mali_profiling_counters[counter_index].counter_id == COUNTER_TOTAL_ALLOC_PAGES) {
+ u32 total_alloc_mem = _mali_ukk_report_memory_usage();
+ global_mali_profiling_counters[counter_index].current_counter_value = total_alloc_mem / _MALI_OSK_MALI_PAGE_SIZE;
+ }
+ _mali_profiling_stream_add_counter(mali_counter_stream, current_time, global_mali_profiling_counters[counter_index].key,
+ global_mali_profiling_counters[counter_index].current_counter_value);
+ if (global_mali_profiling_counters[counter_index].counter_id < FIRST_SPECIAL_COUNTER)
+ global_mali_profiling_counters[counter_index].current_counter_value = 0;
+ }
+ }
+ }
+ _mali_profiling_global_stream_list_queue(&global_mali_stream_list->queue_list, mali_counter_stream);
+ mali_counter_stream = NULL;
+ } else {
+ MALI_DEBUG_PRINT(1, ("Not enough mali profiling stream buffer!\n"));
+ }
+
+ wake_up_interruptible(&stream_fd_wait_queue);
+
+ /* Enable the sampling timer again. */
+ if (0 != num_counters_enabled && 0 != profiling_sample_rate) {
+ hrtimer_forward_now(&profiling_sampling_timer, ns_to_ktime(profiling_sample_rate));
+ return HRTIMER_RESTART;
+ }
+ return HRTIMER_NORESTART;
+}
+
+static void _mali_profiling_sampling_core_activity_switch(int counter_id, int core, u32 activity, u32 pid)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&mali_activity_lock, irq_flags);
+ if (activity == 0)
+ mali_activity_cores_num--;
+ else
+ mali_activity_cores_num++;
+ spin_unlock_irqrestore(&mali_activity_lock, irq_flags);
+
+ if (NULL != global_mali_profiling_counters) {
+ int i;
+ for (i = 0; i < num_global_mali_profiling_counters; i++) {
+ if (counter_id == global_mali_profiling_counters[i].counter_id && global_mali_profiling_counters[i].enabled) {
+ u64 current_time = _mali_osk_boot_time_get_ns();
+ u32 add_size = STREAM_HEADER_SIZE;
+
+ if (NULL != mali_core_activity_stream) {
+ if ((mali_core_activity_stream_dequeue_time + MALI_PROFILING_STREAM_HOLD_TIME < current_time) ||
+ (MALI_PROFILING_STREAM_DATA_DEFAULT_SIZE > MALI_PROFILING_STREAM_BUFFER_SIZE
+ - mali_core_activity_stream->used_size)) {
+ _mali_profiling_global_stream_list_queue(&global_mali_stream_list->queue_list, mali_core_activity_stream);
+ mali_core_activity_stream = NULL;
+ wake_up_interruptible(&stream_fd_wait_queue);
+ }
+ }
+
+ if (NULL == mali_core_activity_stream) {
+ if (_MALI_OSK_ERR_OK == _mali_profiling_global_stream_list_dequeue(
+ &global_mali_stream_list->free_list, &mali_core_activity_stream)) {
+ mali_core_activity_stream_dequeue_time = current_time;
+ } else {
+ MALI_DEBUG_PRINT(1, ("Not enough mali profiling stream buffer!\n"));
+ wake_up_interruptible(&stream_fd_wait_queue);
+ break;
+ }
+
+ }
+
+ mali_core_activity_stream->data[mali_core_activity_stream->used_size] = STREAM_HEADER_CORE_ACTIVITY;
+
+ add_size += _mali_profiling_pack_long(mali_core_activity_stream->data,
+ MALI_PROFILING_STREAM_BUFFER_SIZE, mali_core_activity_stream->used_size + add_size, (s64)current_time);
+ add_size += _mali_profiling_pack_int(mali_core_activity_stream->data,
+ MALI_PROFILING_STREAM_BUFFER_SIZE, mali_core_activity_stream->used_size + add_size, core);
+ add_size += _mali_profiling_pack_int(mali_core_activity_stream->data,
+ MALI_PROFILING_STREAM_BUFFER_SIZE, mali_core_activity_stream->used_size + add_size, (s32)global_mali_profiling_counters[i].key);
+ add_size += _mali_profiling_pack_int(mali_core_activity_stream->data,
+ MALI_PROFILING_STREAM_BUFFER_SIZE, mali_core_activity_stream->used_size + add_size, activity);
+ add_size += _mali_profiling_pack_int(mali_core_activity_stream->data,
+ MALI_PROFILING_STREAM_BUFFER_SIZE, mali_core_activity_stream->used_size + add_size, pid);
+
+ _mali_profiling_set_packet_size(mali_core_activity_stream->data + mali_core_activity_stream->used_size + 1,
+ add_size - STREAM_HEADER_SIZE);
+
+ mali_core_activity_stream->used_size += add_size;
+
+ if (0 == mali_activity_cores_num) {
+ _mali_profiling_global_stream_list_queue(&global_mali_stream_list->queue_list, mali_core_activity_stream);
+ mali_core_activity_stream = NULL;
+ wake_up_interruptible(&stream_fd_wait_queue);
+ }
+
+ break;
+ }
+ }
+ }
+}
+
+static mali_bool _mali_profiling_global_counters_init(void)
+{
+ int core_id, counter_index, counter_number, counter_id;
+ u32 num_l2_cache_cores;
+ u32 num_pp_cores;
+ u32 num_gp_cores = 1;
+
+ MALI_DEBUG_ASSERT(NULL == global_mali_profiling_counters);
+ num_pp_cores = mali_pp_get_glob_num_pp_cores();
+ num_l2_cache_cores = mali_l2_cache_core_get_glob_num_l2_cores();
+
+ num_global_mali_profiling_counters = 3 * (num_gp_cores + num_pp_cores) + 2 * num_l2_cache_cores
+ + MALI_PROFILING_SW_COUNTERS_NUM
+ + MALI_PROFILING_SPECIAL_COUNTERS_NUM
+ + MALI_PROFILING_MEM_COUNTERS_NUM;
+ global_mali_profiling_counters = _mali_osk_calloc(num_global_mali_profiling_counters, sizeof(mali_profiling_counter));
+
+ if (NULL == global_mali_profiling_counters)
+ return MALI_FALSE;
+
+ counter_index = 0;
+ /* Vertex processor counters */
+ for (core_id = 0; core_id < num_gp_cores; core_id++) {
+ global_mali_profiling_counters[counter_index].counter_id = ACTIVITY_VP_0 + core_id;
+ _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name,
+ sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_VP_%d_active", mali_name, core_id);
+
+ for (counter_number = 0; counter_number < 2; counter_number++) {
+ counter_index++;
+ global_mali_profiling_counters[counter_index].counter_id = COUNTER_VP_0_C0 + (2 * core_id) + counter_number;
+ _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name,
+ sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_VP_%d_cnt%d", mali_name, core_id, counter_number);
+ }
+ }
+
+ /* Fragment processors' counters */
+ for (core_id = 0; core_id < num_pp_cores; core_id++) {
+ counter_index++;
+ global_mali_profiling_counters[counter_index].counter_id = ACTIVITY_FP_0 + core_id;
+ _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name,
+ sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_FP_%d_active", mali_name, core_id);
+
+ for (counter_number = 0; counter_number < 2; counter_number++) {
+ counter_index++;
+ global_mali_profiling_counters[counter_index].counter_id = COUNTER_FP_0_C0 + (2 * core_id) + counter_number;
+ _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name,
+ sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_FP_%d_cnt%d", mali_name, core_id, counter_number);
+ }
+ }
+
+ /* L2 Cache counters */
+ for (core_id = 0; core_id < num_l2_cache_cores; core_id++) {
+ for (counter_number = 0; counter_number < 2; counter_number++) {
+ counter_index++;
+ global_mali_profiling_counters[counter_index].counter_id = COUNTER_L2_0_C0 + (2 * core_id) + counter_number;
+ _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name,
+ sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_L2_%d_cnt%d", mali_name, core_id, counter_number);
+ }
+ }
+
+ /* Now set up the software counter entries */
+ for (counter_id = FIRST_SW_COUNTER; counter_id <= LAST_SW_COUNTER; counter_id++) {
+ counter_index++;
+
+ if (0 == first_sw_counter_index)
+ first_sw_counter_index = counter_index;
+
+ global_mali_profiling_counters[counter_index].counter_id = counter_id;
+ _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name,
+ sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_SW_%d", mali_name, counter_id - FIRST_SW_COUNTER);
+ }
+
+ /* Now set up the special counter entries */
+ for (counter_id = FIRST_SPECIAL_COUNTER; counter_id <= LAST_SPECIAL_COUNTER; counter_id++) {
+
+ counter_index++;
+ _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name,
+ sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_%s",
+ mali_name, _mali_special_counter_descriptions[counter_id - FIRST_SPECIAL_COUNTER]);
+
+ global_mali_profiling_counters[counter_index].counter_id = counter_id;
+ }
+
+ /* Now set up the mem counter entries*/
+ for (counter_id = FIRST_MEM_COUNTER; counter_id <= LAST_MEM_COUNTER; counter_id++) {
+
+ counter_index++;
+ _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name,
+ sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_%s",
+ mali_name, _mali_mem_counter_descriptions[counter_id - FIRST_MEM_COUNTER]);
+
+ global_mali_profiling_counters[counter_index].counter_id = counter_id;
+ }
+
+ MALI_DEBUG_ASSERT((counter_index + 1) == num_global_mali_profiling_counters);
+
+ return MALI_TRUE;
+}
+
+void _mali_profiling_notification_mem_counter(struct mali_session_data *session, u32 counter_id, u32 key, int enable)
+{
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ if (NULL != session) {
+ _mali_osk_notification_t *notification;
+ _mali_osk_notification_queue_t *queue;
+
+ queue = session->ioctl_queue;
+ MALI_DEBUG_ASSERT(NULL != queue);
+
+ notification = _mali_osk_notification_create(_MALI_NOTIFICATION_ANNOTATE_PROFILING_MEM_COUNTER,
+ sizeof(_mali_uk_annotate_profiling_mem_counter_s));
+
+ if (NULL != notification) {
+ _mali_uk_annotate_profiling_mem_counter_s *data = notification->result_buffer;
+ data->counter_id = counter_id;
+ data->key = key;
+ data->enable = enable;
+
+ _mali_osk_notification_queue_send(queue, notification);
+ } else {
+ MALI_PRINT_ERROR(("Failed to create notification object!\n"));
+ }
+ } else {
+ MALI_PRINT_ERROR(("Failed to find the right session!\n"));
+ }
+}
+
+void _mali_profiling_notification_enable(struct mali_session_data *session, u32 sampling_rate, int enable)
+{
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ if (NULL != session) {
+ _mali_osk_notification_t *notification;
+ _mali_osk_notification_queue_t *queue;
+
+ queue = session->ioctl_queue;
+ MALI_DEBUG_ASSERT(NULL != queue);
+
+ notification = _mali_osk_notification_create(_MALI_NOTIFICATION_ANNOTATE_PROFILING_ENABLE,
+ sizeof(_mali_uk_annotate_profiling_enable_s));
+
+ if (NULL != notification) {
+ _mali_uk_annotate_profiling_enable_s *data = notification->result_buffer;
+ data->sampling_rate = sampling_rate;
+ data->enable = enable;
+
+ _mali_osk_notification_queue_send(queue, notification);
+ } else {
+ MALI_PRINT_ERROR(("Failed to create notification object!\n"));
+ }
+ } else {
+ MALI_PRINT_ERROR(("Failed to find the right session!\n"));
+ }
+}
+
+
+_mali_osk_errcode_t _mali_osk_profiling_init(mali_bool auto_start)
+{
+ int i;
+ mali_profiling_stream *new_mali_profiling_stream = NULL;
+ mali_profiling_stream_list *new_mali_profiling_stream_list = NULL;
+ if (MALI_TRUE == auto_start) {
+ mali_set_user_setting(_MALI_UK_USER_SETTING_SW_EVENTS_ENABLE, MALI_TRUE);
+ }
+
+ /* Initialize the global_mali_stream_list. */
+ MALI_DEBUG_ASSERT(NULL == global_mali_stream_list);
+ new_mali_profiling_stream_list = (mali_profiling_stream_list *)kmalloc(sizeof(mali_profiling_stream_list), GFP_KERNEL);
+
+ if (NULL == new_mali_profiling_stream_list) {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ spin_lock_init(&new_mali_profiling_stream_list->spin_lock);
+ INIT_LIST_HEAD(&new_mali_profiling_stream_list->free_list);
+ INIT_LIST_HEAD(&new_mali_profiling_stream_list->queue_list);
+
+ spin_lock_init(&mali_activity_lock);
+ mali_activity_cores_num = 0;
+
+ for (i = 0; i < MALI_PROFILING_STREAM_BUFFER_NUM; i++) {
+ new_mali_profiling_stream = (mali_profiling_stream *)kmalloc(sizeof(mali_profiling_stream), GFP_KERNEL);
+ if (NULL == new_mali_profiling_stream) {
+ _mali_profiling_stream_list_destory(new_mali_profiling_stream_list);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ INIT_LIST_HEAD(&new_mali_profiling_stream->list);
+ new_mali_profiling_stream->used_size = 0;
+ list_add_tail(&new_mali_profiling_stream->list, &new_mali_profiling_stream_list->free_list);
+
+ }
+
+ _mali_osk_atomic_init(&stream_fd_if_used, 0);
+ init_waitqueue_head(&stream_fd_wait_queue);
+
+ hrtimer_init(&profiling_sampling_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+
+ profiling_sampling_timer.function = _mali_profiling_sampling_counters;
+
+ global_mali_stream_list = new_mali_profiling_stream_list;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void _mali_osk_profiling_term(void)
+{
+ if (0 != profiling_sample_rate) {
+ hrtimer_cancel(&profiling_sampling_timer);
+ profiling_sample_rate = 0;
+ }
+ _mali_osk_atomic_term(&stream_fd_if_used);
+
+ if (NULL != global_mali_profiling_counters) {
+ _mali_osk_free(global_mali_profiling_counters);
+ global_mali_profiling_counters = NULL;
+ num_global_mali_profiling_counters = 0;
+ }
+
+ if (NULL != global_mali_stream_list) {
+ _mali_profiling_stream_list_destory(global_mali_stream_list);
+ global_mali_stream_list = NULL;
+ }
+
+}
+
+void _mali_osk_profiling_stop_sampling(u32 pid)
+{
+ if (pid == current_profiling_pid) {
+
+ int i;
+ /* Reset all counter states when closing connection.*/
+ for (i = 0; i < num_global_mali_profiling_counters; ++i) {
+ _mali_profiling_set_event(global_mali_profiling_counters[i].counter_id, MALI_HW_CORE_NO_COUNTER);
+ global_mali_profiling_counters[i].enabled = 0;
+ global_mali_profiling_counters[i].prev_counter_value = 0;
+ global_mali_profiling_counters[i].current_counter_value = 0;
+ }
+ l2_cache_counter_if_enabled = MALI_FALSE;
+ num_counters_enabled = 0;
+ mem_counters_enabled = 0;
+ _mali_profiling_control(FBDUMP_CONTROL_ENABLE, 0);
+ _mali_profiling_control(SW_COUNTER_ENABLE, 0);
+ /* Delete sampling timer when closing connection. */
+ if (0 != profiling_sample_rate) {
+ hrtimer_cancel(&profiling_sampling_timer);
+ profiling_sample_rate = 0;
+ }
+ current_profiling_pid = 0;
+ }
+}
+
+void _mali_osk_profiling_add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4)
+{
+ /* Record the frequency & voltage in global_mali_profiling_counters here. */
+ if (0 != profiling_sample_rate) {
+ u32 channel;
+ u32 state;
+ channel = (event_id >> 16) & 0xFF;
+ state = ((event_id >> 24) & 0xF) << 24;
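+ /*
+ * event_id layout as decoded here (illustrative summary): bits 24..27
+ * carry the event type (kept in place in 'state'), bits 16..23 the
+ * channel, and bits 0..15 the reason code.
+ */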
+
+ switch (state) {
+ case MALI_PROFILING_EVENT_TYPE_SINGLE:
+ if ((MALI_PROFILING_EVENT_CHANNEL_GPU >> 16) == channel) {
+ u32 reason = (event_id & 0xFFFF);
+ if (MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE == reason) {
+ _mali_osk_profiling_record_global_counters(COUNTER_FREQUENCY, data0);
+ _mali_osk_profiling_record_global_counters(COUNTER_VOLTAGE, data1);
+ }
+ }
+ break;
+ case MALI_PROFILING_EVENT_TYPE_START:
+ if ((MALI_PROFILING_EVENT_CHANNEL_GP0 >> 16) == channel) {
+ _mali_profiling_sampling_core_activity_switch(COUNTER_VP_ACTIVITY, 0, 1, data1);
+ } else if (channel >= (MALI_PROFILING_EVENT_CHANNEL_PP0 >> 16) &&
+ (MALI_PROFILING_EVENT_CHANNEL_PP7 >> 16) >= channel) {
+ u32 core_id = channel - (MALI_PROFILING_EVENT_CHANNEL_PP0 >> 16);
+ _mali_profiling_sampling_core_activity_switch(COUNTER_FP_ACTIVITY, core_id, 1, data1);
+ }
+ break;
+ case MALI_PROFILING_EVENT_TYPE_STOP:
+ if ((MALI_PROFILING_EVENT_CHANNEL_GP0 >> 16) == channel) {
+ _mali_profiling_sampling_core_activity_switch(COUNTER_VP_ACTIVITY, 0, 0, 0);
+ } else if (channel >= (MALI_PROFILING_EVENT_CHANNEL_PP0 >> 16) &&
+ (MALI_PROFILING_EVENT_CHANNEL_PP7 >> 16) >= channel) {
+ u32 core_id = channel - (MALI_PROFILING_EVENT_CHANNEL_PP0 >> 16);
+ _mali_profiling_sampling_core_activity_switch(COUNTER_FP_ACTIVITY, core_id, 0, 0);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ trace_mali_timeline_event(event_id, data0, data1, data2, data3, data4);
+}
+
+void _mali_osk_profiling_report_sw_counters(u32 *counters)
+{
+ trace_mali_sw_counters(_mali_osk_get_pid(), _mali_osk_get_tid(), NULL, counters);
+}
+
+void _mali_osk_profiling_record_global_counters(int counter_id, u32 value)
+{
+ if (NULL != global_mali_profiling_counters) {
+ int i;
+ for (i = 0; i < num_global_mali_profiling_counters; i++) {
+ if (counter_id == global_mali_profiling_counters[i].counter_id && global_mali_profiling_counters[i].enabled) {
+ global_mali_profiling_counters[i].current_counter_value = value;
+ break;
+ }
+ }
+ }
+}
+
+_mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args)
+{
+ /* Always add the process and thread identifiers in the first two data elements for events from user space. */
+ _mali_osk_profiling_add_event(args->event_id, _mali_osk_get_pid(), _mali_osk_get_tid(), args->data[2], args->data[3], args->data[4]);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_sw_counters_report(_mali_uk_sw_counters_report_s *args)
+{
+ u32 *counters = (u32 *)(uintptr_t)args->counters;
+
+ _mali_osk_profiling_report_sw_counters(counters);
+
+ if (NULL != global_mali_profiling_counters) {
+ int i;
+ for (i = 0; i < MALI_PROFILING_SW_COUNTERS_NUM; i ++) {
+ if (global_mali_profiling_counters[first_sw_counter_index + i].enabled) {
+ global_mali_profiling_counters[first_sw_counter_index + i].current_counter_value = *(counters + i);
+ }
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_profiling_stream_fd_get(_mali_uk_profiling_stream_fd_get_s *args)
+{
+ struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ if (1 == _mali_osk_atomic_inc_return(&stream_fd_if_used)) {
+
+ s32 fd = anon_inode_getfd("[mali_profiling_stream]", &mali_profiling_stream_fops,
+ session,
+ O_RDONLY | O_CLOEXEC);
+
+ if (0 > fd) {
+ _mali_osk_atomic_dec(&stream_fd_if_used);
+ return _MALI_OSK_ERR_FAULT;
+ }
+ args->stream_fd = fd;
+ } else {
+ _mali_osk_atomic_dec(&stream_fd_if_used);
+ args->stream_fd = -1;
+ return _MALI_OSK_ERR_BUSY;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_profiling_control_set(_mali_uk_profiling_control_set_s *args)
+{
+ u32 control_packet_size;
+ u32 output_buffer_size;
+
+ struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx;
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ if (NULL == global_mali_profiling_counters && MALI_FALSE == _mali_profiling_global_counters_init()) {
+ MALI_PRINT_ERROR(("Failed to create global_mali_profiling_counters.\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ control_packet_size = args->control_packet_size;
+ output_buffer_size = args->response_packet_size;
+
+ if (0 != control_packet_size) {
+ u8 control_type;
+ u8 *control_packet_data;
+ u8 *response_packet_data;
+ u32 version_length = sizeof(utgard_setup_version) - 1;
+
+ control_packet_data = (u8 *)(uintptr_t)args->control_packet_data;
+ MALI_DEBUG_ASSERT_POINTER(control_packet_data);
+ response_packet_data = (u8 *)(uintptr_t)args->response_packet_data;
+ MALI_DEBUG_ASSERT_POINTER(response_packet_data);
+
+ /* Decide whether the Utgard setup version prefix should be skipped. */
+ if (control_packet_size >= version_length) {
+ if (0 == memcmp(control_packet_data, utgard_setup_version, version_length)) {
+ if (control_packet_size == version_length) {
+ args->response_packet_size = 0;
+ return _MALI_OSK_ERR_OK;
+ } else {
+ control_packet_data += version_length;
+ control_packet_size -= version_length;
+ }
+ }
+ }
+
+ current_profiling_pid = _mali_osk_get_pid();
+
+ control_type = control_packet_data[0];
+ switch (control_type) {
+ case PACKET_HEADER_COUNTERS_REQUEST: {
+ int i;
+
+ if (PACKET_HEADER_SIZE > control_packet_size ||
+ control_packet_size != _mali_profiling_get_packet_size(control_packet_data + 1)) {
+ MALI_PRINT_ERROR(("Wrong control packet size, type 0x%x,size 0x%x.\n", control_packet_data[0], control_packet_size));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Send supported counters */
+ *response_packet_data = PACKET_HEADER_COUNTERS_ACK;
+ args->response_packet_size = PACKET_HEADER_SIZE;
+
+ for (i = 0; i < num_global_mali_profiling_counters; ++i) {
+ u32 name_size = strlen(global_mali_profiling_counters[i].counter_name);
+
+ if ((args->response_packet_size + name_size + 1) > output_buffer_size) {
+ MALI_PRINT_ERROR(("Response packet data is too large..\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ memcpy(response_packet_data + args->response_packet_size,
+ global_mali_profiling_counters[i].counter_name, name_size + 1);
+
+ args->response_packet_size += (name_size + 1);
+
+ if (global_mali_profiling_counters[i].counter_id == COUNTER_VP_ACTIVITY) {
+ args->response_packet_size += _mali_profiling_pack_int(response_packet_data,
+ output_buffer_size, args->response_packet_size, (s32)1);
+ } else if (global_mali_profiling_counters[i].counter_id == COUNTER_FP_ACTIVITY) {
+ args->response_packet_size += _mali_profiling_pack_int(response_packet_data,
+ output_buffer_size, args->response_packet_size, (s32)mali_pp_get_glob_num_pp_cores());
+ } else {
+ args->response_packet_size += _mali_profiling_pack_int(response_packet_data,
+ output_buffer_size, args->response_packet_size, (s32)-1);
+ }
+ }
+
+ _mali_profiling_set_packet_size(response_packet_data + 1, args->response_packet_size);
+ break;
+ }
+
+ case PACKET_HEADER_COUNTERS_ENABLE: {
+ int i;
+ u32 request_pos = PACKET_HEADER_SIZE;
+ mali_bool sw_counter_if_enabled = MALI_FALSE;
+
+ if (PACKET_HEADER_SIZE > control_packet_size ||
+ control_packet_size != _mali_profiling_get_packet_size(control_packet_data + 1)) {
+ MALI_PRINT_ERROR(("Wrong control packet size , type 0x%x,size 0x%x.\n", control_packet_data[0], control_packet_size));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Initialize all counter states before enabling the requested counters. */
+ for (i = 0; i < num_global_mali_profiling_counters; ++i) {
+ _mali_profiling_set_event(global_mali_profiling_counters[i].counter_id, MALI_HW_CORE_NO_COUNTER);
+ global_mali_profiling_counters[i].enabled = 0;
+ global_mali_profiling_counters[i].prev_counter_value = 0;
+ global_mali_profiling_counters[i].current_counter_value = 0;
+
+ if (global_mali_profiling_counters[i].counter_id >= FIRST_MEM_COUNTER &&
+ global_mali_profiling_counters[i].counter_id <= LAST_MEM_COUNTER) {
+ _mali_profiling_notification_mem_counter(session, global_mali_profiling_counters[i].counter_id, 0, 0);
+ }
+ }
+
+ l2_cache_counter_if_enabled = MALI_FALSE;
+ num_counters_enabled = 0;
+ mem_counters_enabled = 0;
+ _mali_profiling_control(FBDUMP_CONTROL_ENABLE, 0);
+ _mali_profiling_control(SW_COUNTER_ENABLE, 0);
+ _mali_profiling_notification_enable(session, 0, 0);
+
+ /* Enable requested counters */
+ while (request_pos < control_packet_size) {
+ u32 begin = request_pos;
+ u32 event;
+ u32 key;
+
+ while (request_pos < control_packet_size && control_packet_data[request_pos] != '\0') {
+ ++request_pos;
+ }
+
+ ++request_pos;
+ event = _mali_profiling_read_packet_int(control_packet_data, &request_pos, control_packet_size);
+ key = _mali_profiling_read_packet_int(control_packet_data, &request_pos, control_packet_size);
+
+ for (i = 0; i < num_global_mali_profiling_counters; ++i) {
+ u32 name_size = strlen((char *)(control_packet_data + begin));
+ if (strncmp(global_mali_profiling_counters[i].counter_name, (char *)(control_packet_data + begin), name_size) == 0) {
+ if (!sw_counter_if_enabled && (FIRST_SW_COUNTER <= global_mali_profiling_counters[i].counter_id
+ && global_mali_profiling_counters[i].counter_id <= LAST_SW_COUNTER)) {
+ sw_counter_if_enabled = MALI_TRUE;
+ _mali_profiling_control(SW_COUNTER_ENABLE, 1);
+ }
+
+ if (COUNTER_FILMSTRIP == global_mali_profiling_counters[i].counter_id) {
+ _mali_profiling_control(FBDUMP_CONTROL_ENABLE, 1);
+ _mali_profiling_control(FBDUMP_CONTROL_RATE, event & 0xff);
+ _mali_profiling_control(FBDUMP_CONTROL_RESIZE_FACTOR, (event >> 8) & 0xff);
+ }
+
+ if (global_mali_profiling_counters[i].counter_id >= FIRST_MEM_COUNTER &&
+ global_mali_profiling_counters[i].counter_id <= LAST_MEM_COUNTER) {
+ _mali_profiling_notification_mem_counter(session, global_mali_profiling_counters[i].counter_id,
+ key, 1);
+ mem_counters_enabled++;
+ }
+
+ global_mali_profiling_counters[i].counter_event = event;
+ global_mali_profiling_counters[i].key = key;
+ global_mali_profiling_counters[i].enabled = 1;
+
+ _mali_profiling_set_event(global_mali_profiling_counters[i].counter_id,
+ global_mali_profiling_counters[i].counter_event);
+ num_counters_enabled++;
+ break;
+ }
+ }
+
+ if (i == num_global_mali_profiling_counters) {
+ MALI_PRINT_ERROR(("Counter name does not match for type %u.\n", control_type));
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+
+ if (PACKET_HEADER_SIZE <= output_buffer_size) {
+ *response_packet_data = PACKET_HEADER_ACK;
+ _mali_profiling_set_packet_size(response_packet_data + 1, PACKET_HEADER_SIZE);
+ args->response_packet_size = PACKET_HEADER_SIZE;
+ } else {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ break;
+ }
+
+ case PACKET_HEADER_START_CAPTURE_VALUE: {
+ u32 live_rate;
+ u32 request_pos = PACKET_HEADER_SIZE;
+
+ if (PACKET_HEADER_SIZE > control_packet_size ||
+ control_packet_size != _mali_profiling_get_packet_size(control_packet_data + 1)) {
+ MALI_PRINT_ERROR(("Wrong control packet size , type 0x%x,size 0x%x.\n", control_packet_data[0], control_packet_size));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Read the sampling rate in nanoseconds and the live rate, then start the capture. */
+ profiling_sample_rate = _mali_profiling_read_packet_int(control_packet_data,
+ &request_pos, control_packet_size);
+
+ live_rate = _mali_profiling_read_packet_int(control_packet_data, &request_pos, control_packet_size);
+
+ if (PACKET_HEADER_SIZE <= output_buffer_size) {
+ *response_packet_data = PACKET_HEADER_ACK;
+ _mali_profiling_set_packet_size(response_packet_data + 1, PACKET_HEADER_SIZE);
+ args->response_packet_size = PACKET_HEADER_SIZE;
+ } else {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ if (0 != num_counters_enabled && 0 != profiling_sample_rate) {
+ _mali_profiling_global_stream_list_free();
+ if (mem_counters_enabled > 0) {
+ _mali_profiling_notification_enable(session, profiling_sample_rate, 1);
+ }
+ hrtimer_start(&profiling_sampling_timer,
+ ktime_set(profiling_sample_rate / 1000000000, profiling_sample_rate % 1000000000),
+ HRTIMER_MODE_REL_PINNED);
+ }
+
+ break;
+ }
+ default:
+ MALI_PRINT_ERROR(("Unsupported profiling packet header type %u.\n", control_type));
+ args->response_packet_size = 0;
+ return _MALI_OSK_ERR_FAULT;
+ }
+ } else {
+ _mali_osk_profiling_stop_sampling(current_profiling_pid);
+ _mali_profiling_notification_enable(session, 0, 0);
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Called by gator.ko to set HW counters
+ *
+ * @param counter_id The counter ID.
+ * @param event_id Event ID that the counter should count (HW counter value from TRM).
+ *
+ * @return 1 on success, 0 on failure.
+ */
+int _mali_profiling_set_event(u32 counter_id, s32 event_id)
+{
+ if (COUNTER_VP_0_C0 == counter_id) {
+ mali_gp_job_set_gp_counter_src0(event_id);
+ } else if (COUNTER_VP_0_C1 == counter_id) {
+ mali_gp_job_set_gp_counter_src1(event_id);
+ } else if (COUNTER_FP_0_C0 <= counter_id && COUNTER_FP_7_C1 >= counter_id) {
+ /*
+ * Two compatibility notes for this function:
+ *
+ * 1) Previously the DDK allowed per core counters.
+ *
+ * This did not make much sense on Mali-450 with the "virtual PP core" concept,
+ * so this option was removed, and only the same pair of HW counters was allowed on all cores,
+ * beginning with r3p2 release.
+ *
+ * Starting with r4p0, it is now possible to set different HW counters for the different sub jobs.
+ * This should be almost the same, since sub job 0 is designed to run on core 0,
+ * sub job 1 on core 1, and so on.
+ *
+ * The scheduling of PP sub jobs is not predictable, and this often led to situations where core 0 ran 2
+ * sub jobs, while for instance core 1 ran zero. Having the counters set per sub job would thus increase
+ * the predictability of the returned data (as you would be guaranteed data for all the selected HW counters).
+ *
+ * PS: Core scaling needs to be disabled in order to use this reliably (goes for both solutions).
+ *
+ * The framework/#defines with Gator still indicates that the counter is for a particular core,
+ * but this is internally used as a sub job ID instead (no translation needed).
+ *
+ * 2) Global/default vs per sub job counters
+ *
+ * Releases before r3p2 had only per PP core counters.
+ * r3p2 releases had only one set of default/global counters which applied to all PP cores
+ * Starting with r4p0, we have both a set of default/global counters,
+ * and individual counters per sub job (equal to per core).
+ *
+ * To keep compatibility with Gator/DS-5/streamline, the following scheme is used:
+ *
+ * r3p2 release: only counters set for core 0 are handled;
+ * they are applied as the default/global set of counters, and will thus affect all cores.
+ *
+ * r4p0 release: counters set for core 0 are applied as both the global/default set of counters
+ * and the counters for sub job 0.
+ * Counters set for cores 1-7 are only applied to the corresponding sub job.
+ *
+ * This should allow the DS-5/Streamline GUI to have a simple mode where it only allows setting the
+ * values for core 0, and thus this will be applied to all PP sub jobs/cores.
+ * Advanced mode will also be supported, where individual pairs of HW counters can be selected.
+ *
+ * The GUI will (until it is updated) still refer to cores instead of sub jobs, but this is probably
+ * something we can live with!
+ *
+ * Mali-450 note: Each job is not divided into a deterministic number of sub jobs, as the HW DLBU
+ * automatically distributes the load between whatever number of cores is available at this particular time.
+ * A normal PP job on Mali-450 is thus considered a single (virtual) job, and it will thus only be possible
+ * to use a single pair of HW counters (even if the job ran on multiple PP cores).
+ * In other words, only the global/default pair of PP HW counters will be used for normal Mali-450 jobs.
+ */
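+ /*
+ * Illustrative example of the mapping computed below: COUNTER_FP_1_C1
+ * is at offset 3 from COUNTER_FP_0_C0, giving sub_job = 1 and counter
+ * source 1, so it is routed to
+ * mali_pp_job_set_pp_counter_sub_job_src1(1, event_id); only sub job 0
+ * also updates the global/default counter pair.
+ */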
+ u32 sub_job = (counter_id - COUNTER_FP_0_C0) >> 1;
+ u32 counter_src = (counter_id - COUNTER_FP_0_C0) & 1;
+ if (0 == counter_src) {
+ mali_pp_job_set_pp_counter_sub_job_src0(sub_job, event_id);
+ if (0 == sub_job) {
+ mali_pp_job_set_pp_counter_global_src0(event_id);
+ }
+ } else {
+ mali_pp_job_set_pp_counter_sub_job_src1(sub_job, event_id);
+ if (0 == sub_job) {
+ mali_pp_job_set_pp_counter_global_src1(event_id);
+ }
+ }
+ } else if (COUNTER_L2_0_C0 <= counter_id && COUNTER_L2_2_C1 >= counter_id) {
+ u32 core_id = (counter_id - COUNTER_L2_0_C0) >> 1;
+ struct mali_l2_cache_core *l2_cache_core = mali_l2_cache_core_get_glob_l2_core(core_id);
+
+ if (NULL != l2_cache_core) {
+ u32 counter_src = (counter_id - COUNTER_L2_0_C0) & 1;
+ mali_l2_cache_core_set_counter_src(l2_cache_core,
+ counter_src, event_id);
+ l2_cache_counter_if_enabled = MALI_TRUE;
+ }
+ } else {
+ return 0; /* Failure, unknown event */
+ }
+
+ return 1; /* success */
+}
+
+/**
+ * Called by gator.ko to retrieve the L2 cache counter values for all L2 cache cores.
+ * The L2 cache counters are unique in that they are polled by gator, rather than being
+ * transmitted via the tracepoint mechanism.
+ *
+ * @param values Pointer to a _mali_profiling_l2_counter_values structure where
+ * the counter sources and values will be output
+ * @return 0 if all went well; otherwise, return the mask with the bits set for the powered off cores
+ */
+u32 _mali_profiling_get_l2_counters(_mali_profiling_l2_counter_values *values)
+{
+ u32 l2_cores_num = mali_l2_cache_core_get_glob_num_l2_cores();
+ u32 i;
+
+ MALI_DEBUG_ASSERT(l2_cores_num <= 3);
+
+ for (i = 0; i < l2_cores_num; i++) {
+ struct mali_l2_cache_core *l2_cache = mali_l2_cache_core_get_glob_l2_core(i);
+
+ if (NULL == l2_cache) {
+ continue;
+ }
+
+ mali_l2_cache_core_get_counter_values(l2_cache,
+ &values->cores[i].source0,
+ &values->cores[i].value0,
+ &values->cores[i].source1,
+ &values->cores[i].value1);
+ }
+
+ return 0;
+}
+
+/**
+ * Called by gator to control the production of profiling information at runtime.
+ */
+void _mali_profiling_control(u32 action, u32 value)
+{
+ switch (action) {
+ case FBDUMP_CONTROL_ENABLE:
+ mali_set_user_setting(_MALI_UK_USER_SETTING_COLORBUFFER_CAPTURE_ENABLED, (value == 0 ? MALI_FALSE : MALI_TRUE));
+ break;
+ case FBDUMP_CONTROL_RATE:
+ mali_set_user_setting(_MALI_UK_USER_SETTING_BUFFER_CAPTURE_N_FRAMES, value);
+ break;
+ case SW_COUNTER_ENABLE:
+ mali_set_user_setting(_MALI_UK_USER_SETTING_SW_COUNTER_ENABLED, value);
+ break;
+ case FBDUMP_CONTROL_RESIZE_FACTOR:
+ mali_set_user_setting(_MALI_UK_USER_SETTING_BUFFER_CAPTURE_RESIZE_FACTOR, value);
+ break;
+ default:
+ break; /* Ignore unimplemented actions */
+ }
+}
+
+/**
+ * Called by gator to get mali api version.
+ */
+u32 _mali_profiling_get_api_version(void)
+{
+ return MALI_PROFILING_API_VERSION;
+}
+
+/**
+* Called by gator to get the data about Mali instance in use:
+* product id, version, number of cores
+*/
+void _mali_profiling_get_mali_version(struct _mali_profiling_mali_version *values)
+{
+ values->mali_product_id = (u32)mali_kernel_core_get_product_id();
+ values->mali_version_major = mali_kernel_core_get_gpu_major_version();
+ values->mali_version_minor = mali_kernel_core_get_gpu_minor_version();
+ values->num_of_l2_cores = mali_l2_cache_core_get_glob_num_l2_cores();
+ values->num_of_fp_cores = mali_executor_get_num_cores_total();
+ values->num_of_vp_cores = 1;
+}
+
+
+EXPORT_SYMBOL(_mali_profiling_set_event);
+EXPORT_SYMBOL(_mali_profiling_get_l2_counters);
+EXPORT_SYMBOL(_mali_profiling_control);
+EXPORT_SYMBOL(_mali_profiling_get_api_version);
+EXPORT_SYMBOL(_mali_profiling_get_mali_version);
diff --git a/drivers/gpu/arm/utgard/linux/mali_osk_specific.h b/drivers/gpu/arm/utgard/linux/mali_osk_specific.h
new file mode 100644
index 000000000000..db034a5b3c70
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_osk_specific.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2010, 2012-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_specific.h
+ * Defines per-OS Kernel level specifics, such as unusual workarounds for
+ * certain OSs.
+ */
+
+#ifndef __MALI_OSK_SPECIFIC_H__
+#define __MALI_OSK_SPECIFIC_H__
+
+#include <asm/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/gfp.h>
+#include <linux/hardirq.h>
+
+
+#include "mali_osk_types.h"
+#include "mali_kernel_linux.h"
+
+#define MALI_STATIC_INLINE static inline
+#define MALI_NON_STATIC_INLINE inline
+
+typedef struct dma_pool *mali_dma_pool;
+
+typedef u32 mali_dma_addr;
+
+#if MALI_ENABLE_CPU_CYCLES
+/* Reads out the clock cycle performance counter of the current CPU.
+ It is useful for cheap (2 cycle) measurement of the time spent
+ in a code path: sample before and after, and the difference is the
+ number of cycles.
+ When the CPU is idle it will not increase this clock counter.
+ This means that the counter is accurate if only spin-locks are used,
+ but mutexes may lead to values that are too low, since the CPU might
+ "idle" while waiting for the mutex to become available.
+ The clock source is configured on the CPU during Mali module load,
+ but will not give useful output after a CPU has been power cycled.
+ It is therefore important to configure the system not to turn off
+ the CPU cores when using this functionality. */
+static inline unsigned int mali_get_cpu_cyclecount(void)
+{
+ unsigned int value;
+ /* Reading the CCNT Register - CPU clock counter */
+ asm volatile("MRC p15, 0, %0, c9, c13, 0\t\n": "=r"(value));
+ return value;
+}
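+
+/*
+ * Minimal usage sketch (illustrative): sample around the code under test
+ * and subtract, e.g.
+ *
+ *   unsigned int start = mali_get_cpu_cyclecount();
+ *   ...code being measured...
+ *   unsigned int cycles = mali_get_cpu_cyclecount() - start;
+ */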
+
+void mali_init_cpu_time_counters(int reset, int enable_divide_by_64);
+#endif
+
+
+MALI_STATIC_INLINE u32 _mali_osk_copy_from_user(void *to, void *from, u32 n)
+{
+ return (u32)copy_from_user(to, from, (unsigned long)n);
+}
+
+MALI_STATIC_INLINE mali_bool _mali_osk_in_atomic(void)
+{
+ return in_atomic();
+}
+
+#define _mali_osk_put_user(x, ptr) put_user(x, ptr)
+
+#endif /* __MALI_OSK_SPECIFIC_H__ */
diff --git a/drivers/gpu/arm/utgard/linux/mali_osk_time.c b/drivers/gpu/arm/utgard/linux/mali_osk_time.c
new file mode 100644
index 000000000000..4deaa101e48f
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_osk_time.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2010, 2013-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_time.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <linux/jiffies.h>
+#include <linux/time.h>
+#include <asm/delay.h>
+
+mali_bool _mali_osk_time_after_eq(unsigned long ticka, unsigned long tickb)
+{
+ return time_after_eq(ticka, tickb) ?
+ MALI_TRUE : MALI_FALSE;
+}
+
+unsigned long _mali_osk_time_mstoticks(u32 ms)
+{
+ return msecs_to_jiffies(ms);
+}
+
+u32 _mali_osk_time_tickstoms(unsigned long ticks)
+{
+ return jiffies_to_msecs(ticks);
+}
+
+unsigned long _mali_osk_time_tickcount(void)
+{
+ return jiffies;
+}
+
+void _mali_osk_time_ubusydelay(u32 usecs)
+{
+ udelay(usecs);
+}
+
+u64 _mali_osk_time_get_ns(void)
+{
+ struct timespec tsval;
+ getnstimeofday(&tsval);
+ return (u64)timespec_to_ns(&tsval);
+}
+
+u64 _mali_osk_boot_time_get_ns(void)
+{
+ struct timespec tsval;
+ get_monotonic_boottime(&tsval);
+ return (u64)timespec_to_ns(&tsval);
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_osk_timers.c b/drivers/gpu/arm/utgard/linux/mali_osk_timers.c
new file mode 100644
index 000000000000..6bbaee749d64
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_osk_timers.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_timers.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+struct _mali_osk_timer_t_struct {
+ struct timer_list timer;
+};
+
+typedef void (*timer_timeout_function_t)(unsigned long);
+
+_mali_osk_timer_t *_mali_osk_timer_init(void)
+{
+ _mali_osk_timer_t *t = (_mali_osk_timer_t *)kmalloc(sizeof(_mali_osk_timer_t), GFP_KERNEL);
+ if (NULL != t) init_timer(&t->timer);
+ return t;
+}
+
+void _mali_osk_timer_add(_mali_osk_timer_t *tim, unsigned long ticks_to_expire)
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ tim->timer.expires = jiffies + ticks_to_expire;
+ add_timer(&(tim->timer));
+}
+
+void _mali_osk_timer_mod(_mali_osk_timer_t *tim, unsigned long ticks_to_expire)
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ mod_timer(&(tim->timer), jiffies + ticks_to_expire);
+}
+
+void _mali_osk_timer_del(_mali_osk_timer_t *tim)
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ del_timer_sync(&(tim->timer));
+}
+
+void _mali_osk_timer_del_async(_mali_osk_timer_t *tim)
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ del_timer(&(tim->timer));
+}
+
+mali_bool _mali_osk_timer_pending(_mali_osk_timer_t *tim)
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ return 1 == timer_pending(&(tim->timer));
+}
+
+void _mali_osk_timer_setcallback(_mali_osk_timer_t *tim, _mali_osk_timer_callback_t callback, void *data)
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ tim->timer.data = (unsigned long)data;
+ tim->timer.function = (timer_timeout_function_t)callback;
+}
+
+void _mali_osk_timer_term(_mali_osk_timer_t *tim)
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ kfree(tim);
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_osk_wait_queue.c b/drivers/gpu/arm/utgard/linux/mali_osk_wait_queue.c
new file mode 100644
index 000000000000..15d5ce250eb1
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_osk_wait_queue.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2012-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_wait_queue.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/wait.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+struct _mali_osk_wait_queue_t_struct {
+ wait_queue_head_t wait_queue;
+};
+
+_mali_osk_wait_queue_t *_mali_osk_wait_queue_init(void)
+{
+ _mali_osk_wait_queue_t *ret = NULL;
+
+ ret = kmalloc(sizeof(_mali_osk_wait_queue_t), GFP_KERNEL);
+
+ if (NULL == ret) {
+ return ret;
+ }
+
+ init_waitqueue_head(&ret->wait_queue);
+ MALI_DEBUG_ASSERT(!waitqueue_active(&ret->wait_queue));
+
+ return ret;
+}
+
+void _mali_osk_wait_queue_wait_event(_mali_osk_wait_queue_t *queue, mali_bool(*condition)(void *), void *data)
+{
+ MALI_DEBUG_ASSERT_POINTER(queue);
+ MALI_DEBUG_PRINT(6, ("Adding to wait queue %p\n", queue));
+ wait_event(queue->wait_queue, condition(data));
+}
+
+void _mali_osk_wait_queue_wait_event_timeout(_mali_osk_wait_queue_t *queue, mali_bool(*condition)(void *), void *data, u32 timeout)
+{
+ MALI_DEBUG_ASSERT_POINTER(queue);
+ MALI_DEBUG_PRINT(6, ("Adding to wait queue %p\n", queue));
+ wait_event_timeout(queue->wait_queue, condition(data), _mali_osk_time_mstoticks(timeout));
+}
+
+void _mali_osk_wait_queue_wake_up(_mali_osk_wait_queue_t *queue)
+{
+ MALI_DEBUG_ASSERT_POINTER(queue);
+
+ /* if queue is empty, don't attempt to wake up its elements */
+ if (!waitqueue_active(&queue->wait_queue)) return;
+
+ MALI_DEBUG_PRINT(6, ("Waking up elements in wait queue %p ....\n", queue));
+
+ wake_up_all(&queue->wait_queue);
+
+ MALI_DEBUG_PRINT(6, ("... elements in wait queue %p woken up\n", queue));
+}
+
+void _mali_osk_wait_queue_term(_mali_osk_wait_queue_t *queue)
+{
+ /* Parameter validation */
+ MALI_DEBUG_ASSERT_POINTER(queue);
+
+ /* Linux requires no explicit termination of wait queues */
+ kfree(queue);
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_osk_wq.c b/drivers/gpu/arm/utgard/linux/mali_osk_wq.c
new file mode 100644
index 000000000000..2c34c91a7922
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_osk_wq.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_wq.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/slab.h> /* For memory allocation */
+#include <linux/workqueue.h>
+#include <linux/version.h>
+#include <linux/sched.h>
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_license.h"
+#include "mali_kernel_linux.h"
+
+typedef struct _mali_osk_wq_work_s {
+ _mali_osk_wq_work_handler_t handler;
+ void *data;
+ mali_bool high_pri;
+ struct work_struct work_handle;
+} mali_osk_wq_work_object_t;
+
+typedef struct _mali_osk_wq_delayed_work_s {
+ _mali_osk_wq_work_handler_t handler;
+ void *data;
+ struct delayed_work work;
+} mali_osk_wq_delayed_work_object_t;
+
+#if MALI_LICENSE_IS_GPL
+static struct workqueue_struct *mali_wq_normal = NULL;
+static struct workqueue_struct *mali_wq_high = NULL;
+#endif
+
+static void _mali_osk_wq_work_func(struct work_struct *work);
+
+_mali_osk_errcode_t _mali_osk_wq_init(void)
+{
+#if MALI_LICENSE_IS_GPL
+ MALI_DEBUG_ASSERT(NULL == mali_wq_normal);
+ MALI_DEBUG_ASSERT(NULL == mali_wq_high);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+ mali_wq_normal = alloc_workqueue("mali", WQ_UNBOUND, 0);
+ mali_wq_high = alloc_workqueue("mali_high_pri", WQ_HIGHPRI | WQ_UNBOUND, 0);
+#else
+ mali_wq_normal = create_workqueue("mali");
+ mali_wq_high = create_workqueue("mali_high_pri");
+#endif
+ if (NULL == mali_wq_normal || NULL == mali_wq_high) {
+ MALI_PRINT_ERROR(("Unable to create Mali workqueues\n"));
+
+ if (mali_wq_normal) destroy_workqueue(mali_wq_normal);
+ if (mali_wq_high) destroy_workqueue(mali_wq_high);
+
+ mali_wq_normal = NULL;
+ mali_wq_high = NULL;
+
+ return _MALI_OSK_ERR_FAULT;
+ }
+#endif /* MALI_LICENSE_IS_GPL */
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void _mali_osk_wq_flush(void)
+{
+#if MALI_LICENSE_IS_GPL
+ flush_workqueue(mali_wq_high);
+ flush_workqueue(mali_wq_normal);
+#else
+ flush_scheduled_work();
+#endif
+}
+
+void _mali_osk_wq_term(void)
+{
+#if MALI_LICENSE_IS_GPL
+ MALI_DEBUG_ASSERT(NULL != mali_wq_normal);
+ MALI_DEBUG_ASSERT(NULL != mali_wq_high);
+
+ flush_workqueue(mali_wq_normal);
+ destroy_workqueue(mali_wq_normal);
+
+ flush_workqueue(mali_wq_high);
+ destroy_workqueue(mali_wq_high);
+
+ mali_wq_normal = NULL;
+ mali_wq_high = NULL;
+#else
+ flush_scheduled_work();
+#endif
+}
+
+_mali_osk_wq_work_t *_mali_osk_wq_create_work(_mali_osk_wq_work_handler_t handler, void *data)
+{
+ mali_osk_wq_work_object_t *work = kmalloc(sizeof(mali_osk_wq_work_object_t), GFP_KERNEL);
+
+ if (NULL == work) return NULL;
+
+ work->handler = handler;
+ work->data = data;
+ work->high_pri = MALI_FALSE;
+
+ INIT_WORK(&work->work_handle, _mali_osk_wq_work_func);
+
+ return work;
+}
+
+_mali_osk_wq_work_t *_mali_osk_wq_create_work_high_pri(_mali_osk_wq_work_handler_t handler, void *data)
+{
+ mali_osk_wq_work_object_t *work = kmalloc(sizeof(mali_osk_wq_work_object_t), GFP_KERNEL);
+
+ if (NULL == work) return NULL;
+
+ work->handler = handler;
+ work->data = data;
+ work->high_pri = MALI_TRUE;
+
+ INIT_WORK(&work->work_handle, _mali_osk_wq_work_func);
+
+ return work;
+}
+
+void _mali_osk_wq_delete_work(_mali_osk_wq_work_t *work)
+{
+ mali_osk_wq_work_object_t *work_object = (mali_osk_wq_work_object_t *)work;
+ _mali_osk_wq_flush();
+ kfree(work_object);
+}
+
+void _mali_osk_wq_delete_work_nonflush(_mali_osk_wq_work_t *work)
+{
+ mali_osk_wq_work_object_t *work_object = (mali_osk_wq_work_object_t *)work;
+ kfree(work_object);
+}
+
+void _mali_osk_wq_schedule_work(_mali_osk_wq_work_t *work)
+{
+ mali_osk_wq_work_object_t *work_object = (mali_osk_wq_work_object_t *)work;
+#if MALI_LICENSE_IS_GPL
+ queue_work(mali_wq_normal, &work_object->work_handle);
+#else
+ schedule_work(&work_object->work_handle);
+#endif
+}
+
+void _mali_osk_wq_schedule_work_high_pri(_mali_osk_wq_work_t *work)
+{
+ mali_osk_wq_work_object_t *work_object = (mali_osk_wq_work_object_t *)work;
+#if MALI_LICENSE_IS_GPL
+ queue_work(mali_wq_high, &work_object->work_handle);
+#else
+ schedule_work(&work_object->work_handle);
+#endif
+}
+
+static void _mali_osk_wq_work_func(struct work_struct *work)
+{
+ mali_osk_wq_work_object_t *work_object;
+
+ work_object = _MALI_OSK_CONTAINER_OF(work, mali_osk_wq_work_object_t, work_handle);
+
+#if MALI_LICENSE_IS_GPL
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
+ /* We want the highest dynamic priority for this thread so that jobs depending
+ ** on it can be scheduled in time. Without this, the thread might sometimes
+ ** have to wait for user-mode threads to finish their round-robin time slices,
+ ** causing a *bubble* in the Mali pipeline. Thanks to the high-priority
+ ** workqueue implementation in newer kernels, this is only needed on older
+ ** kernels.
+ */
+ if (MALI_TRUE == work_object->high_pri) {
+ set_user_nice(current, -19);
+ }
+#endif
+#endif /* MALI_LICENSE_IS_GPL */
+
+ work_object->handler(work_object->data);
+}
+
+static void _mali_osk_wq_delayed_work_func(struct work_struct *work)
+{
+ mali_osk_wq_delayed_work_object_t *work_object;
+
+ work_object = _MALI_OSK_CONTAINER_OF(work, mali_osk_wq_delayed_work_object_t, work.work);
+ work_object->handler(work_object->data);
+}
+
+mali_osk_wq_delayed_work_object_t *_mali_osk_wq_delayed_create_work(_mali_osk_wq_work_handler_t handler, void *data)
+{
+ mali_osk_wq_delayed_work_object_t *work = kmalloc(sizeof(mali_osk_wq_delayed_work_object_t), GFP_KERNEL);
+
+ if (NULL == work) return NULL;
+
+ work->handler = handler;
+ work->data = data;
+
+ INIT_DELAYED_WORK(&work->work, _mali_osk_wq_delayed_work_func);
+
+ return work;
+}
+
+void _mali_osk_wq_delayed_delete_work_nonflush(_mali_osk_wq_delayed_work_t *work)
+{
+ mali_osk_wq_delayed_work_object_t *work_object = (mali_osk_wq_delayed_work_object_t *)work;
+ kfree(work_object);
+}
+
+void _mali_osk_wq_delayed_cancel_work_async(_mali_osk_wq_delayed_work_t *work)
+{
+ mali_osk_wq_delayed_work_object_t *work_object = (mali_osk_wq_delayed_work_object_t *)work;
+ cancel_delayed_work(&work_object->work);
+}
+
+void _mali_osk_wq_delayed_cancel_work_sync(_mali_osk_wq_delayed_work_t *work)
+{
+ mali_osk_wq_delayed_work_object_t *work_object = (mali_osk_wq_delayed_work_object_t *)work;
+ cancel_delayed_work_sync(&work_object->work);
+}
+
+void _mali_osk_wq_delayed_schedule_work(_mali_osk_wq_delayed_work_t *work, u32 delay)
+{
+ mali_osk_wq_delayed_work_object_t *work_object = (mali_osk_wq_delayed_work_object_t *)work;
+
+#if MALI_LICENSE_IS_GPL
+ queue_delayed_work(mali_wq_normal, &work_object->work, delay);
+#else
+ schedule_delayed_work(&work_object->work, delay);
+#endif
+
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_pmu_power_up_down.c b/drivers/gpu/arm/utgard/linux/mali_pmu_power_up_down.c
new file mode 100644
index 000000000000..61ff5c8fdca8
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_pmu_power_up_down.c
@@ -0,0 +1,23 @@
+/**
+ * Copyright (C) 2010, 2012-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmu_power_up_down.c
+ */
+
+#include <linux/module.h>
+#include "mali_executor.h"
+
+int mali_perf_set_num_pp_cores(unsigned int num_cores)
+{
+ return mali_executor_set_perf_level(num_cores, MALI_FALSE);
+}
+
+EXPORT_SYMBOL(mali_perf_set_num_pp_cores);
diff --git a/drivers/gpu/arm/utgard/linux/mali_profiling_events.h b/drivers/gpu/arm/utgard/linux/mali_profiling_events.h
new file mode 100644
index 000000000000..0b90e8c5cf26
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_profiling_events.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2012, 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PROFILING_EVENTS_H__
+#define __MALI_PROFILING_EVENTS_H__
+
+/* Simple wrapper in order to find the OS specific location of this file */
+#include <linux/mali/mali_utgard_profiling_events.h>
+
+#endif /* __MALI_PROFILING_EVENTS_H__ */
diff --git a/drivers/gpu/arm/utgard/linux/mali_profiling_gator_api.h b/drivers/gpu/arm/utgard/linux/mali_profiling_gator_api.h
new file mode 100644
index 000000000000..c98d127366ba
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_profiling_gator_api.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2012-2013, 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PROFILING_GATOR_API_H__
+#define __MALI_PROFILING_GATOR_API_H__
+
+/* Simple wrapper in order to find the OS specific location of this file */
+#include <linux/mali/mali_utgard_profiling_gator_api.h>
+
+#endif /* __MALI_PROFILING_GATOR_API_H__ */
diff --git a/drivers/gpu/arm/utgard/linux/mali_profiling_internal.c b/drivers/gpu/arm/utgard/linux/mali_profiling_internal.c
new file mode 100644
index 000000000000..12aef4194ff5
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_profiling_internal.c
@@ -0,0 +1,275 @@
+/*
+ * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_ukk.h"
+#include "mali_timestamp.h"
+#include "mali_osk_profiling.h"
+#include "mali_user_settings_db.h"
+#include "mali_profiling_internal.h"
+
+typedef struct mali_profiling_entry {
+ u64 timestamp;
+ u32 event_id;
+ u32 data[5];
+} mali_profiling_entry;
+
+typedef enum mali_profiling_state {
+ MALI_PROFILING_STATE_UNINITIALIZED,
+ MALI_PROFILING_STATE_IDLE,
+ MALI_PROFILING_STATE_RUNNING,
+ MALI_PROFILING_STATE_RETURN,
+} mali_profiling_state;
+
+static _mali_osk_mutex_t *lock = NULL;
+static mali_profiling_state prof_state = MALI_PROFILING_STATE_UNINITIALIZED;
+static mali_profiling_entry *profile_entries = NULL;
+static _mali_osk_atomic_t profile_insert_index;
+static u32 profile_mask = 0;
+
+static inline void add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4);
+
+void probe_mali_timeline_event(void *data, TP_PROTO(unsigned int event_id, unsigned int d0, unsigned int d1, unsigned int d2, unsigned int d3, unsigned int d4))
+{
+ add_event(event_id, d0, d1, d2, d3, d4);
+}
+
+_mali_osk_errcode_t _mali_internal_profiling_init(mali_bool auto_start)
+{
+ profile_entries = NULL;
+ profile_mask = 0;
+ _mali_osk_atomic_init(&profile_insert_index, 0);
+
+ lock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_PROFILING);
+ if (NULL == lock) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ prof_state = MALI_PROFILING_STATE_IDLE;
+
+ if (MALI_TRUE == auto_start) {
+ u32 limit = MALI_PROFILING_MAX_BUFFER_ENTRIES; /* Use maximum buffer size */
+
+ mali_set_user_setting(_MALI_UK_USER_SETTING_SW_EVENTS_ENABLE, MALI_TRUE);
+ if (_MALI_OSK_ERR_OK != _mali_internal_profiling_start(&limit)) {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void _mali_internal_profiling_term(void)
+{
+ u32 count;
+
+ /* Ensure profiling is stopped */
+ _mali_internal_profiling_stop(&count);
+
+ prof_state = MALI_PROFILING_STATE_UNINITIALIZED;
+
+ if (NULL != profile_entries) {
+ _mali_osk_vfree(profile_entries);
+ profile_entries = NULL;
+ }
+
+ if (NULL != lock) {
+ _mali_osk_mutex_term(lock);
+ lock = NULL;
+ }
+}
+
+_mali_osk_errcode_t _mali_internal_profiling_start(u32 *limit)
+{
+ _mali_osk_errcode_t ret;
+ mali_profiling_entry *new_profile_entries;
+
+ _mali_osk_mutex_wait(lock);
+
+ if (MALI_PROFILING_STATE_RUNNING == prof_state) {
+ _mali_osk_mutex_signal(lock);
+ return _MALI_OSK_ERR_BUSY;
+ }
+
+ /* Clamp the requested size before allocating so an oversized request
+ * cannot trigger a needlessly large allocation. */
+ if (MALI_PROFILING_MAX_BUFFER_ENTRIES < *limit) {
+ *limit = MALI_PROFILING_MAX_BUFFER_ENTRIES;
+ }
+
+ new_profile_entries = _mali_osk_valloc(*limit * sizeof(mali_profiling_entry));
+
+ if (NULL == new_profile_entries) {
+ _mali_osk_mutex_signal(lock);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ profile_mask = 1;
+ while (profile_mask <= *limit) {
+ profile_mask <<= 1;
+ }
+ profile_mask >>= 1;
+
+ *limit = profile_mask;
+
+ profile_mask--; /* turns the power of two into a mask of one less */
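+ /* Worked example (illustrative numbers): a requested limit of 3000 grows
+ * profile_mask to 4096, the shift back gives 2048, so *limit becomes 2048
+ * and profile_mask ends up as 0x7FF. */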
+
+ if (MALI_PROFILING_STATE_IDLE != prof_state) {
+ _mali_osk_mutex_signal(lock);
+ _mali_osk_vfree(new_profile_entries);
+ return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+ }
+
+ profile_entries = new_profile_entries;
+
+ ret = _mali_timestamp_reset();
+
+ if (_MALI_OSK_ERR_OK == ret) {
+ prof_state = MALI_PROFILING_STATE_RUNNING;
+ /* Only hook the tracepoint once the buffer and timestamp are ready. */
+ register_trace_mali_timeline_event(probe_mali_timeline_event, NULL);
+ } else {
+ _mali_osk_vfree(profile_entries);
+ profile_entries = NULL;
+ }
+
+ _mali_osk_mutex_signal(lock);
+ return ret;
+}
+
+static inline void add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4)
+{
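+ /* Atomic post-increment masked to a power of two: concurrent writers each
+ * claim a unique slot, and the oldest entries are overwritten once the
+ * ring buffer wraps. */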
+ u32 cur_index = (_mali_osk_atomic_inc_return(&profile_insert_index) - 1) & profile_mask;
+
+ profile_entries[cur_index].timestamp = _mali_timestamp_get();
+ profile_entries[cur_index].event_id = event_id;
+ profile_entries[cur_index].data[0] = data0;
+ profile_entries[cur_index].data[1] = data1;
+ profile_entries[cur_index].data[2] = data2;
+ profile_entries[cur_index].data[3] = data3;
+ profile_entries[cur_index].data[4] = data4;
+
+ /* If event is "leave API function", add current memory usage to the event
+ * as data point 4. This is used in timeline profiling to indicate how
+ * much memory was used when leaving a function. */
+ if (event_id == (MALI_PROFILING_EVENT_TYPE_SINGLE | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_SINGLE_SW_LEAVE_API_FUNC)) {
+ profile_entries[cur_index].data[4] = _mali_ukk_report_memory_usage();
+ }
+}
+
+_mali_osk_errcode_t _mali_internal_profiling_stop(u32 *count)
+{
+ _mali_osk_mutex_wait(lock);
+
+ if (MALI_PROFILING_STATE_RUNNING != prof_state) {
+ _mali_osk_mutex_signal(lock);
+ return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+ }
+
+ /* go into return state (user retrieves events); no more events will be added after this */
+ prof_state = MALI_PROFILING_STATE_RETURN;
+
+ unregister_trace_mali_timeline_event(probe_mali_timeline_event, NULL);
+
+ _mali_osk_mutex_signal(lock);
+
+ tracepoint_synchronize_unregister();
+
+ *count = _mali_osk_atomic_read(&profile_insert_index);
+ if (*count > profile_mask) *count = profile_mask;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+u32 _mali_internal_profiling_get_count(void)
+{
+ u32 retval = 0;
+
+ _mali_osk_mutex_wait(lock);
+ if (MALI_PROFILING_STATE_RETURN == prof_state) {
+ retval = _mali_osk_atomic_read(&profile_insert_index);
+ if (retval > profile_mask) retval = profile_mask;
+ }
+ _mali_osk_mutex_signal(lock);
+
+ return retval;
+}
+
+_mali_osk_errcode_t _mali_internal_profiling_get_event(u32 index, u64 *timestamp, u32 *event_id, u32 data[5])
+{
+ u32 raw_index;
+
+ _mali_osk_mutex_wait(lock);
+
+ raw_index = _mali_osk_atomic_read(&profile_insert_index);
+
+ if (index < profile_mask) {
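+ /* If the buffer has wrapped (raw_index has bits above the mask), logical
+ * index 0 must map to the oldest surviving entry, so the requested index
+ * is rotated by the raw insert position. */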
+ if ((raw_index & ~profile_mask) != 0) {
+ index += raw_index;
+ index &= profile_mask;
+ }
+
+ if (prof_state != MALI_PROFILING_STATE_RETURN) {
+ _mali_osk_mutex_signal(lock);
+ return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+ }
+
+ if (index >= raw_index) {
+ _mali_osk_mutex_signal(lock);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ *timestamp = profile_entries[index].timestamp;
+ *event_id = profile_entries[index].event_id;
+ data[0] = profile_entries[index].data[0];
+ data[1] = profile_entries[index].data[1];
+ data[2] = profile_entries[index].data[2];
+ data[3] = profile_entries[index].data[3];
+ data[4] = profile_entries[index].data[4];
+ } else {
+ _mali_osk_mutex_signal(lock);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ _mali_osk_mutex_signal(lock);
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_internal_profiling_clear(void)
+{
+ _mali_osk_mutex_wait(lock);
+
+ if (MALI_PROFILING_STATE_RETURN != prof_state) {
+ _mali_osk_mutex_signal(lock);
+ return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+ }
+
+ prof_state = MALI_PROFILING_STATE_IDLE;
+ profile_mask = 0;
+ _mali_osk_atomic_init(&profile_insert_index, 0);
+
+ if (NULL != profile_entries) {
+ _mali_osk_vfree(profile_entries);
+ profile_entries = NULL;
+ }
+
+ _mali_osk_mutex_signal(lock);
+ return _MALI_OSK_ERR_OK;
+}
+
+mali_bool _mali_internal_profiling_is_recording(void)
+{
+ return prof_state == MALI_PROFILING_STATE_RUNNING ? MALI_TRUE : MALI_FALSE;
+}
+
+mali_bool _mali_internal_profiling_have_recording(void)
+{
+ return prof_state == MALI_PROFILING_STATE_RETURN ? MALI_TRUE : MALI_FALSE;
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_profiling_internal.h b/drivers/gpu/arm/utgard/linux/mali_profiling_internal.h
new file mode 100644
index 000000000000..1c6f4da691d2
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_profiling_internal.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2012-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PROFILING_INTERNAL_H__
+#define __MALI_PROFILING_INTERNAL_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "mali_osk.h"
+
+_mali_osk_errcode_t _mali_internal_profiling_init(mali_bool auto_start);
+void _mali_internal_profiling_term(void);
+
+mali_bool _mali_internal_profiling_is_recording(void);
+mali_bool _mali_internal_profiling_have_recording(void);
+_mali_osk_errcode_t _mali_internal_profiling_clear(void);
+_mali_osk_errcode_t _mali_internal_profiling_get_event(u32 index, u64 *timestamp, u32 *event_id, u32 data[5]);
+u32 _mali_internal_profiling_get_count(void);
+_mali_osk_errcode_t _mali_internal_profiling_stop(u32 *count);
+_mali_osk_errcode_t _mali_internal_profiling_start(u32 *limit);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_PROFILING_INTERNAL_H__ */
diff --git a/drivers/gpu/arm/utgard/linux/mali_sync.c b/drivers/gpu/arm/utgard/linux/mali_sync.c
new file mode 100644
index 000000000000..dc1e3a2a4d73
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_sync.c
@@ -0,0 +1,447 @@
+/*
+ * Copyright (C) 2012-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_sync.h"
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_timeline.h"
+#include "mali_executor.h"
+
+#include <linux/file.h>
+#include <linux/seq_file.h>
+#include <linux/module.h>
+#include <linux/fcntl.h>
+
+struct mali_sync_pt {
+ struct sync_pt sync_pt;
+ struct mali_sync_flag *flag;
+ struct sync_timeline *sync_tl; /**< Sync timeline this pt is connected to. */
+};
+
+/**
+ * The sync flag is used to connect sync fences to the Mali Timeline system. Sync fences can be
+ * created from a sync flag, and when the flag is signaled, the sync fences will also be signaled.
+ */
+struct mali_sync_flag {
+ struct sync_timeline *sync_tl; /**< Sync timeline this flag is connected to. */
+ u32 point; /**< Point on timeline. */
+ int status; /**< 0 if unsignaled, 1 if signaled without error or negative if signaled with error. */
+ struct kref refcount; /**< Reference count. */
+};
+
+/**
+ * A Mali sync timeline container connects a Mali timeline to a sync_timeline,
+ * so that detailed Mali timeline state can be printed when a fence times out.
+ */
+struct mali_sync_timeline_container {
+ struct sync_timeline sync_timeline;
+ struct mali_timeline *timeline;
+};
+
+MALI_STATIC_INLINE struct mali_sync_pt *to_mali_sync_pt(struct sync_pt *pt)
+{
+ return container_of(pt, struct mali_sync_pt, sync_pt);
+}
+
+MALI_STATIC_INLINE struct mali_sync_timeline_container *to_mali_sync_tl_container(struct sync_timeline *sync_tl)
+{
+ return container_of(sync_tl, struct mali_sync_timeline_container, sync_timeline);
+}
+
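+/* sync framework "dup" callback: the duplicate shares the original point's
+ * flag and takes an extra flag reference, so either copy can be freed
+ * independently. */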
+static struct sync_pt *timeline_dup(struct sync_pt *pt)
+{
+ struct mali_sync_pt *mpt, *new_mpt;
+ struct sync_pt *new_pt;
+
+ MALI_DEBUG_ASSERT_POINTER(pt);
+ mpt = to_mali_sync_pt(pt);
+
+ new_pt = sync_pt_create(mpt->sync_tl, sizeof(struct mali_sync_pt));
+ if (NULL == new_pt) return NULL;
+
+ new_mpt = to_mali_sync_pt(new_pt);
+
+ mali_sync_flag_get(mpt->flag);
+ new_mpt->flag = mpt->flag;
+ new_mpt->sync_tl = mpt->sync_tl;
+
+ return new_pt;
+}
+
+static int timeline_has_signaled(struct sync_pt *pt)
+{
+ struct mali_sync_pt *mpt;
+
+ MALI_DEBUG_ASSERT_POINTER(pt);
+ mpt = to_mali_sync_pt(pt);
+
+ MALI_DEBUG_ASSERT_POINTER(mpt->flag);
+
+ return mpt->flag->status;
+}
+
+static int timeline_compare(struct sync_pt *pta, struct sync_pt *ptb)
+{
+ struct mali_sync_pt *mpta;
+ struct mali_sync_pt *mptb;
+ u32 a, b;
+
+ MALI_DEBUG_ASSERT_POINTER(pta);
+ MALI_DEBUG_ASSERT_POINTER(ptb);
+ mpta = to_mali_sync_pt(pta);
+ mptb = to_mali_sync_pt(ptb);
+
+ MALI_DEBUG_ASSERT_POINTER(mpta->flag);
+ MALI_DEBUG_ASSERT_POINTER(mptb->flag);
+
+ a = mpta->flag->point;
+ b = mptb->flag->point;
+
+ if (a == b) return 0;
+
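+ /* Unsigned wraparound-safe ordering, e.g. a = 0xFFFFFFFE and b = 1 give
+ * b - a == 3 < a - b, so a orders before b although it is numerically
+ * larger (illustrative values). */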
+ return ((b - a) < (a - b) ? -1 : 1);
+}
+
+static void timeline_free_pt(struct sync_pt *pt)
+{
+ struct mali_sync_pt *mpt;
+
+ MALI_DEBUG_ASSERT_POINTER(pt);
+ mpt = to_mali_sync_pt(pt);
+
+ mali_sync_flag_put(mpt->flag);
+}
+
+static void timeline_release(struct sync_timeline *sync_timeline)
+{
+ struct mali_sync_timeline_container *mali_sync_tl = NULL;
+ struct mali_timeline *mali_tl = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(sync_timeline);
+
+ mali_sync_tl = to_mali_sync_tl_container(sync_timeline);
+ MALI_DEBUG_ASSERT_POINTER(mali_sync_tl);
+
+ mali_tl = mali_sync_tl->timeline;
+
+ /* The always-signaled timeline has no Mali timeline container. */
+ if (mali_tl) {
+ if (NULL != mali_tl->spinlock) {
+ mali_spinlock_reentrant_term(mali_tl->spinlock);
+ }
+ _mali_osk_free(mali_tl);
+ }
+
+ module_put(THIS_MODULE);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+static void timeline_print_pt(struct seq_file *s, struct sync_pt *sync_pt)
+{
+ struct mali_sync_pt *mpt;
+
+ MALI_DEBUG_ASSERT_POINTER(s);
+ MALI_DEBUG_ASSERT_POINTER(sync_pt);
+
+ mpt = to_mali_sync_pt(sync_pt);
+
+ /* This sync point may still be under construction, so make sure
+ * the flag is valid before accessing it
+ */
+ if (mpt->flag) {
+ seq_printf(s, "%u", mpt->flag->point);
+ } else {
+ seq_printf(s, "uninitialized");
+ }
+}
+
+static void timeline_print_obj(struct seq_file *s, struct sync_timeline *sync_tl)
+{
+ struct mali_sync_timeline_container *mali_sync_tl = NULL;
+ struct mali_timeline *mali_tl = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(sync_tl);
+
+ mali_sync_tl = to_mali_sync_tl_container(sync_tl);
+ MALI_DEBUG_ASSERT_POINTER(mali_sync_tl);
+
+ mali_tl = mali_sync_tl->timeline;
+
+ if (NULL != mali_tl) {
+ seq_printf(s, "oldest (%u) ", mali_tl->point_oldest);
+ seq_printf(s, "next (%u)", mali_tl->point_next);
+ seq_printf(s, "\n");
+
+#if defined(MALI_TIMELINE_DEBUG_FUNCTIONS)
+ {
+ u32 tid = _mali_osk_get_tid();
+ struct mali_timeline_system *system = mali_tl->system;
+
+ mali_spinlock_reentrant_wait(mali_tl->spinlock, tid);
+ if (!mali_tl->destroyed) {
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+ mali_timeline_debug_print_timeline(mali_tl, s);
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+ }
+ mali_spinlock_reentrant_signal(mali_tl->spinlock, tid);
+
+ /* dump job queue status and group running status */
+ mali_executor_status_dump();
+ }
+#endif
+ }
+}
+#else
+static void timeline_pt_value_str(struct sync_pt *pt, char *str, int size)
+{
+ struct mali_sync_pt *mpt;
+
+ MALI_DEBUG_ASSERT_POINTER(str);
+ MALI_DEBUG_ASSERT_POINTER(pt);
+
+ mpt = to_mali_sync_pt(pt);
+
+ /* This sync point may still be under construction, so make sure
+ * the flag is valid before accessing it
+ */
+ if (mpt->flag) {
+ _mali_osk_snprintf(str, size, "%u", mpt->flag->point);
+ } else {
+ _mali_osk_snprintf(str, size, "uninitialized");
+ }
+}
+
+static void timeline_value_str(struct sync_timeline *timeline, char *str, int size)
+{
+ struct mali_sync_timeline_container *mali_sync_tl = NULL;
+ struct mali_timeline *mali_tl = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ mali_sync_tl = to_mali_sync_tl_container(timeline);
+ MALI_DEBUG_ASSERT_POINTER(mali_sync_tl);
+
+ mali_tl = mali_sync_tl->timeline;
+
+ if (NULL != mali_tl) {
+ /* Build the whole string in one call; consecutive snprintf calls into
+ * the same buffer would each overwrite the previous output. */
+ _mali_osk_snprintf(str, size, "oldest (%u) next (%u)\n", mali_tl->point_oldest, mali_tl->point_next);
+
+#if defined(MALI_TIMELINE_DEBUG_FUNCTIONS)
+ {
+ u32 tid = _mali_osk_get_tid();
+ struct mali_timeline_system *system = mali_tl->system;
+
+ mali_spinlock_reentrant_wait(mali_tl->spinlock, tid);
+ if (!mali_tl->destroyed) {
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+ mali_timeline_debug_direct_print_timeline(mali_tl);
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+ }
+ mali_spinlock_reentrant_signal(mali_tl->spinlock, tid);
+
+ /* dump job queue status and group running status */
+ mali_executor_status_dump();
+ }
+#endif
+ }
+}
+#endif
+
+
+static struct sync_timeline_ops mali_timeline_ops = {
+ .driver_name = "Mali",
+ .dup = timeline_dup,
+ .has_signaled = timeline_has_signaled,
+ .compare = timeline_compare,
+ .free_pt = timeline_free_pt,
+ .release_obj = timeline_release,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+ .print_pt = timeline_print_pt,
+ .print_obj = timeline_print_obj,
+#else
+ .pt_value_str = timeline_pt_value_str,
+ .timeline_value_str = timeline_value_str,
+#endif
+};
+
+struct sync_timeline *mali_sync_timeline_create(struct mali_timeline *timeline, const char *name)
+{
+ struct sync_timeline *sync_tl;
+ struct mali_sync_timeline_container *mali_sync_tl;
+
+ sync_tl = sync_timeline_create(&mali_timeline_ops, sizeof(struct mali_sync_timeline_container), name);
+ if (NULL == sync_tl) return NULL;
+
+ mali_sync_tl = to_mali_sync_tl_container(sync_tl);
+ mali_sync_tl->timeline = timeline;
+
+ /* Grab a reference on the module to ensure the callbacks are present
+ * as long as some timeline exists. The reference is released when the
+ * timeline is freed.
+ * Since this function is called from an ioctl on an open file, we know
+ * we already have a reference, so using __module_get is safe. */
+ __module_get(THIS_MODULE);
+
+ return sync_tl;
+}
+
+s32 mali_sync_fence_fd_alloc(struct sync_fence *sync_fence)
+{
+ s32 fd = -1;
+
+ fd = get_unused_fd_flags(O_CLOEXEC);
+ if (fd < 0) {
+ sync_fence_put(sync_fence);
+ return -1;
+ }
+ sync_fence_install(sync_fence, fd);
+
+ return fd;
+}
+
+struct sync_fence *mali_sync_fence_merge(struct sync_fence *sync_fence1, struct sync_fence *sync_fence2)
+{
+ struct sync_fence *sync_fence;
+
+ MALI_DEBUG_ASSERT_POINTER(sync_fence1);
+ MALI_DEBUG_ASSERT_POINTER(sync_fence2);
+
+ sync_fence = sync_fence_merge("mali_merge_fence", sync_fence1, sync_fence2);
+ sync_fence_put(sync_fence1);
+ sync_fence_put(sync_fence2);
+
+ return sync_fence;
+}
+
+struct sync_fence *mali_sync_timeline_create_signaled_fence(struct sync_timeline *sync_tl)
+{
+ struct mali_sync_flag *flag;
+ struct sync_fence *sync_fence;
+
+ MALI_DEBUG_ASSERT_POINTER(sync_tl);
+
+ flag = mali_sync_flag_create(sync_tl, 0);
+ if (NULL == flag) return NULL;
+
+ sync_fence = mali_sync_flag_create_fence(flag);
+
+ mali_sync_flag_signal(flag, 0);
+ mali_sync_flag_put(flag);
+
+ return sync_fence;
+}
+
+struct mali_sync_flag *mali_sync_flag_create(struct sync_timeline *sync_tl, mali_timeline_point point)
+{
+ struct mali_sync_flag *flag;
+
+ if (NULL == sync_tl) return NULL;
+
+ flag = _mali_osk_calloc(1, sizeof(*flag));
+ if (NULL == flag) return NULL;
+
+ flag->sync_tl = sync_tl;
+ flag->point = point;
+
+ flag->status = 0;
+ kref_init(&flag->refcount);
+
+ return flag;
+}
+
+void mali_sync_flag_get(struct mali_sync_flag *flag)
+{
+ MALI_DEBUG_ASSERT_POINTER(flag);
+ kref_get(&flag->refcount);
+}
+
+/**
+ * Free sync flag.
+ *
+ * @param ref kref object embedded in sync flag that should be freed.
+ */
+static void mali_sync_flag_free(struct kref *ref)
+{
+ struct mali_sync_flag *flag;
+
+ MALI_DEBUG_ASSERT_POINTER(ref);
+ flag = container_of(ref, struct mali_sync_flag, refcount);
+
+ _mali_osk_free(flag);
+}
+
+void mali_sync_flag_put(struct mali_sync_flag *flag)
+{
+ MALI_DEBUG_ASSERT_POINTER(flag);
+ kref_put(&flag->refcount, mali_sync_flag_free);
+}
+
+void mali_sync_flag_signal(struct mali_sync_flag *flag, int error)
+{
+ MALI_DEBUG_ASSERT_POINTER(flag);
+
+ MALI_DEBUG_ASSERT(0 == flag->status);
+ flag->status = (0 > error) ? error : 1;
+
+ _mali_osk_write_mem_barrier();
+
+ sync_timeline_signal(flag->sync_tl);
+}
+
+/**
+ * Create a sync point attached to given sync flag.
+ *
+ * @note Sync points must be triggered in *exactly* the same order as they are created.
+ *
+ * @param flag Sync flag.
+ * @return New sync point if successful, NULL if not.
+ */
+static struct sync_pt *mali_sync_flag_create_pt(struct mali_sync_flag *flag)
+{
+ struct sync_pt *pt;
+ struct mali_sync_pt *mpt;
+
+ MALI_DEBUG_ASSERT_POINTER(flag);
+ MALI_DEBUG_ASSERT_POINTER(flag->sync_tl);
+
+ pt = sync_pt_create(flag->sync_tl, sizeof(struct mali_sync_pt));
+ if (NULL == pt) return NULL;
+
+ mali_sync_flag_get(flag);
+
+ mpt = to_mali_sync_pt(pt);
+ mpt->flag = flag;
+ mpt->sync_tl = flag->sync_tl;
+
+ return pt;
+}
+
+struct sync_fence *mali_sync_flag_create_fence(struct mali_sync_flag *flag)
+{
+ struct sync_pt *sync_pt;
+ struct sync_fence *sync_fence;
+
+ MALI_DEBUG_ASSERT_POINTER(flag);
+ MALI_DEBUG_ASSERT_POINTER(flag->sync_tl);
+
+ sync_pt = mali_sync_flag_create_pt(flag);
+ if (NULL == sync_pt) return NULL;
+
+ sync_fence = sync_fence_create("mali_flag_fence", sync_pt);
+ if (NULL == sync_fence) {
+ sync_pt_free(sync_pt);
+ return NULL;
+ }
+
+ return sync_fence;
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_sync.h b/drivers/gpu/arm/utgard/linux/mali_sync.h
new file mode 100644
index 000000000000..0c541ff9a2e0
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_sync.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2012-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_sync.h
+ *
+ * Mali interface for Linux sync objects.
+ */
+
+#ifndef _MALI_SYNC_H_
+#define _MALI_SYNC_H_
+
+#if defined(CONFIG_SYNC)
+
+#include <linux/seq_file.h>
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
+#include <linux/sync.h>
+#else
+#include <sync.h>
+#endif
+
+
+#include "mali_osk.h"
+
+struct mali_sync_flag;
+struct mali_timeline;
+
+/**
+ * Create a sync timeline.
+ *
+ * @param timeline Mali timeline to connect to the sync timeline.
+ * @param name Name of the sync timeline.
+ * @return The new sync timeline if successful, NULL if not.
+ */
+struct sync_timeline *mali_sync_timeline_create(struct mali_timeline *timeline, const char *name);
+
+/**
+ * Creates a file descriptor representing the sync fence. Will release sync fence if allocation of
+ * file descriptor fails.
+ *
+ * @param sync_fence Sync fence.
+ * @return File descriptor representing sync fence if successful, or -1 if not.
+ */
+s32 mali_sync_fence_fd_alloc(struct sync_fence *sync_fence);
+
+/**
+ * Merges two sync fences. Both input sync fences will be released.
+ *
+ * @param sync_fence1 First sync fence.
+ * @param sync_fence2 Second sync fence.
+ * @return New sync fence that is the result of the merger if successful, or NULL if not.
+ */
+struct sync_fence *mali_sync_fence_merge(struct sync_fence *sync_fence1, struct sync_fence *sync_fence2);
+
+/**
+ * Create a sync fence that is already signaled.
+ *
+ * @param sync_tl Sync timeline.
+ * @return New signaled sync fence if successful, NULL if not.
+ */
+struct sync_fence *mali_sync_timeline_create_signaled_fence(struct sync_timeline *sync_tl);
+
+/**
+ * Create a sync flag.
+ *
+ * @param sync_tl Sync timeline.
+ * @param point Point on Mali timeline.
+ * @return New sync flag if successful, NULL if not.
+ */
+struct mali_sync_flag *mali_sync_flag_create(struct sync_timeline *sync_tl, u32 point);
+
+/**
+ * Grab sync flag reference.
+ *
+ * @param flag Sync flag.
+ */
+void mali_sync_flag_get(struct mali_sync_flag *flag);
+
+/**
+ * Release sync flag reference. If this was the last reference, the sync flag will be freed.
+ *
+ * @param flag Sync flag.
+ */
+void mali_sync_flag_put(struct mali_sync_flag *flag);
+
+/**
+ * Signal sync flag. All sync fences created from this flag will be signaled.
+ *
+ * @param flag Sync flag to signal.
+ * @param error Negative error code, or 0 if no error.
+ */
+void mali_sync_flag_signal(struct mali_sync_flag *flag, int error);
+
+/**
+ * Create a sync fence attached to given sync flag.
+ *
+ * @param flag Sync flag.
+ * @return New sync fence if successful, NULL if not.
+ */
+struct sync_fence *mali_sync_flag_create_fence(struct mali_sync_flag *flag);
+
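+/* Illustrative lifecycle of a sync flag (sketch only; tl and point are
+ * placeholders):
+ *
+ *   flag = mali_sync_flag_create(tl, point);
+ *   fence = mali_sync_flag_create_fence(flag);  (can be repeated)
+ *   mali_sync_flag_signal(flag, 0);             (signals every such fence)
+ *   mali_sync_flag_put(flag);                   (drop the creation reference)
+ */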
+#endif /* defined(CONFIG_SYNC) */
+
+#endif /* _MALI_SYNC_H_ */
diff --git a/drivers/gpu/arm/utgard/linux/mali_uk_types.h b/drivers/gpu/arm/utgard/linux/mali_uk_types.h
new file mode 100644
index 000000000000..1884cdbba424
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_uk_types.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2012, 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_UK_TYPES_H__
+#define __MALI_UK_TYPES_H__
+
+/* Simple wrapper in order to find the OS specific location of this file */
+#include <linux/mali/mali_utgard_uk_types.h>
+
+#endif /* __MALI_UK_TYPES_H__ */
diff --git a/drivers/gpu/arm/utgard/linux/mali_ukk_core.c b/drivers/gpu/arm/utgard/linux/mali_ukk_core.c
new file mode 100644
index 000000000000..41c1f48fc6c7
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_ukk_core.c
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <linux/slab.h> /* memory allocation functions */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
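+/* All wrappers in this file follow the same shape: copy arguments in from
+ * user space, point kargs.ctx at the session, call the _mali_ukk_*() backend,
+ * map any _mali_osk_errcode_t failure to a negative errno, and copy results
+ * back out. */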
+int get_api_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_s __user *uargs)
+{
+ _mali_uk_get_api_version_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != get_user(kargs.version, &uargs->version)) return -EFAULT;
+
+ kargs.ctx = (uintptr_t)session_data;
+ err = _mali_ukk_get_api_version(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT;
+ if (0 != put_user(kargs.compatible, &uargs->compatible)) return -EFAULT;
+
+ return 0;
+}
+
+int get_api_version_v2_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_v2_s __user *uargs)
+{
+ _mali_uk_get_api_version_v2_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != get_user(kargs.version, &uargs->version)) return -EFAULT;
+
+ kargs.ctx = (uintptr_t)session_data;
+ err = _mali_ukk_get_api_version_v2(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT;
+ if (0 != put_user(kargs.compatible, &uargs->compatible)) return -EFAULT;
+
+ return 0;
+}
+
+int wait_for_notification_wrapper(struct mali_session_data *session_data, _mali_uk_wait_for_notification_s __user *uargs)
+{
+ _mali_uk_wait_for_notification_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = (uintptr_t)session_data;
+ err = _mali_ukk_wait_for_notification(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (_MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS != kargs.type) {
+ kargs.ctx = (uintptr_t)NULL; /* prevent a kernel address from being returned to user space */
+ if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_wait_for_notification_s))) return -EFAULT;
+ } else {
+ if (0 != put_user(kargs.type, &uargs->type)) return -EFAULT;
+ }
+
+ return 0;
+}
+
+int post_notification_wrapper(struct mali_session_data *session_data, _mali_uk_post_notification_s __user *uargs)
+{
+ _mali_uk_post_notification_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = (uintptr_t)session_data;
+
+ if (0 != get_user(kargs.type, &uargs->type)) {
+ return -EFAULT;
+ }
+
+ err = _mali_ukk_post_notification(&kargs);
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
+
+int get_user_settings_wrapper(struct mali_session_data *session_data, _mali_uk_get_user_settings_s __user *uargs)
+{
+ _mali_uk_get_user_settings_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = (uintptr_t)session_data;
+ err = _mali_ukk_get_user_settings(&kargs);
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
+ }
+
+ kargs.ctx = 0; /* prevent a kernel address from being returned to user space */
+ if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_get_user_settings_s))) return -EFAULT;
+
+ return 0;
+}
+
+int request_high_priority_wrapper(struct mali_session_data *session_data, _mali_uk_request_high_priority_s __user *uargs)
+{
+ _mali_uk_request_high_priority_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = (uintptr_t)session_data;
+ err = _mali_ukk_request_high_priority(&kargs);
+
+ kargs.ctx = 0;
+
+ return map_errcode(err);
+}
+
+int pending_submit_wrapper(struct mali_session_data *session_data, _mali_uk_pending_submit_s __user *uargs)
+{
+ _mali_uk_pending_submit_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = (uintptr_t)session_data;
+ err = _mali_ukk_pending_submit(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ return 0;
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_ukk_gp.c b/drivers/gpu/arm/utgard/linux/mali_ukk_gp.c
new file mode 100644
index 000000000000..d4144c0f5e48
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_ukk_gp.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2010, 2012-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+int gp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_gp_start_job_s __user *uargs)
+{
+ _mali_osk_errcode_t err;
+
+ /* If the job was started successfully, 0 is returned. If there was an error, but the job
+ * was started, we return -ENOENT. For anything else returned, the job was not started. */
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ err = _mali_ukk_gp_start_job(session_data, uargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ return 0;
+}
+
+int gp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_core_version_s __user *uargs)
+{
+ _mali_uk_get_gp_core_version_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ kargs.ctx = (uintptr_t)session_data;
+ err = _mali_ukk_get_gp_core_version(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ /* no known transactions to roll-back */
+
+ if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT;
+
+ return 0;
+}
+
+int gp_suspend_response_wrapper(struct mali_session_data *session_data, _mali_uk_gp_suspend_response_s __user *uargs)
+{
+ _mali_uk_gp_suspend_response_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_gp_suspend_response_s))) return -EFAULT;
+
+ kargs.ctx = (uintptr_t)session_data;
+ err = _mali_ukk_gp_suspend_response(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.cookie, &uargs->cookie)) return -EFAULT;
+
+ /* no known transactions to roll-back */
+ return 0;
+}
+
+int gp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_number_of_cores_s __user *uargs)
+{
+ _mali_uk_get_gp_number_of_cores_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ kargs.ctx = (uintptr_t)session_data;
+ err = _mali_ukk_get_gp_number_of_cores(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ /* no known transactions to roll-back */
+
+ if (0 != put_user(kargs.number_of_cores, &uargs->number_of_cores)) return -EFAULT;
+
+ return 0;
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_ukk_mem.c b/drivers/gpu/arm/utgard/linux/mali_ukk_mem.c
new file mode 100644
index 000000000000..ca1cba0585b3
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_ukk_mem.c
@@ -0,0 +1,333 @@
+/*
+ * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+int mem_alloc_wrapper(struct mali_session_data *session_data, _mali_uk_alloc_mem_s __user *uargs)
+{
+ _mali_uk_alloc_mem_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_alloc_mem_s))) {
+ return -EFAULT;
+ }
+ kargs.ctx = (uintptr_t)session_data;
+
+ err = _mali_ukk_mem_allocate(&kargs);
+
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
+ }
+
+ if (0 != put_user(kargs.backend_handle, &uargs->backend_handle)) {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int mem_free_wrapper(struct mali_session_data *session_data, _mali_uk_free_mem_s __user *uargs)
+{
+ _mali_uk_free_mem_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_free_mem_s))) {
+ return -EFAULT;
+ }
+ kargs.ctx = (uintptr_t)session_data;
+
+ err = _mali_ukk_mem_free(&kargs);
+
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
+ }
+
+ if (0 != put_user(kargs.free_pages_nr, &uargs->free_pages_nr)) {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int mem_bind_wrapper(struct mali_session_data *session_data, _mali_uk_bind_mem_s __user *uargs)
+{
+ _mali_uk_bind_mem_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_bind_mem_s))) {
+ return -EFAULT;
+ }
+ kargs.ctx = (uintptr_t)session_data;
+
+ err = _mali_ukk_mem_bind(&kargs);
+
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
+
+int mem_unbind_wrapper(struct mali_session_data *session_data, _mali_uk_unbind_mem_s __user *uargs)
+{
+ _mali_uk_unbind_mem_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_unbind_mem_s))) {
+ return -EFAULT;
+ }
+ kargs.ctx = (uintptr_t)session_data;
+
+ err = _mali_ukk_mem_unbind(&kargs);
+
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
+
+
+int mem_cow_wrapper(struct mali_session_data *session_data, _mali_uk_cow_mem_s __user *uargs)
+{
+ _mali_uk_cow_mem_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_cow_mem_s))) {
+ return -EFAULT;
+ }
+ kargs.ctx = (uintptr_t)session_data;
+
+ err = _mali_ukk_mem_cow(&kargs);
+
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
+ }
+
+ if (0 != put_user(kargs.backend_handle, &uargs->backend_handle)) {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int mem_cow_modify_range_wrapper(struct mali_session_data *session_data, _mali_uk_cow_modify_range_s __user *uargs)
+{
+ _mali_uk_cow_modify_range_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_cow_modify_range_s))) {
+ return -EFAULT;
+ }
+ kargs.ctx = (uintptr_t)session_data;
+
+ err = _mali_ukk_mem_cow_modify_range(&kargs);
+
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
+ }
+
+ if (0 != put_user(kargs.change_pages_nr, &uargs->change_pages_nr)) {
+ return -EFAULT;
+ }
+ return 0;
+}
+
+
+int mem_resize_mem_wrapper(struct mali_session_data *session_data, _mali_uk_mem_resize_s __user *uargs)
+{
+ _mali_uk_mem_resize_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_mem_resize_s))) {
+ return -EFAULT;
+ }
+ kargs.ctx = (uintptr_t)session_data;
+
+ err = _mali_ukk_mem_resize(&kargs);
+
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
+
+int mem_write_safe_wrapper(struct mali_session_data *session_data, _mali_uk_mem_write_safe_s __user *uargs)
+{
+ _mali_uk_mem_write_safe_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_mem_write_safe_s))) {
+ return -EFAULT;
+ }
+
+ kargs.ctx = (uintptr_t)session_data;
+
+ /* Check if we can access the buffers */
+ if (!access_ok(VERIFY_WRITE, kargs.dest, kargs.size)
+ || !access_ok(VERIFY_READ, kargs.src, kargs.size)) {
+ return -EINVAL;
+ }
+
+ /* Check if size wraps */
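+ /* (e.g. on a 32-bit build, dest = 0xFFFFF000 with size = 0x2000 would
+ * wrap around to 0x1000; illustrative values) */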
+ if ((kargs.size + kargs.dest) <= kargs.dest
+ || (kargs.size + kargs.src) <= kargs.src) {
+ return -EINVAL;
+ }
+
+ err = _mali_ukk_mem_write_safe(&kargs);
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
+ }
+
+ if (0 != put_user(kargs.size, &uargs->size)) {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+
+
+int mem_query_mmu_page_table_dump_size_wrapper(struct mali_session_data *session_data, _mali_uk_query_mmu_page_table_dump_size_s __user *uargs)
+{
+ _mali_uk_query_mmu_page_table_dump_size_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ kargs.ctx = (uintptr_t)session_data;
+
+ err = _mali_ukk_query_mmu_page_table_dump_size(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.size, &uargs->size)) return -EFAULT;
+
+ return 0;
+}
+
+int mem_dump_mmu_page_table_wrapper(struct mali_session_data *session_data, _mali_uk_dump_mmu_page_table_s __user *uargs)
+{
+ _mali_uk_dump_mmu_page_table_s kargs;
+ _mali_osk_errcode_t err;
+ void __user *user_buffer;
+ void *buffer = NULL;
+ int rc = -EFAULT;
+
+ /* validate input */
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ /* the session_data pointer was validated by caller */
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_dump_mmu_page_table_s)))
+ goto err_exit;
+
+ user_buffer = (void __user *)(uintptr_t)kargs.buffer;
+ if (!access_ok(VERIFY_WRITE, user_buffer, kargs.size))
+ goto err_exit;
+
+ /* allocate temporary buffer (kernel side) to store mmu page table info */
+ if (kargs.size <= 0)
+ return -EINVAL;
+ /* Allow at most 8MiB buffers, this is more than enough to dump a fully
+ * populated page table. */
+ if (kargs.size > SZ_8M)
+ return -EINVAL;
+
+ buffer = _mali_osk_valloc(kargs.size);
+ if (NULL == buffer) {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+
+ kargs.ctx = (uintptr_t)session_data;
+ kargs.buffer = (uintptr_t)buffer;
+ err = _mali_ukk_dump_mmu_page_table(&kargs);
+ if (_MALI_OSK_ERR_OK != err) {
+ rc = map_errcode(err);
+ goto err_exit;
+ }
+
+ /* copy mmu page table info back to user space and update pointers */
+ if (0 != copy_to_user(user_buffer, buffer, kargs.size))
+ goto err_exit;
+
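+ /* register_writes and page_table_dump are kernel addresses inside
+ * 'buffer'; rebasing them by (user_buffer - buffer) yields the matching
+ * addresses inside the user-space copy. */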
+ kargs.register_writes = kargs.register_writes -
+ (uintptr_t)buffer + (uintptr_t)user_buffer;
+ kargs.page_table_dump = kargs.page_table_dump -
+ (uintptr_t)buffer + (uintptr_t)user_buffer;
+
+ if (0 != copy_to_user(uargs, &kargs, sizeof(kargs)))
+ goto err_exit;
+
+ rc = 0;
+
+err_exit:
+ if (buffer) _mali_osk_vfree(buffer);
+ return rc;
+}
+
+int mem_usage_get_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_memory_usage_get_s __user *uargs)
+{
+ _mali_osk_errcode_t err;
+ _mali_uk_profiling_memory_usage_get_s kargs;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_profiling_memory_usage_get_s))) {
+ return -EFAULT;
+ }
+
+ kargs.ctx = (uintptr_t)session_data;
+ err = _mali_ukk_mem_usage_get(&kargs);
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
+ }
+
+ kargs.ctx = (uintptr_t)NULL; /* prevent a kernel address from being returned to user space */
+ if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_profiling_memory_usage_get_s))) {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
diff --git a/drivers/gpu/arm/utgard/linux/mali_ukk_pp.c b/drivers/gpu/arm/utgard/linux/mali_ukk_pp.c
new file mode 100644
index 000000000000..4c1c381cb90f
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_ukk_pp.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2010, 2012-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+int pp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_start_job_s __user *uargs)
+{
+ _mali_osk_errcode_t err;
+
+ /* If the job was started successfully, 0 is returned. If there was an error, but the job
+ * was started, we return -ENOENT. For anything else returned, the job was not started. */
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ err = _mali_ukk_pp_start_job(session_data, uargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ return 0;
+}
+
+int pp_and_gp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_and_gp_start_job_s __user *uargs)
+{
+ _mali_osk_errcode_t err;
+
+ /* If the jobs were started successfully, 0 is returned. If there was an error, but the
+ * jobs were started, we return -ENOENT. For anything else returned, the jobs were not
+ * started. */
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ err = _mali_ukk_pp_and_gp_start_job(session_data, uargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ return 0;
+}
+
+int pp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_number_of_cores_s __user *uargs)
+{
+ _mali_uk_get_pp_number_of_cores_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ kargs.ctx = (uintptr_t)session_data;
+
+ err = _mali_ukk_get_pp_number_of_cores(&kargs);
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
+ }
+
+ kargs.ctx = (uintptr_t)NULL; /* prevent a kernel address from being returned to user space */
+ if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_get_pp_number_of_cores_s))) {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int pp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_core_version_s __user *uargs)
+{
+ _mali_uk_get_pp_core_version_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ kargs.ctx = (uintptr_t)session_data;
+ err = _mali_ukk_get_pp_core_version(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT;
+
+ return 0;
+}
+
+int pp_disable_wb_wrapper(struct mali_session_data *session_data, _mali_uk_pp_disable_wb_s __user *uargs)
+{
+ _mali_uk_pp_disable_wb_s kargs;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_pp_disable_wb_s))) return -EFAULT;
+
+ kargs.ctx = (uintptr_t)session_data;
+ _mali_ukk_pp_job_disable_wb(&kargs);
+
+ return 0;
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_ukk_profiling.c b/drivers/gpu/arm/utgard/linux/mali_ukk_profiling.c
new file mode 100644
index 000000000000..e84544dee07d
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_ukk_profiling.c
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+#include <linux/slab.h>
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+int profiling_add_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_add_event_s __user *uargs)
+{
+ _mali_uk_profiling_add_event_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_profiling_add_event_s))) {
+ return -EFAULT;
+ }
+
+ kargs.ctx = (uintptr_t)session_data;
+ err = _mali_ukk_profiling_add_event(&kargs);
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
+
+int profiling_report_sw_counters_wrapper(struct mali_session_data *session_data, _mali_uk_sw_counters_report_s __user *uargs)
+{
+ _mali_uk_sw_counters_report_s kargs;
+ _mali_osk_errcode_t err;
+ u32 *counter_buffer;
+ u32 __user *counters;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_sw_counters_report_s))) {
+ return -EFAULT;
+ }
+
+ /* make sure that kargs.num_counters is [at least somewhat] sane */
+ if (kargs.num_counters > 10000) {
+ MALI_DEBUG_PRINT(1, ("User space attempted to allocate too many counters.\n"));
+ return -EINVAL;
+ }
+
+ counter_buffer = (u32 *)kmalloc(sizeof(u32) * kargs.num_counters, GFP_KERNEL);
+ if (NULL == counter_buffer) {
+ return -ENOMEM;
+ }
+
+ counters = (u32 __user *)(uintptr_t)kargs.counters;
+
+ if (0 != copy_from_user(counter_buffer, counters, sizeof(u32) * kargs.num_counters)) {
+ kfree(counter_buffer);
+ return -EFAULT;
+ }
+
+ kargs.ctx = (uintptr_t)session_data;
+ kargs.counters = (uintptr_t)counter_buffer;
+
+ err = _mali_ukk_sw_counters_report(&kargs);
+
+ kfree(counter_buffer);
+
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
+
+int profiling_get_stream_fd_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_stream_fd_get_s __user *uargs)
+{
+ _mali_uk_profiling_stream_fd_get_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_profiling_stream_fd_get_s))) {
+ return -EFAULT;
+ }
+
+ kargs.ctx = (uintptr_t)session_data;
+ err = _mali_ukk_profiling_stream_fd_get(&kargs);
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
+ }
+
+ if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_profiling_stream_fd_get_s))) {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int profiling_control_set_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_control_set_s __user *uargs)
+{
+ _mali_uk_profiling_control_set_s kargs;
+ _mali_osk_errcode_t err;
+ u8 *kernel_control_data = NULL;
+ u8 *kernel_response_data = NULL;
+ u64 user_control_data = 0;
+ u64 user_response_data = 0;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != get_user(kargs.control_packet_size, &uargs->control_packet_size)) return -EFAULT;
+ if (0 != get_user(kargs.response_packet_size, &uargs->response_packet_size)) return -EFAULT;
+
+ kargs.ctx = (uintptr_t)session_data;
+
+ if (0 != kargs.control_packet_size) {
+
+ kernel_control_data = _mali_osk_calloc(1, kargs.control_packet_size);
+ if (NULL == kernel_control_data) {
+ return -ENOMEM;
+ }
+
+ MALI_DEBUG_ASSERT(0 != kargs.response_packet_size);
+
+ kernel_response_data = _mali_osk_calloc(1, kargs.response_packet_size);
+ if (NULL == kernel_response_data) {
+ _mali_osk_free(kernel_control_data);
+ return -ENOMEM;
+ }
+
+ /* Fetch the packet pointers with get_user(); dereferencing the uargs fields directly would read user memory unchecked. */
+ if (0 != get_user(user_control_data, &uargs->control_packet_data) ||
+     0 != get_user(user_response_data, &uargs->response_packet_data)) {
+ _mali_osk_free(kernel_control_data);
+ _mali_osk_free(kernel_response_data);
+ return -EFAULT;
+ }
+
+ kargs.control_packet_data = (uintptr_t)kernel_control_data;
+ kargs.response_packet_data = (uintptr_t)kernel_response_data;
+
+ if (0 != copy_from_user(kernel_control_data, (void __user *)(uintptr_t)user_control_data, kargs.control_packet_size)) {
+ _mali_osk_free(kernel_control_data);
+ _mali_osk_free(kernel_response_data);
+ return -EFAULT;
+ }
+
+ err = _mali_ukk_profiling_control_set(&kargs);
+ if (_MALI_OSK_ERR_OK != err) {
+ _mali_osk_free(kernel_control_data);
+ _mali_osk_free(kernel_response_data);
+ return map_errcode(err);
+ }
+
+ if (0 != kargs.response_packet_size && 0 != copy_to_user((void __user *)(uintptr_t)user_response_data, (void *)(uintptr_t)kargs.response_packet_data, kargs.response_packet_size)) {
+ _mali_osk_free(kernel_control_data);
+ _mali_osk_free(kernel_response_data);
+ return -EFAULT;
+ }
+
+ if (0 != put_user(kargs.response_packet_size, &uargs->response_packet_size)) {
+ _mali_osk_free(kernel_control_data);
+ _mali_osk_free(kernel_response_data);
+ return -EFAULT;
+ }
+
+ _mali_osk_free(kernel_control_data);
+ _mali_osk_free(kernel_response_data);
+ } else {
+
+ err = _mali_ukk_profiling_control_set(&kargs);
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
+ }
+
+ }
+ return 0;
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_ukk_soft_job.c b/drivers/gpu/arm/utgard/linux/mali_ukk_soft_job.c
new file mode 100644
index 000000000000..11c70060e489
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_ukk_soft_job.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+#include "mali_soft_job.h"
+#include "mali_timeline.h"
+
+int soft_job_start_wrapper(struct mali_session_data *session, _mali_uk_soft_job_start_s __user *uargs)
+{
+ _mali_uk_soft_job_start_s kargs;
+ u32 type, point;
+ u64 user_job;
+ struct mali_timeline_fence fence;
+ struct mali_soft_job *job = NULL;
+ u32 __user *job_id_ptr = NULL;
+
+ /* If the job was started successfully, 0 is returned. If there was an error, but the job
+ * was started, we return -ENOENT. For anything else returned, the job was not started. */
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session, -EINVAL);
+
+ MALI_DEBUG_ASSERT_POINTER(session->soft_job_system);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(kargs))) {
+ return -EFAULT;
+ }
+
+ type = kargs.type;
+ user_job = kargs.user_job;
+ job_id_ptr = (u32 __user *)(uintptr_t)kargs.job_id_ptr;
+
+ mali_timeline_fence_copy_uk_fence(&fence, &kargs.fence);
+
+ if ((MALI_SOFT_JOB_TYPE_USER_SIGNALED != type) && (MALI_SOFT_JOB_TYPE_SELF_SIGNALED != type)) {
+ MALI_DEBUG_PRINT_ERROR(("Invalid soft job type specified\n"));
+ return -EINVAL;
+ }
+
+ /* Create soft job. */
+ job = mali_soft_job_create(session->soft_job_system, (enum mali_soft_job_type)type, user_job);
+ if (unlikely(NULL == job)) {
+ return map_errcode(_MALI_OSK_ERR_NOMEM);
+ }
+
+ /* Write job id back to user space. */
+ if (0 != put_user(job->id, job_id_ptr)) {
+ MALI_PRINT_ERROR(("Mali Soft Job: failed to put job id"));
+ mali_soft_job_destroy(job);
+ return map_errcode(_MALI_OSK_ERR_NOMEM);
+ }
+
+ /* Start soft job. */
+ point = mali_soft_job_start(job, &fence);
+
+ if (0 != put_user(point, &uargs->point)) {
+ /* Let user space know that something failed after the job was started. */
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+int soft_job_signal_wrapper(struct mali_session_data *session, _mali_uk_soft_job_signal_s __user *uargs)
+{
+ u32 job_id;
+ _mali_osk_errcode_t err;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ if (0 != get_user(job_id, &uargs->job_id)) return -EFAULT;
+
+ err = mali_soft_job_system_signal_job(session->soft_job_system, job_id);
+
+ return map_errcode(err);
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_ukk_timeline.c b/drivers/gpu/arm/utgard/linux/mali_ukk_timeline.c
new file mode 100644
index 000000000000..484d4041c869
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_ukk_timeline.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2013, 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+#include "mali_timeline.h"
+#include "mali_timeline_fence_wait.h"
+#include "mali_timeline_sync_fence.h"
+
+int timeline_get_latest_point_wrapper(struct mali_session_data *session, _mali_uk_timeline_get_latest_point_s __user *uargs)
+{
+ u32 val;
+ mali_timeline_id timeline;
+ mali_timeline_point point;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ if (0 != get_user(val, &uargs->timeline)) return -EFAULT;
+
+ if (MALI_UK_TIMELINE_MAX <= val) {
+ return -EINVAL;
+ }
+
+ timeline = (mali_timeline_id)val;
+
+ point = mali_timeline_system_get_latest_point(session->timeline_system, timeline);
+
+ if (0 != put_user(point, &uargs->point)) return -EFAULT;
+
+ return 0;
+}
+
+int timeline_wait_wrapper(struct mali_session_data *session, _mali_uk_timeline_wait_s __user *uargs)
+{
+ u32 timeout, status;
+ mali_bool ret;
+ _mali_uk_fence_t uk_fence;
+ struct mali_timeline_fence fence;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ if (0 != copy_from_user(&uk_fence, &uargs->fence, sizeof(_mali_uk_fence_t))) return -EFAULT;
+ if (0 != get_user(timeout, &uargs->timeout)) return -EFAULT;
+
+ mali_timeline_fence_copy_uk_fence(&fence, &uk_fence);
+
+ ret = mali_timeline_fence_wait(session->timeline_system, &fence, timeout);
+ status = (MALI_TRUE == ret ? 1 : 0);
+
+ if (0 != put_user(status, &uargs->status)) return -EFAULT;
+
+ return 0;
+}
+
+int timeline_create_sync_fence_wrapper(struct mali_session_data *session, _mali_uk_timeline_create_sync_fence_s __user *uargs)
+{
+ s32 sync_fd = -1;
+ _mali_uk_fence_t uk_fence;
+ struct mali_timeline_fence fence;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ if (0 != copy_from_user(&uk_fence, &uargs->fence, sizeof(_mali_uk_fence_t))) return -EFAULT;
+ mali_timeline_fence_copy_uk_fence(&fence, &uk_fence);
+
+#if defined(CONFIG_SYNC)
+ sync_fd = mali_timeline_sync_fence_create(session->timeline_system, &fence);
+#else
+ sync_fd = -1;
+#endif /* defined(CONFIG_SYNC) */
+
+ if (0 != put_user(sync_fd, &uargs->sync_fd)) return -EFAULT;
+
+ return 0;
+}
diff --git a/drivers/gpu/arm/utgard/linux/mali_ukk_vsync.c b/drivers/gpu/arm/utgard/linux/mali_ukk_vsync.c
new file mode 100644
index 000000000000..487c2478df4d
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_ukk_vsync.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2011-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+
+int vsync_event_report_wrapper(struct mali_session_data *session_data, _mali_uk_vsync_event_report_s __user *uargs)
+{
+ _mali_uk_vsync_event_report_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_vsync_event_report_s))) {
+ return -EFAULT;
+ }
+
+ kargs.ctx = (uintptr_t)session_data;
+ err = _mali_ukk_vsync_event_report(&kargs);
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
+
diff --git a/drivers/gpu/arm/utgard/linux/mali_ukk_wrappers.h b/drivers/gpu/arm/utgard/linux/mali_ukk_wrappers.h
new file mode 100644
index 000000000000..ac84e446bd12
--- /dev/null
+++ b/drivers/gpu/arm/utgard/linux/mali_ukk_wrappers.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_ukk_wrappers.h
+ * Defines the wrapper functions for each user-kernel function
+ */
+
+#ifndef __MALI_UKK_WRAPPERS_H__
+#define __MALI_UKK_WRAPPERS_H__
+
+#include "mali_uk_types.h"
+#include "mali_osk.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int wait_for_notification_wrapper(struct mali_session_data *session_data, _mali_uk_wait_for_notification_s __user *uargs);
+int get_api_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_s __user *uargs);
+int get_api_version_v2_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_v2_s __user *uargs);
+int get_user_settings_wrapper(struct mali_session_data *session_data, _mali_uk_get_user_settings_s __user *uargs);
+int post_notification_wrapper(struct mali_session_data *session_data, _mali_uk_post_notification_s __user *uargs);
+int request_high_priority_wrapper(struct mali_session_data *session_data, _mali_uk_request_high_priority_s __user *uargs);
+int pending_submit_wrapper(struct mali_session_data *session_data, _mali_uk_pending_submit_s __user *uargs);
+
+int mem_alloc_wrapper(struct mali_session_data *session_data, _mali_uk_alloc_mem_s __user *uargs);
+int mem_free_wrapper(struct mali_session_data *session_data, _mali_uk_free_mem_s __user *uargs);
+int mem_bind_wrapper(struct mali_session_data *session_data, _mali_uk_bind_mem_s __user *uargs);
+int mem_unbind_wrapper(struct mali_session_data *session_data, _mali_uk_unbind_mem_s __user *uargs);
+int mem_cow_wrapper(struct mali_session_data *session_data, _mali_uk_cow_mem_s __user *uargs);
+int mem_cow_modify_range_wrapper(struct mali_session_data *session_data, _mali_uk_cow_modify_range_s __user *uargs);
+int mem_resize_mem_wrapper(struct mali_session_data *session_data, _mali_uk_mem_resize_s __user *uargs);
+int mem_write_safe_wrapper(struct mali_session_data *session_data, _mali_uk_mem_write_safe_s __user *uargs);
+int mem_query_mmu_page_table_dump_size_wrapper(struct mali_session_data *session_data, _mali_uk_query_mmu_page_table_dump_size_s __user *uargs);
+int mem_dump_mmu_page_table_wrapper(struct mali_session_data *session_data, _mali_uk_dump_mmu_page_table_s __user *uargs);
+int mem_usage_get_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_memory_usage_get_s __user *uargs);
+
+int timeline_get_latest_point_wrapper(struct mali_session_data *session, _mali_uk_timeline_get_latest_point_s __user *uargs);
+int timeline_wait_wrapper(struct mali_session_data *session, _mali_uk_timeline_wait_s __user *uargs);
+int timeline_create_sync_fence_wrapper(struct mali_session_data *session, _mali_uk_timeline_create_sync_fence_s __user *uargs);
+int soft_job_start_wrapper(struct mali_session_data *session, _mali_uk_soft_job_start_s __user *uargs);
+int soft_job_signal_wrapper(struct mali_session_data *session, _mali_uk_soft_job_signal_s __user *uargs);
+int pp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_start_job_s __user *uargs);
+int pp_and_gp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_and_gp_start_job_s __user *uargs);
+int pp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_number_of_cores_s __user *uargs);
+int pp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_core_version_s __user *uargs);
+int pp_disable_wb_wrapper(struct mali_session_data *session_data, _mali_uk_pp_disable_wb_s __user *uargs);
+int gp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_gp_start_job_s __user *uargs);
+int gp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_number_of_cores_s __user *uargs);
+int gp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_core_version_s __user *uargs);
+int gp_suspend_response_wrapper(struct mali_session_data *session_data, _mali_uk_gp_suspend_response_s __user *uargs);
+
+int profiling_add_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_add_event_s __user *uargs);
+int profiling_report_sw_counters_wrapper(struct mali_session_data *session_data, _mali_uk_sw_counters_report_s __user *uargs);
+int profiling_get_stream_fd_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_stream_fd_get_s __user *uargs);
+int profiling_control_set_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_control_set_s __user *uargs);
+
+int vsync_event_report_wrapper(struct mali_session_data *session_data, _mali_uk_vsync_event_report_s __user *uargs);
+
+int map_errcode(_mali_osk_errcode_t err);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UKK_WRAPPERS_H__ */
diff --git a/drivers/gpu/arm/utgard/platform/arm/arm.c b/drivers/gpu/arm/utgard/platform/arm/arm.c
new file mode 100644
index 000000000000..41ad63c0793b
--- /dev/null
+++ b/drivers/gpu/arm/utgard/platform/arm/arm.c
@@ -0,0 +1,439 @@
+/*
+ * Copyright (C) 2010, 2012-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file arm.c
+ * Platform specific Mali driver functions for:
+ * - RealView Versatile platforms with ARM11 MPCore and Virtex-5.
+ * - Versatile Express platforms with ARM Cortex-A9 and Virtex-6.
+ */
+#include <linux/platform_device.h>
+#include <linux/version.h>
+#include <linux/pm.h>
+#ifdef CONFIG_PM_RUNTIME
+#include <linux/pm_runtime.h>
+#endif
+#include <asm/io.h>
+#include <linux/mali/mali_utgard.h>
+#include "mali_kernel_common.h"
+#include <linux/dma-mapping.h>
+#include <linux/moduleparam.h>
+
+#include "arm_core_scaling.h"
+#include "mali_executor.h"
+
+
+static int mali_core_scaling_enable = 0;
+
+void mali_gpu_utilization_callback(struct mali_gpu_utilization_data *data);
+static u32 mali_read_phys(u32 phys_addr);
+#if defined(CONFIG_ARCH_REALVIEW)
+static void mali_write_phys(u32 phys_addr, u32 value);
+#endif
+
+#ifndef CONFIG_MALI_DT
+static void mali_platform_device_release(struct device *device);
+
+#if defined(CONFIG_ARCH_VEXPRESS)
+
+#if defined(CONFIG_ARM64)
+/* Juno + Mali-450 MP6 in V7 FPGA */
+static struct resource mali_gpu_resources_m450_mp6[] = {
+ MALI_GPU_RESOURCES_MALI450_MP6_PMU(0x6F040000, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200)
+};
+
+static struct resource mali_gpu_resources_m470_mp4[] = {
+ MALI_GPU_RESOURCES_MALI470_MP4_PMU(0x6F040000, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200)
+};
+
+static struct resource mali_gpu_resources_m470_mp3[] = {
+ MALI_GPU_RESOURCES_MALI470_MP3_PMU(0x6F040000, 200, 200, 200, 200, 200, 200, 200, 200, 200)
+};
+
+static struct resource mali_gpu_resources_m470_mp2[] = {
+ MALI_GPU_RESOURCES_MALI470_MP2_PMU(0x6F040000, 200, 200, 200, 200, 200, 200, 200)
+};
+
+static struct resource mali_gpu_resources_m470_mp1[] = {
+ MALI_GPU_RESOURCES_MALI470_MP1_PMU(0x6F040000, 200, 200, 200, 200, 200)
+};
+
+#else
+static struct resource mali_gpu_resources_m450_mp8[] = {
+ MALI_GPU_RESOURCES_MALI450_MP8_PMU(0xFC040000, -1, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 68)
+};
+
+static struct resource mali_gpu_resources_m450_mp6[] = {
+ MALI_GPU_RESOURCES_MALI450_MP6_PMU(0xFC040000, -1, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 68)
+};
+
+static struct resource mali_gpu_resources_m450_mp4[] = {
+ MALI_GPU_RESOURCES_MALI450_MP4_PMU(0xFC040000, -1, 70, 70, 70, 70, 70, 70, 70, 70, 70, 68)
+};
+
+static struct resource mali_gpu_resources_m470_mp4[] = {
+ MALI_GPU_RESOURCES_MALI470_MP4_PMU(0xFC040000, -1, 70, 70, 70, 70, 70, 70, 70, 70, 70, 68)
+};
+#endif /* CONFIG_ARM64 */
+
+#elif defined(CONFIG_ARCH_REALVIEW)
+
+static struct resource mali_gpu_resources_m300[] = {
+ MALI_GPU_RESOURCES_MALI300_PMU(0xC0000000, -1, -1, -1, -1)
+};
+
+static struct resource mali_gpu_resources_m400_mp1[] = {
+ MALI_GPU_RESOURCES_MALI400_MP1_PMU(0xC0000000, -1, -1, -1, -1)
+};
+
+static struct resource mali_gpu_resources_m400_mp2[] = {
+ MALI_GPU_RESOURCES_MALI400_MP2_PMU(0xC0000000, -1, -1, -1, -1, -1, -1)
+};
+
+#endif
+#endif
+
+static struct mali_gpu_device_data mali_gpu_data = {
+#ifndef CONFIG_MALI_DT
+	.pmu_switch_delay = 0xFF, /* does not have to be this high on FPGA, but a delay is useful for testing */
+ .max_job_runtime = 60000, /* 60 seconds */
+#if defined(CONFIG_ARCH_VEXPRESS)
+ .shared_mem_size = 256 * 1024 * 1024, /* 256MB */
+#endif
+#endif
+
+#if defined(CONFIG_ARCH_REALVIEW)
+ .dedicated_mem_start = 0x80000000, /* Physical start address (use 0xD0000000 for old indirect setup) */
+ .dedicated_mem_size = 0x10000000, /* 256MB */
+#endif
+#if defined(CONFIG_ARM64)
+	/* Some framebuffer drivers allocate the framebuffer dynamically, e.g. through GEM,
+	 * in which case the memory region cannot be predicted in advance.
+	 */
+ .fb_start = 0x0,
+ .fb_size = 0xFFFFF000,
+#else
+ .fb_start = 0xe0000000,
+ .fb_size = 0x01000000,
+#endif
+ .control_interval = 1000, /* 1000ms */
+ .utilization_callback = mali_gpu_utilization_callback,
+ .get_clock_info = NULL,
+ .get_freq = NULL,
+ .set_freq = NULL,
+};
+
+#ifndef CONFIG_MALI_DT
+static struct platform_device mali_gpu_device = {
+ .name = MALI_GPU_NAME_UTGARD,
+ .id = 0,
+ .dev.release = mali_platform_device_release,
+ .dev.dma_mask = &mali_gpu_device.dev.coherent_dma_mask,
+ .dev.coherent_dma_mask = DMA_BIT_MASK(32),
+
+ .dev.platform_data = &mali_gpu_data,
+};
+
+int mali_platform_device_register(void)
+{
+ int err = -1;
+ int num_pp_cores = 0;
+#if defined(CONFIG_ARCH_REALVIEW)
+ u32 m400_gp_version;
+#endif
+
+ MALI_DEBUG_PRINT(4, ("mali_platform_device_register() called\n"));
+
+ /* Detect present Mali GPU and connect the correct resources to the device */
+#if defined(CONFIG_ARCH_VEXPRESS)
+
+#if defined(CONFIG_ARM64)
+ mali_gpu_device.dev.archdata.dma_ops = dma_ops;
+ if ((mali_read_phys(0x6F000000) & 0x00600450) == 0x00600450) {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP6 device\n"));
+ num_pp_cores = 6;
+ mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m450_mp6);
+ mali_gpu_device.resource = mali_gpu_resources_m450_mp6;
+ } else if ((mali_read_phys(0x6F000000) & 0x00F00430) == 0x00400430) {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-470 MP4 device\n"));
+ num_pp_cores = 4;
+ mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m470_mp4);
+ mali_gpu_device.resource = mali_gpu_resources_m470_mp4;
+ } else if ((mali_read_phys(0x6F000000) & 0x00F00430) == 0x00300430) {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-470 MP3 device\n"));
+ num_pp_cores = 3;
+ mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m470_mp3);
+ mali_gpu_device.resource = mali_gpu_resources_m470_mp3;
+ } else if ((mali_read_phys(0x6F000000) & 0x00F00430) == 0x00200430) {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-470 MP2 device\n"));
+ num_pp_cores = 2;
+ mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m470_mp2);
+ mali_gpu_device.resource = mali_gpu_resources_m470_mp2;
+ } else if ((mali_read_phys(0x6F000000) & 0x00F00430) == 0x00100430) {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-470 MP1 device\n"));
+ num_pp_cores = 1;
+ mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m470_mp1);
+ mali_gpu_device.resource = mali_gpu_resources_m470_mp1;
+ }
+#else
+ if (mali_read_phys(0xFC000000) == 0x00000450) {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP8 device\n"));
+ num_pp_cores = 8;
+ mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m450_mp8);
+ mali_gpu_device.resource = mali_gpu_resources_m450_mp8;
+ } else if (mali_read_phys(0xFC000000) == 0x40600450) {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP6 device\n"));
+ num_pp_cores = 6;
+ mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m450_mp6);
+ mali_gpu_device.resource = mali_gpu_resources_m450_mp6;
+ } else if (mali_read_phys(0xFC000000) == 0x40400450) {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP4 device\n"));
+ num_pp_cores = 4;
+ mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m450_mp4);
+ mali_gpu_device.resource = mali_gpu_resources_m450_mp4;
+ } else if (mali_read_phys(0xFC000000) == 0xFFFFFFFF) {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-470 MP4 device\n"));
+ num_pp_cores = 4;
+ mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m470_mp4);
+ mali_gpu_device.resource = mali_gpu_resources_m470_mp4;
+ }
+#endif /* CONFIG_ARM64 */
+
+#elif defined(CONFIG_ARCH_REALVIEW)
+
+ m400_gp_version = mali_read_phys(0xC000006C);
+ if ((m400_gp_version & 0xFFFF0000) == 0x0C070000) {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-300 device\n"));
+ num_pp_cores = 1;
+ mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m300);
+ mali_gpu_device.resource = mali_gpu_resources_m300;
+ mali_write_phys(0xC0010020, 0xA); /* Enable direct memory mapping for FPGA */
+ } else if ((m400_gp_version & 0xFFFF0000) == 0x0B070000) {
+ u32 fpga_fw_version = mali_read_phys(0xC0010000);
+ if (fpga_fw_version == 0x130C008F || fpga_fw_version == 0x110C008F) {
+ /* Mali-400 MP1 r1p0 or r1p1 */
+ MALI_DEBUG_PRINT(4, ("Registering Mali-400 MP1 device\n"));
+ num_pp_cores = 1;
+ mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m400_mp1);
+ mali_gpu_device.resource = mali_gpu_resources_m400_mp1;
+ mali_write_phys(0xC0010020, 0xA); /* Enable direct memory mapping for FPGA */
+ } else if (fpga_fw_version == 0x130C000F) {
+ /* Mali-400 MP2 r1p1 */
+ MALI_DEBUG_PRINT(4, ("Registering Mali-400 MP2 device\n"));
+ num_pp_cores = 2;
+ mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m400_mp2);
+ mali_gpu_device.resource = mali_gpu_resources_m400_mp2;
+ mali_write_phys(0xC0010020, 0xA); /* Enable direct memory mapping for FPGA */
+ }
+ }
+
+#endif
+ /* Register the platform device */
+ err = platform_device_register(&mali_gpu_device);
+ if (0 == err) {
+#ifdef CONFIG_PM_RUNTIME
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
+ pm_runtime_set_autosuspend_delay(&(mali_gpu_device.dev), 1000);
+ pm_runtime_use_autosuspend(&(mali_gpu_device.dev));
+#endif
+ pm_runtime_enable(&(mali_gpu_device.dev));
+#endif
+ MALI_DEBUG_ASSERT(0 < num_pp_cores);
+ mali_core_scaling_init(num_pp_cores);
+
+ return 0;
+ }
+
+ return err;
+}
+
+void mali_platform_device_unregister(void)
+{
+ MALI_DEBUG_PRINT(4, ("mali_platform_device_unregister() called\n"));
+
+ mali_core_scaling_term();
+	/* platform_device_unregister() drops the device reference itself,
+	 * so no additional platform_device_put() is needed here. */
+	platform_device_unregister(&mali_gpu_device);
+
+#if defined(CONFIG_ARCH_REALVIEW)
+ mali_write_phys(0xC0010020, 0x9); /* Restore default (legacy) memory mapping */
+#endif
+}
+
+static void mali_platform_device_release(struct device *device)
+{
+ MALI_DEBUG_PRINT(4, ("mali_platform_device_release() called\n"));
+}
+
+#else /* CONFIG_MALI_DT */
+int mali_platform_device_init(struct platform_device *device)
+{
+ int num_pp_cores = 0;
+ int err = -1;
+#if defined(CONFIG_ARCH_REALVIEW)
+ u32 m400_gp_version;
+#endif
+
+ /* Detect present Mali GPU and connect the correct resources to the device */
+#if defined(CONFIG_ARCH_VEXPRESS)
+
+#if defined(CONFIG_ARM64)
+ if ((mali_read_phys(0x6F000000) & 0x00600450) == 0x00600450) {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP6 device\n"));
+ num_pp_cores = 6;
+ } else if ((mali_read_phys(0x6F000000) & 0x00F00430) == 0x00400430) {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-470 MP4 device\n"));
+ num_pp_cores = 4;
+ } else if ((mali_read_phys(0x6F000000) & 0x00F00430) == 0x00300430) {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-470 MP3 device\n"));
+ num_pp_cores = 3;
+ } else if ((mali_read_phys(0x6F000000) & 0x00F00430) == 0x00200430) {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-470 MP2 device\n"));
+ num_pp_cores = 2;
+ } else if ((mali_read_phys(0x6F000000) & 0x00F00430) == 0x00100430) {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-470 MP1 device\n"));
+ num_pp_cores = 1;
+ }
+#else
+ if (mali_read_phys(0xFC000000) == 0x00000450) {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP8 device\n"));
+ num_pp_cores = 8;
+ } else if (mali_read_phys(0xFC000000) == 0x40400450) {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP4 device\n"));
+ num_pp_cores = 4;
+ } else if (mali_read_phys(0xFC000000) == 0xFFFFFFFF) {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-470 MP4 device\n"));
+ num_pp_cores = 4;
+ }
+#endif
+
+#elif defined(CONFIG_ARCH_REALVIEW)
+
+ m400_gp_version = mali_read_phys(0xC000006C);
+ if ((m400_gp_version & 0xFFFF0000) == 0x0C070000) {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-300 device\n"));
+ num_pp_cores = 1;
+ mali_write_phys(0xC0010020, 0xA); /* Enable direct memory mapping for FPGA */
+ } else if ((m400_gp_version & 0xFFFF0000) == 0x0B070000) {
+ u32 fpga_fw_version = mali_read_phys(0xC0010000);
+ if (fpga_fw_version == 0x130C008F || fpga_fw_version == 0x110C008F) {
+ /* Mali-400 MP1 r1p0 or r1p1 */
+ MALI_DEBUG_PRINT(4, ("Registering Mali-400 MP1 device\n"));
+ num_pp_cores = 1;
+ mali_write_phys(0xC0010020, 0xA); /* Enable direct memory mapping for FPGA */
+ } else if (fpga_fw_version == 0x130C000F) {
+ /* Mali-400 MP2 r1p1 */
+ MALI_DEBUG_PRINT(4, ("Registering Mali-400 MP2 device\n"));
+ num_pp_cores = 2;
+ mali_write_phys(0xC0010020, 0xA); /* Enable direct memory mapping for FPGA */
+ }
+ }
+#endif
+
+	/* Since kernel 3.15, of_platform_device_create_pdata() sets the
+	 * device's DMA-related parameters by default, but older kernels
+	 * (e.g. 3.10) do not set device->dev.dma_mask, which causes
+	 * dma_mapping errors. The driver therefore sets the required
+	 * parameters itself to stay independent of the kernel version.
+	 */
+ if (!device->dev.dma_mask)
+ device->dev.dma_mask = &device->dev.coherent_dma_mask;
+ device->dev.archdata.dma_ops = dma_ops;
+
+ err = platform_device_add_data(device, &mali_gpu_data, sizeof(mali_gpu_data));
+
+ if (0 == err) {
+#ifdef CONFIG_PM_RUNTIME
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
+ pm_runtime_set_autosuspend_delay(&(device->dev), 1000);
+ pm_runtime_use_autosuspend(&(device->dev));
+#endif
+ pm_runtime_enable(&(device->dev));
+#endif
+ MALI_DEBUG_ASSERT(0 < num_pp_cores);
+ mali_core_scaling_init(num_pp_cores);
+ }
+
+ return err;
+}
+
+int mali_platform_device_deinit(struct platform_device *device)
+{
+ MALI_IGNORE(device);
+
+ MALI_DEBUG_PRINT(4, ("mali_platform_device_deinit() called\n"));
+
+ mali_core_scaling_term();
+
+#if defined(CONFIG_ARCH_REALVIEW)
+ mali_write_phys(0xC0010020, 0x9); /* Restore default (legacy) memory mapping */
+#endif
+
+ return 0;
+}
+
+#endif /* CONFIG_MALI_DT */
+
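+/* Map the enclosing 8 KiB-aligned region containing phys_addr, read the
+ * 32-bit register at the requested offset, and unmap again. Returns
+ * 0xDEADBEEF if the mapping fails. */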
+static u32 mali_read_phys(u32 phys_addr)
+{
+ u32 phys_addr_page = phys_addr & 0xFFFFE000;
+ u32 phys_offset = phys_addr & 0x00001FFF;
+ u32 map_size = phys_offset + sizeof(u32);
+ u32 ret = 0xDEADBEEF;
+ void *mem_mapped = ioremap_nocache(phys_addr_page, map_size);
+ if (NULL != mem_mapped) {
+ ret = (u32)ioread32(((u8 *)mem_mapped) + phys_offset);
+ iounmap(mem_mapped);
+ }
+
+ return ret;
+}
+
+#if defined(CONFIG_ARCH_REALVIEW)
+static void mali_write_phys(u32 phys_addr, u32 value)
+{
+ u32 phys_addr_page = phys_addr & 0xFFFFE000;
+ u32 phys_offset = phys_addr & 0x00001FFF;
+ u32 map_size = phys_offset + sizeof(u32);
+ void *mem_mapped = ioremap_nocache(phys_addr_page, map_size);
+ if (NULL != mem_mapped) {
+ iowrite32(value, ((u8 *)mem_mapped) + phys_offset);
+ iounmap(mem_mapped);
+ }
+}
+#endif
+
+static int param_set_core_scaling(const char *val, const struct kernel_param *kp)
+{
+ int ret = param_set_int(val, kp);
+
+ if (1 == mali_core_scaling_enable) {
+ mali_core_scaling_sync(mali_executor_get_num_cores_enabled());
+ }
+ return ret;
+}
+
+static struct kernel_param_ops param_ops_core_scaling = {
+ .set = param_set_core_scaling,
+ .get = param_get_int,
+};
+
+module_param_cb(mali_core_scaling_enable, &param_ops_core_scaling, &mali_core_scaling_enable, 0644);
+MODULE_PARM_DESC(mali_core_scaling_enable, "1 means to enable core scaling policy, 0 means to disable core scaling policy");
+
+void mali_gpu_utilization_callback(struct mali_gpu_utilization_data *data)
+{
+ if (1 == mali_core_scaling_enable) {
+ mali_core_scaling_update(data);
+ }
+}
diff --git a/drivers/gpu/arm/utgard/platform/arm/arm_core_scaling.c b/drivers/gpu/arm/utgard/platform/arm/arm_core_scaling.c
new file mode 100644
index 000000000000..2c24742eb4de
--- /dev/null
+++ b/drivers/gpu/arm/utgard/platform/arm/arm_core_scaling.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file arm_core_scaling.c
+ * Example core scaling policy.
+ */
+
+#include "arm_core_scaling.h"
+
+#include <linux/mali/mali_utgard.h>
+#include "mali_kernel_common.h"
+
+#include <linux/workqueue.h>
+
+static int num_cores_total;
+static int num_cores_enabled;
+
+static struct work_struct wq_work;
+
+static void set_num_cores(struct work_struct *work)
+{
+ int err = mali_perf_set_num_pp_cores(num_cores_enabled);
+ MALI_DEBUG_ASSERT(0 == err);
+ MALI_IGNORE(err);
+}
+
+static void enable_one_core(void)
+{
+ if (num_cores_enabled < num_cores_total) {
+ ++num_cores_enabled;
+ schedule_work(&wq_work);
+ MALI_DEBUG_PRINT(3, ("Core scaling: Enabling one more core\n"));
+ }
+
+ MALI_DEBUG_ASSERT(1 <= num_cores_enabled);
+ MALI_DEBUG_ASSERT(num_cores_total >= num_cores_enabled);
+}
+
+static void disable_one_core(void)
+{
+ if (1 < num_cores_enabled) {
+ --num_cores_enabled;
+ schedule_work(&wq_work);
+ MALI_DEBUG_PRINT(3, ("Core scaling: Disabling one core\n"));
+ }
+
+ MALI_DEBUG_ASSERT(1 <= num_cores_enabled);
+ MALI_DEBUG_ASSERT(num_cores_total >= num_cores_enabled);
+}
+
+static void enable_max_num_cores(void)
+{
+ if (num_cores_enabled < num_cores_total) {
+ num_cores_enabled = num_cores_total;
+ schedule_work(&wq_work);
+ MALI_DEBUG_PRINT(3, ("Core scaling: Enabling maximum number of cores\n"));
+ }
+
+ MALI_DEBUG_ASSERT(num_cores_total == num_cores_enabled);
+}
+
+void mali_core_scaling_init(int num_pp_cores)
+{
+ INIT_WORK(&wq_work, set_num_cores);
+
+ num_cores_total = num_pp_cores;
+ num_cores_enabled = num_pp_cores;
+
+ /* NOTE: Mali is not fully initialized at this point. */
+}
+
+void mali_core_scaling_sync(int num_cores)
+{
+ num_cores_enabled = num_cores;
+}
+
+void mali_core_scaling_term(void)
+{
+ flush_scheduled_work();
+}
+
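+/* Rounded integer percentage, e.g. PERCENT_OF(90, 256) == 230. Every call
+ * site below passes compile-time constants, so the floating-point math is
+ * folded by the compiler and never executed in the kernel at runtime. */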
+#define PERCENT_OF(percent, max) ((int) ((percent)*(max)/100.0 + 0.5))
+
+void mali_core_scaling_update(struct mali_gpu_utilization_data *data)
+{
+	/*
+	 * This function implements a very trivial PP core scaling algorithm.
+	 *
+	 * It is _NOT_ of production quality.
+	 * Its only purpose is to exercise and test the core scaling
+	 * functionality of the driver.
+	 * It is _NOT_ tuned for either power saving or performance!
+	 *
+	 * A good core scaling algorithm would also need to consider
+	 * metrics other than PP utilization.
+	 */
+
+ MALI_DEBUG_PRINT(3, ("Utilization: (%3d, %3d, %3d), cores enabled: %d/%d\n", data->utilization_gpu, data->utilization_gp, data->utilization_pp, num_cores_enabled, num_cores_total));
+
+ /* NOTE: this function is normally called directly from the utilization callback which is in
+ * timer context. */
+
+ if (PERCENT_OF(90, 256) < data->utilization_pp) {
+ enable_max_num_cores();
+ } else if (PERCENT_OF(50, 256) < data->utilization_pp) {
+ enable_one_core();
+ } else if (PERCENT_OF(40, 256) < data->utilization_pp) {
+ /* do nothing */
+ } else if (PERCENT_OF(0, 256) < data->utilization_pp) {
+ disable_one_core();
+ } else {
+ /* do nothing */
+ }
+}
diff --git a/drivers/gpu/arm/utgard/platform/arm/arm_core_scaling.h b/drivers/gpu/arm/utgard/platform/arm/arm_core_scaling.h
new file mode 100644
index 000000000000..325b5b1c6894
--- /dev/null
+++ b/drivers/gpu/arm/utgard/platform/arm/arm_core_scaling.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2013, 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file arm_core_scaling.h
+ * Example core scaling policy.
+ */
+
+#ifndef __ARM_CORE_SCALING_H__
+#define __ARM_CORE_SCALING_H__
+
+struct mali_gpu_utilization_data;
+
+/**
+ * Initialize core scaling policy.
+ *
+ * @note The core scaling policy will assume that all PP cores are on initially.
+ *
+ * @param num_pp_cores Total number of PP cores.
+ */
+void mali_core_scaling_init(int num_pp_cores);
+
+/**
+ * Terminate core scaling policy.
+ */
+void mali_core_scaling_term(void);
+
+/**
+ * Update core scaling policy with new utilization data.
+ *
+ * @param data Utilization data.
+ */
+void mali_core_scaling_update(struct mali_gpu_utilization_data *data);
+
+void mali_core_scaling_sync(int num_cores);
+
+#endif /* __ARM_CORE_SCALING_H__ */
diff --git a/drivers/gpu/arm/utgard/platform/hikey/mali_hikey.c b/drivers/gpu/arm/utgard/platform/hikey/mali_hikey.c
new file mode 100644
index 000000000000..cc556b2656fe
--- /dev/null
+++ b/drivers/gpu/arm/utgard/platform/hikey/mali_hikey.c
@@ -0,0 +1,683 @@
+/*
+ * Copyright (C) 2014 Hisilicon Co. Ltd.
+ * Copyright (C) 2015 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+/**
+ * @file mali_hikey.c
+ * HiKey platform specific Mali driver functions.
+ */
+
+/* Set to 1 to enable ION (not tested yet). */
+#define HISI6220_USE_ION 0
+
+#define pr_fmt(fmt) "Mali: HiKey: " fmt
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/driver.h>
+#include <linux/pm.h>
+#include <linux/mm.h>
+#include <linux/of.h>
+
+#ifdef CONFIG_PM_RUNTIME
+#include <linux/pm_runtime.h>
+#endif
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/version.h>
+#include <linux/dma-mapping.h>
+#if HISI6220_USE_ION
+#include <linux/hisi/hisi_ion.h>
+#endif
+#include <linux/byteorder/generic.h>
+
+#include <linux/mali/mali_utgard.h>
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_hikey_hi6220_registers_gpu.h"
+
+#define MALI_GPU_MHZ 1000000
+#define MALI_IRQ_ID 142
+#define MALI_FRAME_BUFFER_ADDR 0x3F100000
+#define MALI_FRAME_BUFFER_SIZE 0x00708000
+
+#define MALI_CALC_REG_MASK(bit_start, bit_end) \
+ (((0x1 << (bit_end - bit_start + 1)) - 1) << bit_start)
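+/* e.g. MALI_CALC_REG_MASK(1, 3) == 0xE: a mask covering bits 1..3 inclusive */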
+
+enum mali_core_type {
+ MALI_CORE_400_MP1 = 0,
+ MALI_CORE_400_MP2 = 1,
+ MALI_CORE_450_MP4 = 2,
+ MALI_CORE_TYPE_MAX
+};
+
+enum mali_power_mode {
+ MALI_POWER_MODE_ON, /**< Power on */
+	MALI_POWER_MODE_LIGHT_SLEEP, /**< Idle for a short time, or runtime PM suspend */
+	MALI_POWER_MODE_DEEP_SLEEP, /**< Idle for a long time, or OS suspend */
+};
+
+struct mali_soc_remap_addr_table {
+ u8 *soc_media_sctrl_base_addr;
+ u8 *soc_ao_sctrl_base_addr;
+ u8 *soc_peri_sctrl_base_addr;
+ u8 *soc_pmctl_base_addr;
+};
+
+static struct clk *mali_clk_g3d;
+static struct clk *mali_pclk_g3d;
+static struct regulator *mali_regulator;
+static struct device_node *mali_np;
+static bool mali_gpu_power_status;
+
+static struct resource mali_gpu_resources_m450_mp4[] = {
+ MALI_GPU_RESOURCES_MALI450_MP4(
+ SOC_G3D_S_BASE_ADDR, MALI_IRQ_ID, MALI_IRQ_ID, MALI_IRQ_ID,
+ MALI_IRQ_ID, MALI_IRQ_ID, MALI_IRQ_ID, MALI_IRQ_ID,
+ MALI_IRQ_ID, MALI_IRQ_ID, MALI_IRQ_ID, MALI_IRQ_ID)
+};
+
+static struct mali_soc_remap_addr_table *mali_soc_addr_table;
+
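+/* Read-modify-write of the bit field [start_bit, end_bit] of the register at
+ * base_addr + reg_offset: the field is cleared and then set to the low bits
+ * of val, under a spinlock so updates to different fields of the same
+ * register do not race each other. */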
+static void mali_reg_writel(u8 *base_addr, unsigned int reg_offset,
+ unsigned char start_bit, unsigned char end_bit,
+ unsigned int val)
+{
+ int read_val;
+ unsigned long flags;
+ static DEFINE_SPINLOCK(reg_lock);
+ void __iomem *addr;
+
+ WARN_ON(!base_addr);
+
+ addr = base_addr + reg_offset;
+ spin_lock_irqsave(&reg_lock, flags);
+ read_val = readl(addr) & ~(MALI_CALC_REG_MASK(start_bit, end_bit));
+ read_val |= (MALI_CALC_REG_MASK(start_bit, end_bit)
+ & (val << start_bit));
+ writel(read_val, addr);
+ spin_unlock_irqrestore(&reg_lock, flags);
+}
+
+static unsigned int mali_reg_readl(u8 *base_addr, unsigned int reg_offset,
+ unsigned char start_bit,
+ unsigned char end_bit)
+{
+ unsigned int val;
+
+ WARN_ON(!base_addr);
+
+ val = readl((void __iomem *)(base_addr + reg_offset));
+ val &= MALI_CALC_REG_MASK(start_bit, end_bit);
+
+ return val >> start_bit;
+}
+
+static int mali_clock_on(void)
+{
+	u32 core_freq = 0;
+	u32 pclk_freq = 0;
+	int stat;
+
+	stat = clk_prepare_enable(mali_pclk_g3d);
+	if (stat)
+		return stat;
+
+	/* On any later failure, unwind the pclk enable done above. */
+	stat = of_property_read_u32(mali_np, "pclk_freq", &pclk_freq);
+	if (stat)
+		goto err_disable_pclk;
+
+	stat = clk_set_rate(mali_pclk_g3d, pclk_freq * MALI_GPU_MHZ);
+	if (stat)
+		goto err_disable_pclk;
+
+	stat = of_property_read_u32(mali_np, "mali_def_freq", &core_freq);
+	if (stat)
+		goto err_disable_pclk;
+
+	stat = clk_set_rate(mali_clk_g3d, core_freq * MALI_GPU_MHZ);
+	if (stat)
+		goto err_disable_pclk;
+
+	stat = clk_prepare_enable(mali_clk_g3d);
+	if (stat)
+		goto err_disable_pclk;
+
+	mali_reg_writel(mali_soc_addr_table->soc_media_sctrl_base_addr,
+			SOC_MEDIA_SCTRL_SC_MEDIA_CLKDIS_ADDR(0), 17, 17, 1);
+
+	return 0;
+
+err_disable_pclk:
+	clk_disable_unprepare(mali_pclk_g3d);
+	return stat;
+}
+
+static void mali_clock_off(void)
+{
+ clk_disable_unprepare(mali_clk_g3d);
+ clk_disable_unprepare(mali_pclk_g3d);
+}
+
+static int mali_domain_powerup_finish(void)
+{
+ unsigned int ret;
+
+ mali_reg_writel(mali_soc_addr_table->soc_ao_sctrl_base_addr,
+ SOC_AO_SCTRL_SC_PW_RSTDIS0_ADDR(0), 1, 1, 1);
+ ret = mali_reg_readl(mali_soc_addr_table->soc_ao_sctrl_base_addr,
+ SOC_AO_SCTRL_SC_PW_RST_STAT0_ADDR(0), 1, 1);
+ if (ret != 0) {
+ pr_err("SET SC_PW_RSTDIS0 failed!\n");
+ return -EFAULT;
+ }
+
+ mali_reg_writel(mali_soc_addr_table->soc_ao_sctrl_base_addr,
+ SOC_AO_SCTRL_SC_PW_ISODIS0_ADDR(0), 1, 1, 1);
+ ret = mali_reg_readl(mali_soc_addr_table->soc_ao_sctrl_base_addr,
+ SOC_AO_SCTRL_SC_PW_ISO_STAT0_ADDR(0), 1, 1);
+ if (ret != 0) {
+ pr_err("SET SC_PW_ISODIS0 failed!\n");
+ return -EFAULT;
+ }
+
+ mali_reg_writel(mali_soc_addr_table->soc_ao_sctrl_base_addr,
+ SOC_AO_SCTRL_SC_PW_CLKEN0_ADDR(0), 1, 1, 1);
+ ret = mali_reg_readl(mali_soc_addr_table->soc_ao_sctrl_base_addr,
+ SOC_AO_SCTRL_SC_PW_CLK_STAT0_ADDR(0), 1, 1);
+ if (ret != 1) {
+ pr_err("SET SC_PW_CLKEN0 failed!\n");
+ return -EFAULT;
+ }
+
+ mali_reg_writel(mali_soc_addr_table->soc_media_sctrl_base_addr,
+ SOC_MEDIA_SCTRL_SC_MEDIA_RSTDIS_ADDR(0), 0, 0, 1);
+ ret = mali_reg_readl(mali_soc_addr_table->soc_media_sctrl_base_addr,
+ SOC_MEDIA_SCTRL_SC_MEDIA_RST_STAT_ADDR(0), 0, 0);
+ if (ret != 0) {
+ pr_err("SET SC_MEDIA_RSTDIS failed!\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int mali_regulator_enable(void)
+{
+ int i, stat;
+
+ stat = regulator_enable(mali_regulator);
+ if (stat)
+ return stat;
+
+ for (i = 0; i < 50; i++) {
+ stat = regulator_is_enabled(mali_regulator);
+ if (stat > 0)
+ break;
+ udelay(1);
+ }
+
+ if (50 == i) {
+ pr_err("regulator enable timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int mali_platform_powerup(void)
+{
+ int stat;
+
+ if (mali_gpu_power_status)
+ return 0;
+
+ stat = mali_regulator_enable();
+ if (stat)
+ return stat;
+
+ stat = mali_clock_on();
+ if (stat)
+ return stat;
+
+ stat = mali_domain_powerup_finish();
+ if (stat)
+ return stat;
+
+ mali_gpu_power_status = true;
+
+ return 0;
+}
+
+static int mali_regulator_disable(void)
+{
+ mali_reg_writel(mali_soc_addr_table->soc_media_sctrl_base_addr,
+ SOC_MEDIA_SCTRL_SC_MEDIA_RSTEN_ADDR(0), 0, 0, 1);
+ mali_reg_writel(mali_soc_addr_table->soc_ao_sctrl_base_addr,
+ SOC_AO_SCTRL_SC_PW_CLKDIS0_ADDR(0), 1, 1, 1);
+ mali_reg_writel(mali_soc_addr_table->soc_ao_sctrl_base_addr,
+ SOC_AO_SCTRL_SC_PW_ISOEN0_ADDR(0), 1, 1, 1);
+ mali_reg_writel(mali_soc_addr_table->soc_ao_sctrl_base_addr,
+ SOC_AO_SCTRL_SC_PW_RSTEN0_ADDR(0), 1, 1, 1);
+
+ return regulator_disable(mali_regulator);
+}
+
+static int mali_platform_powerdown(void)
+{
+ int stat;
+
+ if (!mali_gpu_power_status)
+ return 0;
+
+ stat = mali_regulator_disable();
+ if (stat)
+ return stat;
+
+ mali_clock_off();
+ mali_gpu_power_status = false;
+
+ return 0;
+}
+
+static int mali_platform_power_mode_change(enum mali_power_mode power_mode)
+{
+ int stat;
+
+ switch (power_mode) {
+ case MALI_POWER_MODE_ON:
+ stat = mali_platform_powerup();
+ break;
+ case MALI_POWER_MODE_LIGHT_SLEEP:
+ case MALI_POWER_MODE_DEEP_SLEEP:
+ stat = mali_platform_powerdown();
+ break;
+ default:
+ pr_err("Invalid power mode\n");
+ stat = -EINVAL;
+ break;
+ }
+
+ return stat;
+}
+
+static int mali_os_suspend(struct device *device)
+{
+ int stat;
+
+ if (device->driver &&
+ device->driver->pm &&
+ device->driver->pm->suspend) {
+ stat = device->driver->pm->suspend(device);
+ } else {
+ stat = 0;
+ }
+
+ if (stat)
+ return stat;
+
+ return mali_platform_power_mode_change(MALI_POWER_MODE_DEEP_SLEEP);
+}
+
+static int mali_os_resume(struct device *device)
+{
+ int stat;
+
+ stat = mali_platform_power_mode_change(MALI_POWER_MODE_ON);
+ if (stat)
+ return stat;
+
+ if (device->driver &&
+ device->driver->pm &&
+ device->driver->pm->resume) {
+ stat = device->driver->pm->resume(device);
+ }
+
+ return stat;
+}
+
+static int mali_os_freeze(struct device *device)
+{
+ int stat;
+
+ if (device->driver &&
+ device->driver->pm &&
+ device->driver->pm->freeze) {
+ stat = device->driver->pm->freeze(device);
+ } else {
+ stat = 0;
+ }
+
+ return stat;
+}
+
+static int mali_os_thaw(struct device *device)
+{
+ int stat;
+
+ if (device->driver &&
+ device->driver->pm &&
+ device->driver->pm->thaw) {
+ stat = device->driver->pm->thaw(device);
+ } else {
+ stat = 0;
+ }
+
+ return stat;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int mali_runtime_suspend(struct device *device)
+{
+ int stat;
+
+ if (device->driver &&
+ device->driver->pm &&
+ device->driver->pm->runtime_suspend) {
+ stat = device->driver->pm->runtime_suspend(device);
+ } else {
+ stat = 0;
+ }
+
+ if (stat)
+ return stat;
+
+ return mali_platform_power_mode_change(MALI_POWER_MODE_LIGHT_SLEEP);
+}
+
+static int mali_runtime_resume(struct device *device)
+{
+ int stat;
+
+ stat = mali_platform_power_mode_change(MALI_POWER_MODE_ON);
+ if (stat)
+ return stat;
+
+ if (device->driver &&
+ device->driver->pm &&
+ device->driver->pm->runtime_resume) {
+ stat = device->driver->pm->runtime_resume(device);
+ }
+
+ return stat;
+}
+
+static int mali_runtime_idle(struct device *device)
+{
+ int stat;
+
+ if (device->driver &&
+ device->driver->pm &&
+ device->driver->pm->runtime_idle) {
+ stat = device->driver->pm->runtime_idle(device);
+ } else {
+ stat = 0;
+ }
+
+ if (stat)
+ return stat;
+
+ return pm_runtime_suspend(device);
+}
+#endif
+
+static int init_mali_clock_regulator(struct platform_device *pdev)
+{
+ int stat, ret;
+
+ BUG_ON(mali_regulator || mali_clk_g3d || mali_pclk_g3d);
+
+ /* regulator init */
+
+ mali_regulator = regulator_get(&pdev->dev, "G3D_PD_VDD");
+ if (IS_ERR(mali_regulator)) {
+ pr_err("failed to get G3D_PD_VDD\n");
+ return -ENODEV;
+ }
+
+ stat = mali_regulator_enable();
+ if (stat)
+ return stat;
+
+ mali_gpu_power_status = true;
+
+ /* clk init */
+
+ mali_clk_g3d = clk_get(&pdev->dev, "clk_g3d");
+ if (IS_ERR(mali_clk_g3d)) {
+ pr_err("failed to get source CLK_G3D\n");
+ return -ENODEV;
+ }
+
+ mali_pclk_g3d = clk_get(&pdev->dev, "pclk_g3d");
+ if (IS_ERR(mali_pclk_g3d)) {
+ pr_err("failed to get source PCLK_G3D\n");
+ return -ENODEV;
+ }
+
+ ret = mali_reg_readl(mali_soc_addr_table->soc_peri_sctrl_base_addr,
+ SOC_PERI_SCTRL_SC_PERIPH_CLKSTAT12_ADDR(0),
+ 10, 10);
+ if (ret != 1) {
+ mali_reg_writel(mali_soc_addr_table->soc_peri_sctrl_base_addr,
+ SOC_PERI_SCTRL_SC_PERIPH_CLKEN12_ADDR(0),
+ 10, 10, 1);
+ ret = mali_reg_readl(
+ mali_soc_addr_table->soc_peri_sctrl_base_addr,
+ SOC_PERI_SCTRL_SC_PERIPH_CLKSTAT12_ADDR(0), 10, 10);
+ if (ret != 1) {
+ pr_err("SET SC_PERIPH_CLKEN12 failed!\n");
+ return -EFAULT;
+ }
+ }
+
+ stat = mali_clock_on();
+ if (stat)
+ return stat;
+
+ mali_reg_writel(mali_soc_addr_table->soc_media_sctrl_base_addr,
+ SOC_MEDIA_SCTRL_SC_MEDIA_CLKCFG2_ADDR(0), 15, 15, 1);
+ ret = mali_reg_readl(mali_soc_addr_table->soc_media_sctrl_base_addr,
+ SOC_MEDIA_SCTRL_SC_MEDIA_CLKCFG2_ADDR(0), 15, 15);
+ if (ret != 1) {
+ pr_err("SET SC_MEDIA_CLKCFG2 failed!\n");
+ return -EFAULT;
+ }
+
+ return mali_domain_powerup_finish();
+}
+
+static int deinit_mali_clock_regulator(void)
+{
+ int stat;
+
+ BUG_ON(!mali_regulator || !mali_clk_g3d || !mali_pclk_g3d);
+
+ stat = mali_platform_powerdown();
+ if (stat)
+ return stat;
+
+ clk_put(mali_clk_g3d);
+ mali_clk_g3d = NULL;
+ clk_put(mali_pclk_g3d);
+ mali_pclk_g3d = NULL;
+ regulator_put(mali_regulator);
+ mali_regulator = NULL;
+
+ return 0;
+}
+
+static struct mali_gpu_device_data mali_gpu_data = {
+ .shared_mem_size = 1024 * 1024 * 1024, /* 1024MB */
+ .fb_start = MALI_FRAME_BUFFER_ADDR,
+ .fb_size = MALI_FRAME_BUFFER_SIZE,
+ .max_job_runtime = 2000, /* 2 seconds time out */
+ .control_interval = 50, /* 50ms */
+#ifdef CONFIG_MALI_DVFS
+ .utilization_callback = mali_gpu_utilization_proc,
+#endif
+};
+
+static const struct dev_pm_ops mali_gpu_device_type_pm_ops = {
+ .suspend = mali_os_suspend,
+ .resume = mali_os_resume,
+ .freeze = mali_os_freeze,
+ .thaw = mali_os_thaw,
+#ifdef CONFIG_PM_RUNTIME
+ .runtime_suspend = mali_runtime_suspend,
+ .runtime_resume = mali_runtime_resume,
+ .runtime_idle = mali_runtime_idle,
+#endif
+};
+
+static struct device_type mali_gpu_device_device_type = {
+ .pm = &mali_gpu_device_type_pm_ops,
+};
+
+static enum mali_core_type mali_get_gpu_type(void)
+{
+ u32 gpu_type = MALI_CORE_TYPE_MAX;
+ int err = of_property_read_u32(mali_np, "mali_type", &gpu_type);
+
+ if (err) {
+ pr_err("failed to read mali_type from device tree\n");
+ return -EFAULT;
+ }
+
+ return gpu_type;
+}
+
+#if HISI6220_USE_ION
+static int mali_ion_mem_init(void)
+{
+ struct ion_heap_info_data mem_data;
+
+ if (hisi_ion_get_heap_info(ION_FB_HEAP_ID, &mem_data)) {
+ pr_err("Failed to get ION_FB_HEAP_ID\n");
+ return -EFAULT;
+ }
+
+ if (mem_data.heap_size == 0) {
+ pr_err("fb size is 0\n");
+ return -EINVAL;
+ }
+
+ mali_gpu_data.fb_size = mem_data.heap_size;
+ mali_gpu_data.fb_start = (unsigned long)(mem_data.heap_phy);
+ pr_debug("fb_size=0x%x, fb_start=0x%x\n",
+ mali_gpu_data.fb_size, mali_gpu_data.fb_start);
+
+ return 0;
+}
+#endif
+
+static int mali_remap_soc_addr(void)
+{
+ BUG_ON(mali_soc_addr_table);
+
+ mali_soc_addr_table = kmalloc(sizeof(struct mali_soc_remap_addr_table),
+ GFP_KERNEL);
+ if (!mali_soc_addr_table)
+ return -ENOMEM;
+
+ mali_soc_addr_table->soc_media_sctrl_base_addr =
+ ioremap(SOC_MEDIA_SCTRL_BASE_ADDR, REG_MEDIA_SC_IOSIZE);
+ mali_soc_addr_table->soc_ao_sctrl_base_addr =
+ ioremap(SOC_AO_SCTRL_BASE_ADDR, REG_SC_ON_IOSIZE);
+ mali_soc_addr_table->soc_peri_sctrl_base_addr =
+ ioremap(SOC_PERI_SCTRL_BASE_ADDR, REG_SC_OFF_IOSIZE);
+ mali_soc_addr_table->soc_pmctl_base_addr =
+ ioremap(SOC_PMCTRL_BASE_ADDR, REG_PMCTRL_IOSIZE);
+
+ if (!mali_soc_addr_table->soc_media_sctrl_base_addr
+ || !mali_soc_addr_table->soc_ao_sctrl_base_addr
+ || !mali_soc_addr_table->soc_peri_sctrl_base_addr
+ || !mali_soc_addr_table->soc_pmctl_base_addr) {
+ pr_err("Failed to remap SoC addresses\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void mali_unmap_soc_addr(void)
+{
+ iounmap((void __iomem *)mali_soc_addr_table->soc_media_sctrl_base_addr);
+ iounmap((void __iomem *)mali_soc_addr_table->soc_ao_sctrl_base_addr);
+ iounmap((void __iomem *)mali_soc_addr_table->soc_peri_sctrl_base_addr);
+ iounmap((void __iomem *)mali_soc_addr_table->soc_pmctl_base_addr);
+ kfree(mali_soc_addr_table);
+ mali_soc_addr_table = NULL;
+}
+
+int mali_platform_device_init(struct platform_device *pdev)
+{
+ int stat;
+ int irq, i;
+
+#if HISI6220_USE_ION
+ stat = mali_ion_mem_init();
+ if (stat)
+ return stat;
+#endif
+
+ stat = mali_remap_soc_addr();
+ if (stat)
+ return stat;
+
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ pdev->dev.type = &mali_gpu_device_device_type;
+ pdev->dev.platform_data = &mali_gpu_data;
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ mali_np = pdev->dev.of_node;
+
+ if (mali_get_gpu_type() != MALI_CORE_450_MP4) {
+ pr_err("Unexpected GPU type\n");
+ return -EINVAL;
+ }
+
+ /*
+	 * We need to use DT to get the IRQ domain, so rewrite the static
+	 * table with the IRQ returned by platform_get_irq().
+ */
+ irq = platform_get_irq(pdev, 0);
+ for (i = 0; i < ARRAY_SIZE(mali_gpu_resources_m450_mp4); i++) {
+ if (IORESOURCE_IRQ & mali_gpu_resources_m450_mp4[i].flags) {
+ mali_gpu_resources_m450_mp4[i].start = irq;
+ mali_gpu_resources_m450_mp4[i].end = irq;
+ }
+ }
+ pdev->num_resources = ARRAY_SIZE(mali_gpu_resources_m450_mp4);
+ pdev->resource = mali_gpu_resources_m450_mp4;
+
+ stat = init_mali_clock_regulator(pdev);
+ if (stat)
+ return stat;
+
+#ifdef CONFIG_PM_RUNTIME
+ pm_runtime_set_autosuspend_delay(&(pdev->dev), 1);
+ pm_runtime_use_autosuspend(&(pdev->dev));
+ pm_runtime_enable(&pdev->dev);
+#endif
+
+ return 0;
+}
+
+int mali_platform_device_deinit(void)
+{
+ int stat;
+
+ stat = deinit_mali_clock_regulator();
+ if (stat)
+ return stat;
+
+ mali_unmap_soc_addr();
+
+ return 0;
+}
diff --git a/drivers/gpu/arm/utgard/platform/hikey/mali_hikey_hi6220_registers_gpu.h b/drivers/gpu/arm/utgard/platform/hikey/mali_hikey_hi6220_registers_gpu.h
new file mode 100644
index 000000000000..0bdf4a0482fd
--- /dev/null
+++ b/drivers/gpu/arm/utgard/platform/hikey/mali_hikey_hi6220_registers_gpu.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2014 Hisilicon Co. Ltd.
+ * Copyright (C) 2015 ARM Ltd.
+ *
+ * Author: Xuzixin <Xuzixin@hisilicon.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef MALI_HIKEY_HI6220_REGISTERS_GPU_H
+#define MALI_HIKEY_HI6220_REGISTERS_GPU_H 1
+
+#include <linux/mm.h>
+
+#define SOC_G3D_S_BASE_ADDR 0xF4080000 /* G3D ctrl base addr */
+#define SOC_MEDIA_SCTRL_BASE_ADDR 0xF4410000 /* media ctrl base addr */
+#define REG_MEDIA_SC_IOSIZE PAGE_ALIGN(SZ_4K)
+#define SOC_PMCTRL_BASE_ADDR 0xF7032000 /* pm ctrl base addr */
+#define REG_PMCTRL_IOSIZE PAGE_ALIGN(SZ_4K)
+#define SOC_AO_SCTRL_BASE_ADDR 0xF7800000 /* ao ctrl base addr */
+#define SOC_PERI_SCTRL_BASE_ADDR 0xF7030000 /* peri ctrl base addr */
+#define REG_SC_ON_IOSIZE PAGE_ALIGN(SZ_8K)
+#define REG_SC_OFF_IOSIZE PAGE_ALIGN(SZ_4K)
+
+/* ----------------------------------------------------------------------------
+ * MEDIA SCTRL
+ */
+
+#define SOC_MEDIA_SCTRL_SC_MEDIA_SUBSYS_CTRL5_ADDR(base) ((base) + (0x51C))
+#define SOC_MEDIA_SCTRL_SC_MEDIA_CLKCFG0_ADDR(base) ((base) + (0xCBC))
+#define SOC_MEDIA_SCTRL_SC_MEDIA_CLKCFG2_ADDR(base) ((base) + (0xCC4))
+#define SOC_MEDIA_SCTRL_SC_MEDIA_CLKEN_ADDR(base) ((base) + (0x520))
+#define SOC_MEDIA_SCTRL_SC_MEDIA_CLKDIS_ADDR(base) ((base) + (0x524))
+#define SOC_MEDIA_SCTRL_SC_MEDIA_RSTEN_ADDR(base) ((base) + (0x52C))
+#define SOC_MEDIA_SCTRL_SC_MEDIA_RSTDIS_ADDR(base) ((base) + (0x530))
+#define SOC_MEDIA_SCTRL_SC_MEDIA_RST_STAT_ADDR(base) ((base) + (0x534))
+
+/* ----------------------------------------------------------------------------
+ * AO SCTRL. Only bit 1 is needed by the GPU.
+ */
+
+#define SOC_AO_SCTRL_SC_PW_CLKEN0_ADDR(base) ((base) + (0x800))
+#define SOC_AO_SCTRL_SC_PW_CLKDIS0_ADDR(base) ((base) + (0x804))
+#define SOC_AO_SCTRL_SC_PW_CLK_STAT0_ADDR(base) ((base) + (0x808))
+#define SOC_AO_SCTRL_SC_PW_RSTEN0_ADDR(base) ((base) + (0x810))
+#define SOC_AO_SCTRL_SC_PW_RSTDIS0_ADDR(base) ((base) + (0x814))
+#define SOC_AO_SCTRL_SC_PW_RST_STAT0_ADDR(base) ((base) + (0x818))
+#define SOC_AO_SCTRL_SC_PW_ISOEN0_ADDR(base) ((base) + (0x820))
+#define SOC_AO_SCTRL_SC_PW_ISODIS0_ADDR(base) ((base) + (0x824))
+#define SOC_AO_SCTRL_SC_PW_ISO_STAT0_ADDR(base) ((base) + (0x828))
+#define SOC_AO_SCTRL_SC_PW_MTCMOS_EN0_ADDR(base) ((base) + (0x830))
+#define SOC_AO_SCTRL_SC_PW_MTCMOS_DIS0_ADDR(base) ((base) + (0x834))
+#define SOC_AO_SCTRL_SC_PW_MTCMOS_STAT0_ADDR(base) ((base) + (0x838))
+
+/* ----------------------------------------------------------------------------
+ * PERI SCTRL. Only bit 10 is needed by the GPU.
+ */
+
+#define SOC_PERI_SCTRL_SC_PERIPH_CLKEN12_ADDR(base) ((base) + (0x270))
+#define SOC_PERI_SCTRL_SC_PERIPH_CLKSTAT12_ADDR(base) ((base) + (0x278))
+
+#endif /* MALI_HIKEY_HI6220_REGISTERS_GPU_H */
diff --git a/drivers/gpu/arm/utgard/readme.txt b/drivers/gpu/arm/utgard/readme.txt
new file mode 100644
index 000000000000..6785ac933b38
--- /dev/null
+++ b/drivers/gpu/arm/utgard/readme.txt
@@ -0,0 +1,28 @@
+Building the Mali Device Driver for Linux
+-----------------------------------------
+
+Build the Mali Device Driver for Linux by running the following make command:
+
+KDIR=<kdir_path> USING_UMP=<ump_option> BUILD=<build_option> make
+
+where
+ kdir_path: Path to your Linux Kernel directory
+	ump_option: 1 = Enable UMP support (*)
+	            0 = Disable UMP support
+ build_option: debug = debug build of driver
+ release = release build of driver
+
+(*) For newer Linux Kernels, the Module.symvers file for the UMP device driver
+ must be available. The UMP_SYMVERS_FILE variable in the Makefile should
+ point to this file. This file is generated when the UMP driver is built.
+
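+For example, to make a release build without UMP support against the build
+tree of the currently running kernel (the KDIR path is just an illustration):
+
+KDIR=/lib/modules/$(uname -r)/build USING_UMP=0 BUILD=release make
+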
+The result will be a mali.ko file, which can be loaded into the Linux kernel
+by using the insmod command.
+
+Use of UMP is not recommended; the dma-buf API in the Linux kernel has
+replaced it. The Mali Device Driver will be built with dma-buf support if
+dma-buf is enabled in the kernel config.
+
+The kernel needs to be provided with a platform_device struct for the Mali GPU
+device. See the mali_utgard.h header file for how to set up the Mali GPU
+resources.
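+
+As a minimal sketch, following the pattern used by platform/arm/arm.c in this
+tree (the base address and IRQ numbers below are illustrative only):
+
+    static struct resource mali_gpu_resources[] = {
+            MALI_GPU_RESOURCES_MALI450_MP4(0xFC040000, 70, 70, 70, 70, 70,
+                                           70, 70, 70, 70, 70, 70)
+    };
+
+    static struct mali_gpu_device_data mali_gpu_data = {
+            .max_job_runtime = 60000, /* 60 seconds */
+    };
+
+    static struct platform_device mali_gpu_device = {
+            .name = MALI_GPU_NAME_UTGARD,
+            .id = 0,
+            .num_resources = ARRAY_SIZE(mali_gpu_resources),
+            .resource = mali_gpu_resources,
+            .dev.platform_data = &mali_gpu_data,
+    };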
diff --git a/drivers/gpu/arm/utgard/regs/mali_200_regs.h b/drivers/gpu/arm/utgard/regs/mali_200_regs.h
new file mode 100644
index 000000000000..e76f9926f5c0
--- /dev/null
+++ b/drivers/gpu/arm/utgard/regs/mali_200_regs.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2010, 2012-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _MALI200_REGS_H_
+#define _MALI200_REGS_H_
+
+/**
+ * Enum for management register addresses.
+ */
+enum mali200_mgmt_reg {
+ MALI200_REG_ADDR_MGMT_VERSION = 0x1000,
+ MALI200_REG_ADDR_MGMT_CURRENT_REND_LIST_ADDR = 0x1004,
+ MALI200_REG_ADDR_MGMT_STATUS = 0x1008,
+ MALI200_REG_ADDR_MGMT_CTRL_MGMT = 0x100c,
+
+ MALI200_REG_ADDR_MGMT_INT_RAWSTAT = 0x1020,
+ MALI200_REG_ADDR_MGMT_INT_CLEAR = 0x1024,
+ MALI200_REG_ADDR_MGMT_INT_MASK = 0x1028,
+ MALI200_REG_ADDR_MGMT_INT_STATUS = 0x102c,
+
+ MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS = 0x1050,
+
+ MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE = 0x1080,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC = 0x1084,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_0_LIMIT = 0x1088,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE = 0x108c,
+
+ MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE = 0x10a0,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC = 0x10a4,
+ MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE = 0x10ac,
+
+ MALI200_REG_ADDR_MGMT_PERFMON_CONTR = 0x10b0,
+ MALI200_REG_ADDR_MGMT_PERFMON_BASE = 0x10b4,
+
+ MALI200_REG_SIZEOF_REGISTER_BANK = 0x10f0
+
+};
+
+#define MALI200_REG_VAL_PERF_CNT_ENABLE 1
+
+enum mali200_mgmt_ctrl_mgmt {
+ MALI200_REG_VAL_CTRL_MGMT_STOP_BUS = (1 << 0),
+ MALI200_REG_VAL_CTRL_MGMT_FLUSH_CACHES = (1 << 3),
+ MALI200_REG_VAL_CTRL_MGMT_FORCE_RESET = (1 << 5),
+ MALI200_REG_VAL_CTRL_MGMT_START_RENDERING = (1 << 6),
+ MALI400PP_REG_VAL_CTRL_MGMT_SOFT_RESET = (1 << 7), /* Only valid for Mali-300 and later */
+};
+
+enum mali200_mgmt_irq {
+ MALI200_REG_VAL_IRQ_END_OF_FRAME = (1 << 0),
+ MALI200_REG_VAL_IRQ_END_OF_TILE = (1 << 1),
+ MALI200_REG_VAL_IRQ_HANG = (1 << 2),
+ MALI200_REG_VAL_IRQ_FORCE_HANG = (1 << 3),
+ MALI200_REG_VAL_IRQ_BUS_ERROR = (1 << 4),
+ MALI200_REG_VAL_IRQ_BUS_STOP = (1 << 5),
+ MALI200_REG_VAL_IRQ_CNT_0_LIMIT = (1 << 6),
+ MALI200_REG_VAL_IRQ_CNT_1_LIMIT = (1 << 7),
+ MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR = (1 << 8),
+ MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND = (1 << 9),
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW = (1 << 10),
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW = (1 << 11),
+ MALI400PP_REG_VAL_IRQ_RESET_COMPLETED = (1 << 12),
+};
+
+#define MALI200_REG_VAL_IRQ_MASK_ALL ((enum mali200_mgmt_irq) (\
+ MALI200_REG_VAL_IRQ_END_OF_FRAME |\
+ MALI200_REG_VAL_IRQ_END_OF_TILE |\
+ MALI200_REG_VAL_IRQ_HANG |\
+ MALI200_REG_VAL_IRQ_FORCE_HANG |\
+ MALI200_REG_VAL_IRQ_BUS_ERROR |\
+ MALI200_REG_VAL_IRQ_BUS_STOP |\
+ MALI200_REG_VAL_IRQ_CNT_0_LIMIT |\
+ MALI200_REG_VAL_IRQ_CNT_1_LIMIT |\
+ MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR |\
+ MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND |\
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW |\
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW |\
+ MALI400PP_REG_VAL_IRQ_RESET_COMPLETED))
+
+#define MALI200_REG_VAL_IRQ_MASK_USED ((enum mali200_mgmt_irq) (\
+ MALI200_REG_VAL_IRQ_END_OF_FRAME |\
+ MALI200_REG_VAL_IRQ_FORCE_HANG |\
+ MALI200_REG_VAL_IRQ_BUS_ERROR |\
+ MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR |\
+ MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND |\
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW |\
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW))
+
+#define MALI200_REG_VAL_IRQ_MASK_NONE ((enum mali200_mgmt_irq)(0))
+
+enum mali200_mgmt_status {
+ MALI200_REG_VAL_STATUS_RENDERING_ACTIVE = (1 << 0),
+ MALI200_REG_VAL_STATUS_BUS_STOPPED = (1 << 4),
+};
+
+enum mali200_render_unit {
+ MALI200_REG_ADDR_FRAME = 0x0000,
+ MALI200_REG_ADDR_RSW = 0x0004,
+ MALI200_REG_ADDR_STACK = 0x0030,
+ MALI200_REG_ADDR_STACK_SIZE = 0x0034,
+ MALI200_REG_ADDR_ORIGIN_OFFSET_X = 0x0040
+};
+
+enum mali200_wb_unit {
+ MALI200_REG_ADDR_WB0 = 0x0100,
+ MALI200_REG_ADDR_WB1 = 0x0200,
+ MALI200_REG_ADDR_WB2 = 0x0300
+};
+
+enum mali200_wb_unit_regs {
+ MALI200_REG_ADDR_WB_SOURCE_SELECT = 0x0000,
+ MALI200_REG_ADDR_WB_SOURCE_ADDR = 0x0004,
+};
+
+/* This should be in the top 16 bits of the version register of Mali PP */
+#define MALI200_PP_PRODUCT_ID 0xC807
+#define MALI300_PP_PRODUCT_ID 0xCE07
+#define MALI400_PP_PRODUCT_ID 0xCD07
+#define MALI450_PP_PRODUCT_ID 0xCF07
+#define MALI470_PP_PRODUCT_ID 0xCF08
+
+#endif /* _MALI200_REGS_H_ */
diff --git a/drivers/gpu/arm/utgard/regs/mali_gp_regs.h b/drivers/gpu/arm/utgard/regs/mali_gp_regs.h
new file mode 100644
index 000000000000..9c101f9ddd22
--- /dev/null
+++ b/drivers/gpu/arm/utgard/regs/mali_gp_regs.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2010, 2012-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _MALIGP2_CONTROL_REGS_H_
+#define _MALIGP2_CONTROL_REGS_H_
+
+/**
+ * These are the different geometry processor control registers.
+ * They are used to control and monitor the operation of the
+ * Vertex Shader and the Polygon List Builder in the geometry processor.
+ * Addresses are byte offsets to 32-bit registers, relative to the base
+ * of the register bank.
+ * @see [P0081] "Geometry Processor Data Structures" for details
+ */
+
+typedef enum {
+ MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR = 0x00,
+ MALIGP2_REG_ADDR_MGMT_VSCL_END_ADDR = 0x04,
+ MALIGP2_REG_ADDR_MGMT_PLBUCL_START_ADDR = 0x08,
+ MALIGP2_REG_ADDR_MGMT_PLBUCL_END_ADDR = 0x0c,
+ MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR = 0x10,
+ MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR = 0x14,
+ MALIGP2_REG_ADDR_MGMT_CMD = 0x20,
+ MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT = 0x24,
+ MALIGP2_REG_ADDR_MGMT_INT_CLEAR = 0x28,
+ MALIGP2_REG_ADDR_MGMT_INT_MASK = 0x2C,
+ MALIGP2_REG_ADDR_MGMT_INT_STAT = 0x30,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE = 0x3C,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE = 0x40,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC = 0x44,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC = 0x48,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_VALUE = 0x4C,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE = 0x50,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_LIMIT = 0x54,
+ MALIGP2_REG_ADDR_MGMT_STATUS = 0x68,
+ MALIGP2_REG_ADDR_MGMT_VERSION = 0x6C,
+ MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR_READ = 0x80,
+ MALIGP2_REG_ADDR_MGMT_PLBCL_START_ADDR_READ = 0x84,
+ MALIGP2_CONTR_AXI_BUS_ERROR_STAT = 0x94,
+ MALIGP2_REGISTER_ADDRESS_SPACE_SIZE = 0x98,
+} maligp_reg_addr_mgmt_addr;
+
+#define MALIGP2_REG_VAL_PERF_CNT_ENABLE 1
+
+/**
+ * Commands to geometry processor.
+ * @see MALIGP2_CTRL_REG_CMD
+ */
+typedef enum {
+ MALIGP2_REG_VAL_CMD_START_VS = (1 << 0),
+ MALIGP2_REG_VAL_CMD_START_PLBU = (1 << 1),
+ MALIGP2_REG_VAL_CMD_UPDATE_PLBU_ALLOC = (1 << 4),
+ MALIGP2_REG_VAL_CMD_RESET = (1 << 5),
+ MALIGP2_REG_VAL_CMD_FORCE_HANG = (1 << 6),
+ MALIGP2_REG_VAL_CMD_STOP_BUS = (1 << 9),
+ MALI400GP_REG_VAL_CMD_SOFT_RESET = (1 << 10), /* only valid for Mali-300 and later */
+} mgp_contr_reg_val_cmd;
+
+
+/** @defgroup MALIGP2_IRQ
+ * Interrupt status of geometry processor.
+ * @see MALIGP2_CTRL_REG_INT_RAWSTAT, MALIGP2_REG_ADDR_MGMT_INT_CLEAR,
+ * MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_ADDR_MGMT_INT_STAT
+ * @{
+ */
+#define MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST (1 << 0)
+#define MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST (1 << 1)
+#define MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM (1 << 2)
+#define MALIGP2_REG_VAL_IRQ_VS_SEM_IRQ (1 << 3)
+#define MALIGP2_REG_VAL_IRQ_PLBU_SEM_IRQ (1 << 4)
+#define MALIGP2_REG_VAL_IRQ_HANG (1 << 5)
+#define MALIGP2_REG_VAL_IRQ_FORCE_HANG (1 << 6)
+#define MALIGP2_REG_VAL_IRQ_PERF_CNT_0_LIMIT (1 << 7)
+#define MALIGP2_REG_VAL_IRQ_PERF_CNT_1_LIMIT (1 << 8)
+#define MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR (1 << 9)
+#define MALIGP2_REG_VAL_IRQ_SYNC_ERROR (1 << 10)
+#define MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR (1 << 11)
+#define MALI400GP_REG_VAL_IRQ_AXI_BUS_STOPPED (1 << 12)
+#define MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD (1 << 13)
+#define MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD (1 << 14)
+#define MALI400GP_REG_VAL_IRQ_RESET_COMPLETED (1 << 19)
+#define MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW (1 << 20)
+#define MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW (1 << 21)
+#define MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS (1 << 22)
+
+/* Mask defining all IRQs in Mali GP */
+#define MALIGP2_REG_VAL_IRQ_MASK_ALL \
+ (\
+ MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | \
+ MALIGP2_REG_VAL_IRQ_VS_SEM_IRQ | \
+ MALIGP2_REG_VAL_IRQ_PLBU_SEM_IRQ | \
+ MALIGP2_REG_VAL_IRQ_HANG | \
+ MALIGP2_REG_VAL_IRQ_FORCE_HANG | \
+ MALIGP2_REG_VAL_IRQ_PERF_CNT_0_LIMIT | \
+ MALIGP2_REG_VAL_IRQ_PERF_CNT_1_LIMIT | \
+ MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR | \
+ MALIGP2_REG_VAL_IRQ_SYNC_ERROR | \
+ MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR | \
+ MALI400GP_REG_VAL_IRQ_AXI_BUS_STOPPED | \
+ MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD | \
+ MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD | \
+ MALI400GP_REG_VAL_IRQ_RESET_COMPLETED | \
+ MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW | \
+ MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW | \
+ MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS)
+
+/* Mask defining the IRQs in Mali GP which we use */
+#define MALIGP2_REG_VAL_IRQ_MASK_USED \
+ (\
+ MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | \
+ MALIGP2_REG_VAL_IRQ_FORCE_HANG | \
+ MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR | \
+ MALIGP2_REG_VAL_IRQ_SYNC_ERROR | \
+ MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR | \
+ MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD | \
+ MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD | \
+ MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW | \
+ MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW | \
+ MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS)
+
+/* Mask selecting none of the IRQs on the Mali GP2 */
+#define MALIGP2_REG_VAL_IRQ_MASK_NONE 0
+
+/** @} end of defgroup MALIGP2_IRQ */
+
+/** @defgroup MALIGP2_STATUS
+ * The different status values reported by the geometry processor.
+ * @see MALIGP2_CTRL_REG_STATUS
+ * @{
+ */
+#define MALIGP2_REG_VAL_STATUS_VS_ACTIVE 0x0002
+#define MALIGP2_REG_VAL_STATUS_BUS_STOPPED 0x0004
+#define MALIGP2_REG_VAL_STATUS_PLBU_ACTIVE 0x0008
+#define MALIGP2_REG_VAL_STATUS_BUS_ERROR 0x0040
+#define MALIGP2_REG_VAL_STATUS_WRITE_BOUND_ERR 0x0100
+/** @} end of defgroup MALIGP2_STATUS */
+
+#define MALIGP2_REG_VAL_STATUS_MASK_ACTIVE (\
+ MALIGP2_REG_VAL_STATUS_VS_ACTIVE|\
+ MALIGP2_REG_VAL_STATUS_PLBU_ACTIVE)
+
+
+#define MALIGP2_REG_VAL_STATUS_MASK_ERROR (\
+ MALIGP2_REG_VAL_STATUS_BUS_ERROR |\
+ MALIGP2_REG_VAL_STATUS_WRITE_BOUND_ERR)
+
+/* This should be in the top 16 bits of the version register of the GP. */
+#define MALI200_GP_PRODUCT_ID 0xA07
+#define MALI300_GP_PRODUCT_ID 0xC07
+#define MALI400_GP_PRODUCT_ID 0xB07
+#define MALI450_GP_PRODUCT_ID 0xD07
+
+/**
+ * The different performance counter sources on the geometry processor.
+ * @see MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC
+ */
+
+enum MALIGP2_cont_reg_perf_cnt_src {
+ MALIGP2_REG_VAL_PERF_CNT1_SRC_NUMBER_OF_VERTICES_PROCESSED = 0x0a,
+};
+
+#endif
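The INT_CLEAR/INT_MASK registers and the MASK_ALL/MASK_USED values above combine in the usual clear-then-unmask pattern. A minimal sketch, assuming an ioremap'd base and the kernel writel() accessor (illustrative, not part of this patch):

#include <linux/io.h>

/* hypothetical helper: drop any latched state, then enable only the
 * interrupts the driver actually handles */
static void maligp2_irq_rearm(void __iomem *base)
{
	writel(MALIGP2_REG_VAL_IRQ_MASK_ALL,
	       base + MALIGP2_REG_ADDR_MGMT_INT_CLEAR);
	writel(MALIGP2_REG_VAL_IRQ_MASK_USED,
	       base + MALIGP2_REG_ADDR_MGMT_INT_MASK);
}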
diff --git a/drivers/gpu/arm/utgard/timestamp-arm11-cc/mali_timestamp.c b/drivers/gpu/arm/utgard/timestamp-arm11-cc/mali_timestamp.c
new file mode 100644
index 000000000000..a486e2f7684f
--- /dev/null
+++ b/drivers/gpu/arm/utgard/timestamp-arm11-cc/mali_timestamp.c
@@ -0,0 +1,13 @@
+/*
+ * Copyright (C) 2010-2011, 2013, 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_timestamp.h"
+
+/* This file is intentionally left empty; all functions are inlined in mali_timestamp.h */
diff --git a/drivers/gpu/arm/utgard/timestamp-arm11-cc/mali_timestamp.h b/drivers/gpu/arm/utgard/timestamp-arm11-cc/mali_timestamp.h
new file mode 100644
index 000000000000..65f3ab274c09
--- /dev/null
+++ b/drivers/gpu/arm/utgard/timestamp-arm11-cc/mali_timestamp.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2010-2011, 2013-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_TIMESTAMP_H__
+#define __MALI_TIMESTAMP_H__
+
+#include "mali_osk.h"
+
+MALI_STATIC_INLINE _mali_osk_errcode_t _mali_timestamp_reset(void)
+{
+ /*
+ * reset counters and overflow flags
+ */
+
+ u32 mask = (1 << 0) | /* enable all three counters */
+ (0 << 1) | /* reset both Count Registers to 0x0 */
+ (1 << 2) | /* reset the Cycle Counter Register to 0x0 */
+ (0 << 3) | /* 1 = Cycle Counter Register counts every 64th processor clock cycle */
+ (0 << 4) | /* Count Register 0 interrupt enable */
+ (0 << 5) | /* Count Register 1 interrupt enable */
+ (0 << 6) | /* Cycle Counter interrupt enable */
+ (0 << 8) | /* Count Register 0 overflow flag (clear or write, flag on read) */
+ (0 << 9) | /* Count Register 1 overflow flag (clear or write, flag on read) */
+ (1 << 10); /* Cycle Counter Register overflow flag (clear or write, flag on read) */
+
+ __asm__ __volatile__("MCR p15, 0, %0, c15, c12, 0" : : "r"(mask));
+
+ return _MALI_OSK_ERR_OK;
+}
+
+MALI_STATIC_INLINE u64 _mali_timestamp_get(void)
+{
+ u32 result;
+
+ /* this is for the clock cycles */
+ __asm__ __volatile__("MRC p15, 0, %0, c15, c12, 1" : "=r"(result));
+
+ return (u64)result;
+}
+
+#endif /* __MALI_TIMESTAMP_H__ */
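On this ARM11 variant, _mali_timestamp_get() returns raw CP15 cycle counts rather than nanoseconds (contrast the timestamp-default variant below), so callers measure durations in core clock cycles. A usage sketch; job_run() is a hypothetical workload, not part of this patch:

static u64 measure_job_cycles(void)
{
	u64 start, end;

	_mali_timestamp_reset();	/* zero the CP15 cycle counter */
	start = _mali_timestamp_get();
	job_run();			/* hypothetical workload */
	end = _mali_timestamp_get();

	return end - start;		/* elapsed core clock cycles */
}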
diff --git a/drivers/gpu/arm/utgard/timestamp-default/mali_timestamp.c b/drivers/gpu/arm/utgard/timestamp-default/mali_timestamp.c
new file mode 100644
index 000000000000..a486e2f7684f
--- /dev/null
+++ b/drivers/gpu/arm/utgard/timestamp-default/mali_timestamp.c
@@ -0,0 +1,13 @@
+/*
+ * Copyright (C) 2010-2011, 2013, 2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_timestamp.h"
+
+/* This file is intentionally left empty; all functions are inlined in mali_timestamp.h */
diff --git a/drivers/gpu/arm/utgard/timestamp-default/mali_timestamp.h b/drivers/gpu/arm/utgard/timestamp-default/mali_timestamp.h
new file mode 100644
index 000000000000..8ba47060828f
--- /dev/null
+++ b/drivers/gpu/arm/utgard/timestamp-default/mali_timestamp.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2010-2011, 2013-2015 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_TIMESTAMP_H__
+#define __MALI_TIMESTAMP_H__
+
+#include "mali_osk.h"
+
+MALI_STATIC_INLINE _mali_osk_errcode_t _mali_timestamp_reset(void)
+{
+ return _MALI_OSK_ERR_OK;
+}
+
+MALI_STATIC_INLINE u64 _mali_timestamp_get(void)
+{
+ return _mali_osk_boot_time_get_ns();
+}
+
+#endif /* __MALI_TIMESTAMP_H__ */
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index c4bf9a1cf4a6..038aae8c7c66 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -97,6 +97,14 @@ config DRM_KMS_CMA_HELPER
help
Choose this if you need the KMS CMA helper functions
+config DRM_CMA_FBDEV_BUFFER_NUM
+ int "CMA fbdev buffer number"
+ depends on DRM_KMS_CMA_HELPER
+ default 1
+ help
+ Number of buffers allocated for the CMA fbdev. The default is 1
+ (single buffering); set to 2 for double buffering or 3 for
+ triple buffering.
+
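One way a CMA fbdev helper might consume this option is to scale the framebuffer allocation by the configured buffer count so userspace can pan or flip between buffers. A minimal sketch, assuming the pitch and visible height are already known (this is not the actual helper code):

/* hypothetical: total CMA allocation for an N-buffered fbdev surface */
static size_t cma_fbdev_alloc_size(size_t pitch, unsigned int vdisplay)
{
	return pitch * vdisplay * CONFIG_DRM_CMA_FBDEV_BUFFER_NUM;
}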
source "drivers/gpu/drm/i2c/Kconfig"
config DRM_TDFX
@@ -266,3 +274,5 @@ source "drivers/gpu/drm/amd/amdkfd/Kconfig"
source "drivers/gpu/drm/imx/Kconfig"
source "drivers/gpu/drm/vc4/Kconfig"
+
+source "drivers/gpu/drm/hisilicon/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 1e9ff4c3e3db..e7efcb7327f7 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -75,3 +75,4 @@ obj-y += i2c/
obj-y += panel/
obj-y += bridge/
obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/
+obj-$(CONFIG_DRM_HISI) += hisilicon/
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 04c270757030..ca066018ea34 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -22,7 +22,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o
# add asic specific block
-amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o gmc_v7_0.o cik_ih.o kv_smc.o kv_dpm.o \
+amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o \
amdgpu_amdkfd_gfx_v7.o
@@ -31,6 +31,7 @@ amdgpu-y += \
# add GMC block
amdgpu-y += \
+ gmc_v7_0.o \
gmc_v8_0.o
# add IH block
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 048cfe073dae..bb1099c549df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -604,8 +604,6 @@ struct amdgpu_sa_manager {
uint32_t align;
};
-struct amdgpu_sa_bo;
-
/* sub-allocation buffer */
struct amdgpu_sa_bo {
struct list_head olist;
@@ -2314,6 +2312,8 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
uint32_t flags);
bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
+bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
+ unsigned long end);
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
struct ttm_mem_reg *mem);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 5a8fbadbd27b..8ac49812a716 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -63,6 +63,10 @@ bool amdgpu_has_atpx(void) {
return amdgpu_atpx_priv.atpx_detected;
}
+bool amdgpu_has_atpx_dgpu_power_cntl(void) {
+ return amdgpu_atpx_priv.atpx.functions.power_cntl;
+}
+
/**
* amdgpu_atpx_call - call an ATPX method
*
@@ -142,10 +146,6 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
*/
static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
{
- /* make sure required functions are enabled */
- /* dGPU power control is required */
- atpx->functions.power_cntl = true;
-
if (atpx->functions.px_params) {
union acpi_object *info;
struct atpx_px_params output;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 89c3dd62ba21..119cdc2c43e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -77,7 +77,7 @@ void amdgpu_connector_hotplug(struct drm_connector *connector)
} else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
/* Don't try to start link training before we
* have the dpcd */
- if (!amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
+ if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
return;
/* set it to OFF so that drm_helper_connector_dpms()
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index d5b421330145..9d88023df836 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -61,6 +61,12 @@ static const char *amdgpu_asic_name[] = {
"LAST",
};
+#if defined(CONFIG_VGA_SWITCHEROO)
+bool amdgpu_has_atpx_dgpu_power_cntl(void);
+#else
+static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
+#endif
+
bool amdgpu_device_is_px(struct drm_device *dev)
{
struct amdgpu_device *adev = dev->dev_private;
@@ -1469,7 +1475,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (amdgpu_runtime_pm == 1)
runtime = true;
- if (amdgpu_device_is_px(ddev))
+ if (amdgpu_device_is_px(ddev) && amdgpu_has_atpx_dgpu_power_cntl())
runtime = true;
vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
if (runtime)
@@ -1744,15 +1750,20 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
}
/* post card */
- amdgpu_atom_asic_init(adev->mode_info.atom_context);
+ if (!amdgpu_card_posted(adev))
+ amdgpu_atom_asic_init(adev->mode_info.atom_context);
r = amdgpu_resume(adev);
+ if (r)
+ DRM_ERROR("amdgpu_resume failed (%d).\n", r);
amdgpu_fence_driver_resume(adev);
- r = amdgpu_ib_ring_tests(adev);
- if (r)
- DRM_ERROR("ib ring test failed (%d).\n", r);
+ if (resume) {
+ r = amdgpu_ib_ring_tests(adev);
+ if (r)
+ DRM_ERROR("ib ring test failed (%d).\n", r);
+ }
r = amdgpu_late_init(adev);
if (r)
@@ -1788,6 +1799,7 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
}
drm_kms_helper_poll_enable(dev);
+ drm_helper_hpd_irq_event(dev);
if (fbcon) {
amdgpu_fbdev_set_suspend(adev, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 5580d3420c3a..82903ca78529 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -72,8 +72,8 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
struct drm_crtc *crtc = &amdgpuCrtc->base;
unsigned long flags;
- unsigned i;
- int vpos, hpos, stat, min_udelay;
+ unsigned i, repcnt = 4;
+ int vpos, hpos, stat, min_udelay = 0;
struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
amdgpu_flip_wait_fence(adev, &work->excl);
@@ -96,7 +96,7 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
* In practice this won't execute very often unless on very fast
* machines because the time window for this to happen is very small.
*/
- for (;;) {
+ while (amdgpuCrtc->enabled && --repcnt) {
/* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
* start in hpos, and to the "fudged earlier" vblank start in
* vpos.
@@ -112,12 +112,24 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
break;
/* Sleep at least until estimated real start of hw vblank */
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
+ if (min_udelay > vblank->framedur_ns / 2000) {
+ /* Don't wait ridiculously long - something is wrong */
+ repcnt = 0;
+ break;
+ }
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
usleep_range(min_udelay, 2 * min_udelay);
spin_lock_irqsave(&crtc->dev->event_lock, flags);
};
+ if (!repcnt)
+ DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
+ "framedur %d, linedur %d, stat %d, vpos %d, "
+ "hpos %d\n", work->crtc_id, min_udelay,
+ vblank->framedur_ns / 1000,
+ vblank->linedur_ns / 1000, stat, vpos, hpos);
+
/* do the flip (mmio) */
adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
/* set the flip status */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 0508c5cd103a..8d6668cedf6d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -250,11 +250,11 @@ static struct pci_device_id pciidlist[] = {
{0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
#endif
/* topaz */
- {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+ {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+ {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+ {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+ {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
/* tonga */
{0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
{0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index b1969f2b2038..d4e2780c0796 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -142,7 +142,8 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
list_for_each_entry(bo, &node->bos, mn_list) {
- if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
+ if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start,
+ end))
continue;
r = amdgpu_bo_reserve(bo, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index c3ce103b6a33..b8fbbd7699e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -33,6 +33,7 @@
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
+#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
@@ -261,6 +262,13 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
AMDGPU_GEM_DOMAIN_OA);
bo->flags = flags;
+
+ /* For architectures that don't support WC memory,
+ * mask out the WC flag from the BO
+ */
+ if (!drm_arch_can_wc_memory())
+ bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+
amdgpu_fill_placement_to_bo(bo, placement);
/* Kernel allocation are uninterruptible */
r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
@@ -399,7 +407,8 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
}
if (fpfn > bo->placements[i].fpfn)
bo->placements[i].fpfn = fpfn;
- if (lpfn && lpfn < bo->placements[i].lpfn)
+ if (!bo->placements[i].lpfn ||
+ (lpfn && lpfn < bo->placements[i].lpfn))
bo->placements[i].lpfn = lpfn;
bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 22a8c7d3a3ab..7ae15fad16ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -595,11 +595,6 @@ force:
/* update display watermarks based on new power state */
amdgpu_display_bandwidth_update(adev);
- /* update displays */
- amdgpu_dpm_display_configuration_changed(adev);
-
- adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
- adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
/* wait for the rings to drain */
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
@@ -616,6 +611,12 @@ force:
amdgpu_dpm_post_set_power_state(adev);
+ /* update displays */
+ amdgpu_dpm_display_configuration_changed(adev);
+
+ adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
+ adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
+
if (adev->pm.funcs->force_performance_level) {
if (adev->pm.dpm.thermal_active) {
enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 8b88edb0434b..ca72a2e487b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -354,12 +354,15 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i)
if (fences[i])
- fences[count++] = fences[i];
+ fences[count++] = fence_get(fences[i]);
if (count) {
spin_unlock(&sa_manager->wq.lock);
t = fence_wait_any_timeout(fences, count, false,
MAX_SCHEDULE_TIMEOUT);
+ for (i = 0; i < count; ++i)
+ fence_put(fences[i]);
+
r = (t > 0) ? 0 : t;
spin_lock(&sa_manager->wq.lock);
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index dd005c336c97..181ce39ef5e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -293,7 +293,8 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
fence = to_amdgpu_fence(sync->sync_to[i]);
/* check if we really need to sync */
- if (!amdgpu_fence_need_sync(fence, ring))
+ if (!amdgpu_enable_scheduler &&
+ !amdgpu_fence_need_sync(fence, ring))
continue;
/* prevent GPU deadlocks */
@@ -303,7 +304,7 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
}
if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores) {
- r = fence_wait(&fence->base, true);
+ r = fence_wait(sync->sync_to[i], true);
if (r)
return r;
continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 8a1752ff3d8e..1cbb16e15307 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -712,7 +712,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
0, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) {
- while (--i) {
+ while (i--) {
pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
gtt->ttm.dma_address[i] = 0;
@@ -783,6 +783,25 @@ bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm)
return !!gtt->userptr;
}
+bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
+ unsigned long end)
+{
+ struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ unsigned long size;
+
+ if (gtt == NULL)
+ return false;
+
+ if (gtt->ttm.ttm.state != tt_bound || !gtt->userptr)
+ return false;
+
+ size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
+ if (gtt->userptr > end || gtt->userptr + size <= start)
+ return false;
+
+ return true;
+}
+
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -808,7 +827,7 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
flags |= AMDGPU_PTE_SNOOPED;
}
- if (adev->asic_type >= CHIP_TOPAZ)
+ if (adev->asic_type >= CHIP_TONGA)
flags |= AMDGPU_PTE_EXECUTABLE;
flags |= AMDGPU_PTE_READABLE;
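Once the bound/userptr guards pass, the new amdgpu_ttm_tt_affect_userptr() reduces to a standard range-overlap test between the pinned user buffer [userptr, userptr + size) and the invalidated range. Extracted as a standalone sketch with illustrative names:

/* true if [addr, addr + size) intersects the invalidation range */
static bool range_affected(unsigned long addr, unsigned long size,
			   unsigned long start, unsigned long end)
{
	return addr <= end && addr + size > start;
}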
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index b53d273eb7a1..8c5ec151ddac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1010,13 +1010,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
return -EINVAL;
/* make sure object fit at this offset */
- eaddr = saddr + size;
+ eaddr = saddr + size - 1;
if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
return -EINVAL;
last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
- if (last_pfn > adev->vm_manager.max_pfn) {
- dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n",
+ if (last_pfn >= adev->vm_manager.max_pfn) {
+ dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n",
last_pfn, adev->vm_manager.max_pfn);
return -EINVAL;
}
@@ -1025,7 +1025,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
eaddr /= AMDGPU_GPU_PAGE_SIZE;
spin_lock(&vm->it_lock);
- it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1);
+ it = interval_tree_iter_first(&vm->va, saddr, eaddr);
spin_unlock(&vm->it_lock);
if (it) {
struct amdgpu_bo_va_mapping *tmp;
@@ -1046,7 +1046,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
INIT_LIST_HEAD(&mapping->list);
mapping->it.start = saddr;
- mapping->it.last = eaddr - 1;
+ mapping->it.last = eaddr;
mapping->offset = offset;
mapping->flags = flags;
@@ -1248,7 +1248,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
AMDGPU_VM_PTE_COUNT * 8);
- unsigned pd_size, pd_entries, pts_size;
+ unsigned pd_size, pd_entries;
int i, r;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -1266,8 +1266,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
pd_entries = amdgpu_vm_num_pdes(adev);
/* allocate page table array */
- pts_size = pd_entries * sizeof(struct amdgpu_vm_pt);
- vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
+ vm->page_tables = drm_calloc_large(pd_entries, sizeof(struct amdgpu_vm_pt));
if (vm->page_tables == NULL) {
DRM_ERROR("Cannot allocate memory for page table array\n");
return -ENOMEM;
@@ -1327,7 +1326,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
amdgpu_bo_unref(&vm->page_tables[i].bo);
- kfree(vm->page_tables);
+ drm_free_large(vm->page_tables);
amdgpu_bo_unref(&vm->page_directory);
fence_put(vm->page_directory_fence);
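The eaddr change above moves amdgpu_vm_bo_map() to the inclusive-last convention the interval tree expects: a mapping of npages ends at page saddr + npages - 1, not one past it, so queries no longer need a -1 adjustment. A standalone sketch of the corrected conflict check (the function name is illustrative; interval_tree_iter_first() takes inclusive bounds):

#include <linux/interval_tree.h>

/* hypothetical: is the page range [saddr, saddr + npages - 1] unmapped? */
static bool gpu_va_range_free(struct rb_root *va, unsigned long saddr,
			      unsigned long npages)
{
	unsigned long eaddr = saddr + npages - 1;	/* inclusive last page */

	return interval_tree_iter_first(va, saddr, eaddr) == NULL;
}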
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 72793f93e2fc..aa491540ba85 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -3628,6 +3628,19 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+ uint32_t seq = ring->fence_drv.sync_seq;
+ uint64_t addr = ring->fence_drv.gpu_addr;
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+ amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
+ WAIT_REG_MEM_FUNCTION(3) | /* equal */
+ WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
+ amdgpu_ring_write(ring, addr & 0xfffffffc);
+ amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+ amdgpu_ring_write(ring, seq);
+ amdgpu_ring_write(ring, 0xffffffff);
+ amdgpu_ring_write(ring, 4); /* poll interval */
+
if (usepfp) {
/* sync CE with ME to prevent CE from fetching CEIB before context switch is done */
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index e1dcab98e249..d1054034d14b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -90,7 +90,6 @@ MODULE_FIRMWARE("amdgpu/topaz_ce.bin");
MODULE_FIRMWARE("amdgpu/topaz_pfp.bin");
MODULE_FIRMWARE("amdgpu/topaz_me.bin");
MODULE_FIRMWARE("amdgpu/topaz_mec.bin");
-MODULE_FIRMWARE("amdgpu/topaz_mec2.bin");
MODULE_FIRMWARE("amdgpu/topaz_rlc.bin");
MODULE_FIRMWARE("amdgpu/fiji_ce.bin");
@@ -807,7 +806,8 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
- if (adev->asic_type != CHIP_STONEY) {
+ if ((adev->asic_type != CHIP_STONEY) &&
+ (adev->asic_type != CHIP_TOPAZ)) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
if (!err) {
@@ -4681,7 +4681,8 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
- WAIT_REG_MEM_FUNCTION(3))); /* equal */
+ WAIT_REG_MEM_FUNCTION(3) | /* equal */
+ WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index ed8abb58a785..ea87033bfaf6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -42,9 +42,39 @@ static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
MODULE_FIRMWARE("radeon/bonaire_mc.bin");
MODULE_FIRMWARE("radeon/hawaii_mc.bin");
+MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
+
+static const u32 golden_settings_iceland_a11[] =
+{
+ mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+ mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+ mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+ mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
+};
+
+static const u32 iceland_mgcg_cgcg_init[] =
+{
+ mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
+};
+
+static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_TOPAZ:
+ amdgpu_program_register_sequence(adev,
+ iceland_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
+ amdgpu_program_register_sequence(adev,
+ golden_settings_iceland_a11,
+ (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
+ break;
+ default:
+ break;
+ }
+}
/**
- * gmc8_mc_wait_for_idle - wait for MC idle callback.
+ * gmc7_mc_wait_for_idle - wait for MC idle callback.
*
* @adev: amdgpu_device pointer
*
@@ -132,13 +162,20 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
case CHIP_HAWAII:
chip_name = "hawaii";
break;
+ case CHIP_TOPAZ:
+ chip_name = "topaz";
+ break;
case CHIP_KAVERI:
case CHIP_KABINI:
return 0;
default: BUG();
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+ if (adev->asic_type == CHIP_TOPAZ)
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
+ else
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+
err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
if (err)
goto out;
@@ -861,14 +898,6 @@ static int gmc_v7_0_early_init(void *handle)
gmc_v7_0_set_gart_funcs(adev);
gmc_v7_0_set_irq_funcs(adev);
- if (adev->flags & AMD_IS_APU) {
- adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
- } else {
- u32 tmp = RREG32(mmMC_SEQ_MISC0);
- tmp &= MC_SEQ_MISC0__MT__MASK;
- adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
- }
-
return 0;
}
@@ -889,6 +918,14 @@ static int gmc_v7_0_sw_init(void *handle)
if (r)
return r;
+ if (adev->flags & AMD_IS_APU) {
+ adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+ } else {
+ u32 tmp = RREG32(mmMC_SEQ_MISC0);
+ tmp &= MC_SEQ_MISC0__MT__MASK;
+ adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
+ }
+
r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
if (r)
return r;
@@ -980,6 +1017,8 @@ static int gmc_v7_0_hw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ gmc_v7_0_init_golden_registers(adev);
+
gmc_v7_0_mc_program(adev);
if (!(adev->flags & AMD_IS_APU)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index d39028440814..08423089fb84 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -42,9 +42,7 @@
static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
-MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
-MODULE_FIRMWARE("amdgpu/fiji_mc.bin");
static const u32 golden_settings_tonga_a11[] =
{
@@ -75,19 +73,6 @@ static const u32 fiji_mgcg_cgcg_init[] =
mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};
-static const u32 golden_settings_iceland_a11[] =
-{
- mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
- mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
- mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
- mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
-};
-
-static const u32 iceland_mgcg_cgcg_init[] =
-{
- mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
-};
-
static const u32 cz_mgcg_cgcg_init[] =
{
mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
@@ -102,14 +87,6 @@ static const u32 stoney_mgcg_cgcg_init[] =
static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
- case CHIP_TOPAZ:
- amdgpu_program_register_sequence(adev,
- iceland_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- golden_settings_iceland_a11,
- (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
- break;
case CHIP_FIJI:
amdgpu_program_register_sequence(adev,
fiji_mgcg_cgcg_init,
@@ -229,15 +206,10 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
DRM_DEBUG("\n");
switch (adev->asic_type) {
- case CHIP_TOPAZ:
- chip_name = "topaz";
- break;
case CHIP_TONGA:
chip_name = "tonga";
break;
case CHIP_FIJI:
- chip_name = "fiji";
- break;
case CHIP_CARRIZO:
case CHIP_STONEY:
return 0;
@@ -880,14 +852,6 @@ static int gmc_v8_0_early_init(void *handle)
gmc_v8_0_set_gart_funcs(adev);
gmc_v8_0_set_irq_funcs(adev);
- if (adev->flags & AMD_IS_APU) {
- adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
- } else {
- u32 tmp = RREG32(mmMC_SEQ_MISC0);
- tmp &= MC_SEQ_MISC0__MT__MASK;
- adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
- }
-
return 0;
}
@@ -898,6 +862,8 @@ static int gmc_v8_0_late_init(void *handle)
return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
}
+#define mmMC_SEQ_MISC0_FIJI 0xA71
+
static int gmc_v8_0_sw_init(void *handle)
{
int r;
@@ -908,6 +874,19 @@ static int gmc_v8_0_sw_init(void *handle)
if (r)
return r;
+ if (adev->flags & AMD_IS_APU) {
+ adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+ } else {
+ u32 tmp;
+
+ if (adev->asic_type == CHIP_FIJI)
+ tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
+ else
+ tmp = RREG32(mmMC_SEQ_MISC0);
+ tmp &= MC_SEQ_MISC0__MT__MASK;
+ adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
+ }
+
r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
if (r)
return r;
@@ -1003,7 +982,7 @@ static int gmc_v8_0_hw_init(void *handle)
gmc_v8_0_mc_program(adev);
- if (!(adev->flags & AMD_IS_APU)) {
+ if (adev->asic_type == CHIP_TONGA) {
r = gmc_v8_0_mc_load_microcode(adev);
if (r) {
DRM_ERROR("Failed to load MC firmware!\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
index 966d4b2ed9da..090486c18249 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
@@ -432,7 +432,7 @@ static uint32_t iceland_smu_get_mask_for_fw_type(uint32_t fw_type)
case AMDGPU_UCODE_ID_CP_ME:
return UCODE_ID_CP_ME_MASK;
case AMDGPU_UCODE_ID_CP_MEC1:
- return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK | UCODE_ID_CP_MEC_JT2_MASK;
+ return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK;
case AMDGPU_UCODE_ID_CP_MEC2:
return UCODE_ID_CP_MEC_MASK;
case AMDGPU_UCODE_ID_RLC_G:
@@ -522,12 +522,6 @@ static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
return -EINVAL;
}
- if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
- return -EINVAL;
- }
-
if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
&toc->entry[toc->num_entries++])) {
DRM_ERROR("Failed to get firmware entry for SDMA0\n");
@@ -550,8 +544,8 @@ static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
UCODE_ID_CP_ME_MASK |
UCODE_ID_CP_PFP_MASK |
UCODE_ID_CP_MEC_MASK |
- UCODE_ID_CP_MEC_JT1_MASK |
- UCODE_ID_CP_MEC_JT2_MASK;
+ UCODE_ID_CP_MEC_JT1_MASK;
+
if (iceland_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
DRM_ERROR("Fail to request SMU load ucode\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 2cf50180cc51..b1c7a9b3631b 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -32,8 +32,8 @@
#include "oss/oss_2_4_d.h"
#include "oss/oss_2_4_sh_mask.h"
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
+#include "gmc/gmc_7_1_d.h"
+#include "gmc/gmc_7_1_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
index 204903897b4f..63d6cb3c1110 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
@@ -122,25 +122,12 @@ static int tonga_dpm_hw_fini(void *handle)
static int tonga_dpm_suspend(void *handle)
{
- return 0;
+ return tonga_dpm_hw_fini(handle);
}
static int tonga_dpm_resume(void *handle)
{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- mutex_lock(&adev->pm.mutex);
-
- ret = tonga_smu_start(adev);
- if (ret) {
- DRM_ERROR("SMU start failed\n");
- goto fail;
- }
-
-fail:
- mutex_unlock(&adev->pm.mutex);
- return ret;
+ return tonga_dpm_hw_init(handle);
}
static int tonga_dpm_set_clockgating_state(void *handle,
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 2adc1c855e85..3e9cbe398151 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -60,6 +60,7 @@
#include "vi.h"
#include "vi_dpm.h"
#include "gmc_v8_0.h"
+#include "gmc_v7_0.h"
#include "gfx_v8_0.h"
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
@@ -1081,10 +1082,10 @@ static const struct amdgpu_ip_block_version topaz_ip_blocks[] =
},
{
.type = AMD_IP_BLOCK_TYPE_GMC,
- .major = 8,
- .minor = 0,
+ .major = 7,
+ .minor = 4,
.rev = 0,
- .funcs = &gmc_v8_0_ip_funcs,
+ .funcs = &gmc_v7_0_ip_funcs,
},
{
.type = AMD_IP_BLOCK_TYPE_IH,
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 541a610667ad..e0b4586a26fd 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -227,7 +227,7 @@ static int ast_get_dram_info(struct drm_device *dev)
} while (ast_read32(ast, 0x10000) != 0x01);
data = ast_read32(ast, 0x10004);
- if (data & 0x400)
+ if (data & 0x40)
ast->dram_bus_width = 16;
else
ast->dram_bus_width = 32;
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 9535c5b60387..7e5a97204051 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -178,7 +178,7 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
{
struct drm_dp_aux_msg msg;
unsigned int retry;
- int err;
+ int err = 0;
memset(&msg, 0, sizeof(msg));
msg.address = offset;
@@ -186,6 +186,8 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
msg.buffer = buffer;
msg.size = size;
+ mutex_lock(&aux->hw_mutex);
+
/*
* The specification doesn't give any recommendation on how often to
* retry native transactions. We used to retry 7 times like for
@@ -194,25 +196,24 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
*/
for (retry = 0; retry < 32; retry++) {
- mutex_lock(&aux->hw_mutex);
err = aux->transfer(aux, &msg);
- mutex_unlock(&aux->hw_mutex);
if (err < 0) {
if (err == -EBUSY)
continue;
- return err;
+ goto unlock;
}
switch (msg.reply & DP_AUX_NATIVE_REPLY_MASK) {
case DP_AUX_NATIVE_REPLY_ACK:
if (err < size)
- return -EPROTO;
- return err;
+ err = -EPROTO;
+ goto unlock;
case DP_AUX_NATIVE_REPLY_NACK:
- return -EIO;
+ err = -EIO;
+ goto unlock;
case DP_AUX_NATIVE_REPLY_DEFER:
usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100);
@@ -221,7 +222,11 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
}
DRM_DEBUG_KMS("too many retries, giving up\n");
- return -EIO;
+ err = -EIO;
+
+unlock:
+ mutex_unlock(&aux->hw_mutex);
+ return err;
}
/**
@@ -543,9 +548,7 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
int max_retries = max(7, drm_dp_i2c_retry_count(msg, dp_aux_i2c_speed_khz));
for (retry = 0, defer_i2c = 0; retry < (max_retries + defer_i2c); retry++) {
- mutex_lock(&aux->hw_mutex);
ret = aux->transfer(aux, msg);
- mutex_unlock(&aux->hw_mutex);
if (ret < 0) {
if (ret == -EBUSY)
continue;
@@ -684,6 +687,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
memset(&msg, 0, sizeof(msg));
+ mutex_lock(&aux->hw_mutex);
+
for (i = 0; i < num; i++) {
msg.address = msgs[i].addr;
drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
@@ -738,6 +743,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
msg.size = 0;
(void)drm_dp_i2c_do_msg(aux, &msg);
+ mutex_unlock(&aux->hw_mutex);
+
return err;
}
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 809959d56d78..39d7e2e15c11 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -798,6 +798,18 @@ static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
return mstb;
}
+static void drm_dp_free_mst_port(struct kref *kref);
+
+static void drm_dp_free_mst_branch_device(struct kref *kref)
+{
+ struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
+ if (mstb->port_parent) {
+ if (list_empty(&mstb->port_parent->next))
+ kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port);
+ }
+ kfree(mstb);
+}
+
static void drm_dp_destroy_mst_branch_device(struct kref *kref)
{
struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
@@ -805,6 +817,15 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
bool wake_tx = false;
/*
+ * init kref again to be used by ports to remove mst branch when it is
+ * not needed anymore
+ */
+ kref_init(kref);
+
+ if (mstb->port_parent && list_empty(&mstb->port_parent->next))
+ kref_get(&mstb->port_parent->kref);
+
+ /*
* destroy all ports - don't need lock
* as there are no more references to the mst branch
* device at this point.
@@ -830,7 +851,8 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
if (wake_tx)
wake_up(&mstb->mgr->tx_waitq);
- kfree(mstb);
+
+ kref_put(kref, drm_dp_free_mst_branch_device);
}
static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
@@ -878,6 +900,7 @@ static void drm_dp_destroy_port(struct kref *kref)
* from an EDID retrieval */
mutex_lock(&mgr->destroy_connector_lock);
+ kref_get(&port->parent->kref);
list_add(&port->next, &mgr->destroy_connector_list);
mutex_unlock(&mgr->destroy_connector_lock);
schedule_work(&mgr->destroy_connector_work);
@@ -973,17 +996,17 @@ static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u
static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
u8 *rad)
{
- int lct = port->parent->lct;
+ int parent_lct = port->parent->lct;
int shift = 4;
- int idx = lct / 2;
- if (lct > 1) {
- memcpy(rad, port->parent->rad, idx);
- shift = (lct % 2) ? 4 : 0;
+ int idx = (parent_lct - 1) / 2;
+ if (parent_lct > 1) {
+ memcpy(rad, port->parent->rad, idx + 1);
+ shift = (parent_lct % 2) ? 4 : 0;
} else
rad[0] = 0;
rad[idx] |= port->port_num << shift;
- return lct + 1;
+ return parent_lct + 1;
}
/*
@@ -1013,18 +1036,27 @@ static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
return send_link;
}
-static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
- struct drm_dp_mst_port *port)
+static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
{
int ret;
- if (port->dpcd_rev >= 0x12) {
- port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid);
- if (!port->guid_valid) {
- ret = drm_dp_send_dpcd_write(mstb->mgr,
- port,
- DP_GUID,
- 16, port->guid);
- port->guid_valid = true;
+
+ memcpy(mstb->guid, guid, 16);
+
+ if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
+ if (mstb->port_parent) {
+ ret = drm_dp_send_dpcd_write(
+ mstb->mgr,
+ mstb->port_parent,
+ DP_GUID,
+ 16,
+ mstb->guid);
+ } else {
+
+ ret = drm_dp_dpcd_write(
+ mstb->mgr->aux,
+ DP_GUID,
+ mstb->guid,
+ 16);
}
}
}
@@ -1039,7 +1071,7 @@ static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
for (i = 0; i < (mstb->lct - 1); i++) {
int shift = (i % 2) ? 0 : 4;
- int port_num = mstb->rad[i / 2] >> shift;
+ int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
snprintf(temp, sizeof(temp), "-%d", port_num);
strlcat(proppath, temp, proppath_size);
}
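Both corrected spots decode the Relative Address (RAD) the same way: one 4-bit port number per hop, packed two nibbles per byte with the high nibble first, now masked with 0xf so the neighbouring nibble cannot leak in. As a standalone sketch (helper name is illustrative):

/* hypothetical helper: port number for hop i (0-based) of a RAD string */
static int rad_port_num(const u8 *rad, int i)
{
	int shift = (i % 2) ? 0 : 4;

	return (rad[i / 2] >> shift) & 0xf;
}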
@@ -1081,7 +1113,6 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
port->dpcd_rev = port_msg->dpcd_revision;
port->num_sdp_streams = port_msg->num_sdp_streams;
port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
- memcpy(port->guid, port_msg->peer_guid, 16);
/* manage mstb port lists with mgr lock - take a reference
for this list */
@@ -1094,11 +1125,9 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
if (old_ddps != port->ddps) {
if (port->ddps) {
- drm_dp_check_port_guid(mstb, port);
if (!port->input)
drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
} else {
- port->guid_valid = false;
port->available_pbn = 0;
}
}
@@ -1157,10 +1186,8 @@ static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
if (old_ddps != port->ddps) {
if (port->ddps) {
- drm_dp_check_port_guid(mstb, port);
dowork = true;
} else {
- port->guid_valid = false;
port->available_pbn = 0;
}
}
@@ -1190,7 +1217,7 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
for (i = 0; i < lct - 1; i++) {
int shift = (i % 2) ? 0 : 4;
- int port_num = rad[i / 2] >> shift;
+ int port_num = (rad[i / 2] >> shift) & 0xf;
list_for_each_entry(port, &mstb->ports, next) {
if (port->port_num == port_num) {
@@ -1210,6 +1237,48 @@ out:
return mstb;
}
+static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
+ struct drm_dp_mst_branch *mstb,
+ uint8_t *guid)
+{
+ struct drm_dp_mst_branch *found_mstb;
+ struct drm_dp_mst_port *port;
+
+ if (memcmp(mstb->guid, guid, 16) == 0)
+ return mstb;
+
+
+ list_for_each_entry(port, &mstb->ports, next) {
+ if (!port->mstb)
+ continue;
+
+ found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
+
+ if (found_mstb)
+ return found_mstb;
+ }
+
+ return NULL;
+}
+
+static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
+ struct drm_dp_mst_topology_mgr *mgr,
+ uint8_t *guid)
+{
+ struct drm_dp_mst_branch *mstb;
+
+ /* find the port by iterating down */
+ mutex_lock(&mgr->lock);
+
+ mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
+
+ if (mstb)
+ kref_get(&mstb->kref);
+
+ mutex_unlock(&mgr->lock);
+ return mstb;
+}
+
static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb)
{
@@ -1320,6 +1389,7 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
struct drm_dp_sideband_msg_tx *txmsg)
{
struct drm_dp_mst_branch *mstb = txmsg->dst;
+ u8 req_type;
/* both msg slots are full */
if (txmsg->seqno == -1) {
@@ -1336,7 +1406,13 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
txmsg->seqno = 1;
mstb->tx_slots[txmsg->seqno] = txmsg;
}
- hdr->broadcast = 0;
+
+ req_type = txmsg->msg[0] & 0x7f;
+ if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
+ req_type == DP_RESOURCE_STATUS_NOTIFY)
+ hdr->broadcast = 1;
+ else
+ hdr->broadcast = 0;
hdr->path_msg = txmsg->path_msg;
hdr->lct = mstb->lct;
hdr->lcr = mstb->lct - 1;
@@ -1438,26 +1514,18 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
}
/* called holding qlock */
-static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
+static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_sideband_msg_tx *txmsg)
{
- struct drm_dp_sideband_msg_tx *txmsg;
int ret;
/* construct a chunk from the first msg in the tx_msg queue */
- if (list_empty(&mgr->tx_msg_upq)) {
- mgr->tx_up_in_progress = false;
- return;
- }
-
- txmsg = list_first_entry(&mgr->tx_msg_upq, struct drm_dp_sideband_msg_tx, next);
ret = process_single_tx_qlock(mgr, txmsg, true);
- if (ret == 1) {
- /* up txmsgs aren't put in slots - so free after we send it */
- list_del(&txmsg->next);
- kfree(txmsg);
- } else if (ret)
+
+ if (ret != 1)
DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
- mgr->tx_up_in_progress = true;
+
+ txmsg->dst->tx_slots[txmsg->seqno] = NULL;
}
static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
@@ -1507,6 +1575,9 @@ static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
}
+
+ drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);
+
for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
}
@@ -1554,6 +1625,37 @@ static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
return 0;
}
+static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
+{
+ if (!mstb->port_parent)
+ return NULL;
+
+ if (mstb->port_parent->mstb != mstb)
+ return mstb->port_parent;
+
+ return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
+}
+
+static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb,
+ int *port_num)
+{
+ struct drm_dp_mst_branch *rmstb = NULL;
+ struct drm_dp_mst_port *found_port;
+ mutex_lock(&mgr->lock);
+ if (mgr->mst_primary) {
+ found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
+
+ if (found_port) {
+ rmstb = found_port->parent;
+ kref_get(&rmstb->kref);
+ *port_num = found_port->port_num;
+ }
+ }
+ mutex_unlock(&mgr->lock);
+ return rmstb;
+}
+
static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
int id,
@@ -1561,11 +1663,16 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
{
struct drm_dp_sideband_msg_tx *txmsg;
struct drm_dp_mst_branch *mstb;
- int len, ret;
+ int len, ret, port_num;
+ port_num = port->port_num;
mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
- if (!mstb)
- return -EINVAL;
+ if (!mstb) {
+ mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
+
+ if (!mstb)
+ return -EINVAL;
+ }
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg) {
@@ -1574,7 +1681,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
}
txmsg->dst = mstb;
- len = build_allocate_payload(txmsg, port->port_num,
+ len = build_allocate_payload(txmsg, port_num,
id,
pbn);
@@ -1844,11 +1951,12 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
drm_dp_encode_up_ack_reply(txmsg, req_type);
mutex_lock(&mgr->qlock);
- list_add_tail(&txmsg->next, &mgr->tx_msg_upq);
- if (!mgr->tx_up_in_progress) {
- process_single_up_tx_qlock(mgr);
- }
+
+ process_single_up_tx_qlock(mgr, txmsg);
+
mutex_unlock(&mgr->qlock);
+
+ kfree(txmsg);
return 0;
}
@@ -1927,31 +2035,17 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
mgr->mst_primary = mstb;
kref_get(&mgr->mst_primary->kref);
- {
- struct drm_dp_payload reset_pay;
- reset_pay.start_slot = 0;
- reset_pay.num_slots = 0x3f;
- drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
- }
-
ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
- DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
+ DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
if (ret < 0) {
goto out_unlock;
}
-
- /* sort out guid */
- ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16);
- if (ret != 16) {
- DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret);
- goto out_unlock;
- }
-
- mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid);
- if (!mgr->guid_valid) {
- ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16);
- mgr->guid_valid = true;
+ {
+ struct drm_dp_payload reset_pay;
+ reset_pay.start_slot = 0;
+ reset_pay.num_slots = 0x3f;
+ drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
}
queue_work(system_long_wq, &mgr->work);
@@ -2145,28 +2239,51 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
if (mgr->up_req_recv.have_eomt) {
struct drm_dp_sideband_msg_req_body msg;
- struct drm_dp_mst_branch *mstb;
+ struct drm_dp_mst_branch *mstb = NULL;
bool seqno;
- mstb = drm_dp_get_mst_branch_device(mgr,
- mgr->up_req_recv.initial_hdr.lct,
- mgr->up_req_recv.initial_hdr.rad);
- if (!mstb) {
- DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
- memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
- return 0;
+
+ if (!mgr->up_req_recv.initial_hdr.broadcast) {
+ mstb = drm_dp_get_mst_branch_device(mgr,
+ mgr->up_req_recv.initial_hdr.lct,
+ mgr->up_req_recv.initial_hdr.rad);
+ if (!mstb) {
+ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
+ memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ return 0;
+ }
}
seqno = mgr->up_req_recv.initial_hdr.seqno;
drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
- drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
+ drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
+
+ if (!mstb)
+ mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid);
+
+ if (!mstb) {
+ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
+ memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ return 0;
+ }
+
drm_dp_update_port(mstb, &msg.u.conn_stat);
+
DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
(*mgr->cbs->hotplug)(mgr);
} else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
- drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
+ drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
+ if (!mstb)
+ mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid);
+
+ if (!mstb) {
+ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
+ memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ return 0;
+ }
+
DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
}
@@ -2346,6 +2463,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp
DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
if (pbn == port->vcpi.pbn) {
*slots = port->vcpi.num_slots;
+ drm_dp_put_port(port);
return true;
}
}
@@ -2505,32 +2623,31 @@ EXPORT_SYMBOL(drm_dp_check_act_status);
*/
int drm_dp_calc_pbn_mode(int clock, int bpp)
{
- fixed20_12 pix_bw;
- fixed20_12 fbpp;
- fixed20_12 result;
- fixed20_12 margin, tmp;
- u32 res;
-
- pix_bw.full = dfixed_const(clock);
- fbpp.full = dfixed_const(bpp);
- tmp.full = dfixed_const(8);
- fbpp.full = dfixed_div(fbpp, tmp);
-
- result.full = dfixed_mul(pix_bw, fbpp);
- margin.full = dfixed_const(54);
- tmp.full = dfixed_const(64);
- margin.full = dfixed_div(margin, tmp);
- result.full = dfixed_div(result, margin);
-
- margin.full = dfixed_const(1006);
- tmp.full = dfixed_const(1000);
- margin.full = dfixed_div(margin, tmp);
- result.full = dfixed_mul(result, margin);
-
- result.full = dfixed_div(result, tmp);
- result.full = dfixed_ceil(result);
- res = dfixed_trunc(result);
- return res;
+ u64 kbps;
+ s64 peak_kbps;
+ u32 numerator;
+ u32 denominator;
+
+ kbps = clock * bpp;
+
+ /*
+ * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
+	 * The unit of 54/64 Mbytes/sec is an arbitrary unit chosen as a
+	 * common multiplier so the PBN comes out as an integer for all
+	 * link rate/lane count combinations.
+	 * Calculation:
+	 *   peak_kbps *= (1006/1000)
+	 *   peak_kbps *= (64/54)
+	 *   peak_kbps *= 8    (convert to bytes)
+ */
+
+ numerator = 64 * 1006;
+ denominator = 54 * 8 * 1000 * 1000;
+
+ kbps *= numerator;
+ peak_kbps = drm_fixp_from_fraction(kbps, denominator);
+
+ return drm_fixp2int_ceil(peak_kbps);
}
EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
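
For a quick sanity check of the fixed-point math above, the same computation
can be done with plain 64-bit integer arithmetic; this is a sketch (the helper
name pbn_for_mode is invented here), not the kernel helper itself:

	#include <stdio.h>

	/* PBN = ceil(clock_khz * bpp * 64 * 1006 / (54 * 8 * 1000 * 1000)) */
	static int pbn_for_mode(long long clock_khz, int bpp)
	{
		long long num = clock_khz * bpp * 64 * 1006;
		long long den = 8LL * 54 * 1000 * 1000;

		return (int)((num + den - 1) / den);	/* round up */
	}

	int main(void)
	{
		/* matches the self-test values below: 689, 1047, 1063 */
		printf("%d %d %d\n", pbn_for_mode(154000, 30),
		       pbn_for_mode(234000, 30), pbn_for_mode(297000, 24));
		return 0;
	}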
@@ -2538,11 +2655,23 @@ static int test_calc_pbn_mode(void)
{
int ret;
ret = drm_dp_calc_pbn_mode(154000, 30);
- if (ret != 689)
+ if (ret != 689) {
+ DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
+ 154000, 30, 689, ret);
return -EINVAL;
+ }
ret = drm_dp_calc_pbn_mode(234000, 30);
- if (ret != 1047)
+ if (ret != 1047) {
+ DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
+ 234000, 30, 1047, ret);
+ return -EINVAL;
+ }
+ ret = drm_dp_calc_pbn_mode(297000, 24);
+ if (ret != 1063) {
+ DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
+ 297000, 24, 1063, ret);
return -EINVAL;
+ }
return 0;
}
@@ -2683,6 +2812,13 @@ static void drm_dp_tx_work(struct work_struct *work)
mutex_unlock(&mgr->qlock);
}
+static void drm_dp_free_mst_port(struct kref *kref)
+{
+ struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
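+	/* drop the reference the port held on its parent branch, then free */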
+ kref_put(&port->parent->kref, drm_dp_free_mst_branch_device);
+ kfree(port);
+}
+
static void drm_dp_destroy_connector_work(struct work_struct *work)
{
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
@@ -2703,13 +2839,22 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
list_del(&port->next);
mutex_unlock(&mgr->destroy_connector_lock);
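+	/* re-init the kref so the final kref_put() below frees the port */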
+ kref_init(&port->kref);
+ INIT_LIST_HEAD(&port->next);
+
mgr->cbs->destroy_connector(mgr, port->connector);
drm_dp_port_teardown_pdt(port, port->pdt);
- if (!port->input && port->vcpi.vcpi > 0)
- drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
- kfree(port);
+ if (!port->input && port->vcpi.vcpi > 0) {
+ if (mgr->mst_state) {
+ drm_dp_mst_reset_vcpi_slots(mgr, port);
+ drm_dp_update_payload_part1(mgr);
+ drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
+ }
+ }
+
+ kref_put(&port->kref, drm_dp_free_mst_port);
send_hotplug = true;
}
if (send_hotplug)
@@ -2736,7 +2881,6 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
mutex_init(&mgr->qlock);
mutex_init(&mgr->payload_lock);
mutex_init(&mgr->destroy_connector_lock);
- INIT_LIST_HEAD(&mgr->tx_msg_upq);
INIT_LIST_HEAD(&mgr->tx_msg_downq);
INIT_LIST_HEAD(&mgr->destroy_connector_list);
INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index c19a62561183..26051b849e93 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -25,6 +25,12 @@
#include <drm/drm_fb_cma_helper.h>
#include <linux/module.h>
+#ifdef CONFIG_DRM_CMA_FBDEV_BUFFER_NUM
+#define FBDEV_BUFFER_NUM CONFIG_DRM_CMA_FBDEV_BUFFER_NUM
+#else
+#define FBDEV_BUFFER_NUM 1
+#endif
+
struct drm_fb_cma {
struct drm_framebuffer fb;
struct drm_gem_cma_object *obj[4];
@@ -253,7 +259,7 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
+ mode_cmd.height = sizes->surface_height * FBDEV_BUFFER_NUM;
mode_cmd.pitches[0] = sizes->surface_width * bytes_per_pixel;
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth);
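
Multiplying the surface height by FBDEV_BUFFER_NUM gives the fbdev emulation
N screen-sized buffers in a single allocation, so a client can page-flip by
panning the virtual y offset. A hedged userspace sketch, assuming the usual
fbdev ioctls and a CONFIG_DRM_CMA_FBDEV_BUFFER_NUM=2 build (error handling
omitted):

	#include <linux/fb.h>
	#include <sys/ioctl.h>

	/* show buffer 'buf' (0 or 1) by panning to 0 or yres */
	static void flip(int fd, struct fb_var_screeninfo *var, int buf)
	{
		var->yoffset = buf * var->yres;
		ioctl(fd, FBIOPAN_DISPLAY, var);
	}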
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 607f493ae801..8090989185b2 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -221,6 +221,64 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
diff = (flags & DRM_CALLED_FROM_VBLIRQ) != 0;
}
+ /*
+ * Within a drm_vblank_pre_modeset - drm_vblank_post_modeset
+ * interval? If so then vblank irqs keep running and it will likely
+ * happen that the hardware vblank counter is not trustworthy as it
+ * might reset at some point in that interval and vblank timestamps
+	 * are not trustworthy either in that interval. IOW, this can result
+	 * in a bogus diff >> 1, which must be avoided as it would cause
+ * random large forward jumps of the software vblank counter.
+ */
+ if (diff > 1 && (vblank->inmodeset & 0x2)) {
+ DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diffr=%u"
+ " due to pre-modeset.\n", pipe, diff);
+ diff = 1;
+ }
+
+ /*
+	 * FIXME: Need to replace this hack with proper seqlocks.
+ *
+ * Restrict the bump of the software vblank counter to a safe maximum
+ * value of +1 whenever there is the possibility that concurrent readers
+ * of vblank timestamps could be active at the moment, as the current
+ * implementation of the timestamp caching and updating is not safe
+ * against concurrent readers for calls to store_vblank() with a bump
+ * of anything but +1. A bump != 1 would very likely return corrupted
+ * timestamps to userspace, because the same slot in the cache could
+ * be concurrently written by store_vblank() and read by one of those
+ * readers without the read-retry logic detecting the collision.
+ *
+ * Concurrent readers can exist when we are called from the
+ * drm_vblank_off() or drm_vblank_on() functions and other non-vblank-
+ * irq callers. However, all those calls to us are happening with the
+ * vbl_lock locked to prevent drm_vblank_get(), so the vblank refcount
+ * can't increase while we are executing. Therefore a zero refcount at
+ * this point is safe for arbitrary counter bumps if we are called
+ * outside vblank irq, a non-zero count is not 100% safe. Unfortunately
+ * we must also accept a refcount of 1, as whenever we are called from
+ * drm_vblank_get() -> drm_vblank_enable() the refcount will be 1 and
+ * we must let that one pass through in order to not lose vblank counts
+ * during vblank irq off - which would completely defeat the whole
+ * point of this routine.
+ *
+ * Whenever we are called from vblank irq, we have to assume concurrent
+ * readers exist or can show up any time during our execution, even if
+ * the refcount is currently zero, as vblank irqs are usually only
+ * enabled due to the presence of readers, and because when we are called
+ * from vblank irq we can't hold the vbl_lock to protect us from sudden
+ * bumps in vblank refcount. Therefore also restrict bumps to +1 when
+ * called from vblank irq.
+ */
+ if ((diff > 1) && (atomic_read(&vblank->refcount) > 1 ||
+ (flags & DRM_CALLED_FROM_VBLIRQ))) {
+ DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diffr=%u "
+ "refcount %u, vblirq %u\n", pipe, diff,
+ atomic_read(&vblank->refcount),
+ (flags & DRM_CALLED_FROM_VBLIRQ) != 0);
+ diff = 1;
+ }
+
DRM_DEBUG_VBL("updating vblank count on crtc %u:"
" current=%u, diff=%u, hw=%u hw_last=%u\n",
pipe, vblank->count, diff, cur_vblank, vblank->last);
@@ -1313,7 +1371,13 @@ void drm_vblank_off(struct drm_device *dev, unsigned int pipe)
spin_lock_irqsave(&dev->event_lock, irqflags);
spin_lock(&dev->vbl_lock);
- vblank_disable_and_save(dev, pipe);
+ DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
+ pipe, vblank->enabled, vblank->inmodeset);
+
+ /* Avoid redundant vblank disables without previous drm_vblank_on(). */
+ if (drm_core_check_feature(dev, DRIVER_ATOMIC) || !vblank->inmodeset)
+ vblank_disable_and_save(dev, pipe);
+
wake_up(&vblank->queue);
/*
@@ -1415,6 +1479,9 @@ void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
return;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
+ pipe, vblank->enabled, vblank->inmodeset);
+
/* Drop our private "prevent drm_vblank_get" refcount */
if (vblank->inmodeset) {
atomic_dec(&vblank->refcount);
@@ -1427,8 +1494,7 @@ void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
* re-enable interrupts if there are users left, or the
* user wishes vblank interrupts to be enabled all the time.
*/
- if (atomic_read(&vblank->refcount) != 0 ||
- (!dev->vblank_disable_immediate && drm_vblank_offdelay == 0))
+ if (atomic_read(&vblank->refcount) != 0 || drm_vblank_offdelay == 0)
WARN_ON(drm_vblank_enable(dev, pipe));
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
@@ -1523,6 +1589,7 @@ void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe)
if (vblank->inmodeset) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
dev->vblank_disable_allowed = true;
+ drm_reset_vblank_timestamp(dev, pipe);
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
if (vblank->inmodeset & 0x2)
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 2d5ca8eec13a..e487f56ffc4c 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -45,9 +45,26 @@
* subset of the MIPI DCS command set.
*/
+static const struct device_type mipi_dsi_device_type;
+
static int mipi_dsi_device_match(struct device *dev, struct device_driver *drv)
{
- return of_driver_match_device(dev, drv);
+ struct mipi_dsi_device *dsi;
+
+ dsi = dev->type == &mipi_dsi_device_type ?
+ to_mipi_dsi_device(dev) : NULL;
+
+ if (!dsi)
+ return 0;
+
+ if (of_driver_match_device(dev, drv))
+ return 1;
+
+ if (!strcmp(drv->name, "mipi_dsi_dummy") &&
+ !strcmp(dsi->name, "dummy"))
+ return 1;
+
+ return 0;
}
static const struct dev_pm_ops mipi_dsi_device_pm_ops = {
@@ -102,9 +119,41 @@ static const struct device_type mipi_dsi_device_type = {
.release = mipi_dsi_dev_release,
};
-static struct mipi_dsi_device *mipi_dsi_device_alloc(struct mipi_dsi_host *host)
+struct mipi_dsi_device_info {
+ char name[DSI_DEV_NAME_SIZE];
+ u32 reg;
+ struct device_node *node;
+};
+
+static int __dsi_check_chan_busy(struct device *dev, void *data)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+ u32 reg = *(u32 *) data;
+
+ if (dsi && dsi->channel == reg)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int mipi_dsi_check_chan_busy(struct mipi_dsi_host *host, u32 reg)
+{
+ return device_for_each_child(host->dev, &reg, __dsi_check_chan_busy);
+}
+
+static struct mipi_dsi_device *
+mipi_dsi_device_new(struct mipi_dsi_host *host,
+ struct mipi_dsi_device_info *info)
{
+ struct device *dev = host->dev;
struct mipi_dsi_device *dsi;
+ int r;
+
+ if (info->reg > 3) {
+ dev_err(dev, "dsi device %s has invalid channel value: %u\n",
+ info->name, info->reg);
+ return ERR_PTR(-EINVAL);
+ }
dsi = kzalloc(sizeof(*dsi), GFP_KERNEL);
if (!dsi)
@@ -114,62 +163,90 @@ static struct mipi_dsi_device *mipi_dsi_device_alloc(struct mipi_dsi_host *host)
dsi->dev.bus = &mipi_dsi_bus_type;
dsi->dev.parent = host->dev;
dsi->dev.type = &mipi_dsi_device_type;
+ dsi->dev.of_node = info->node;
+ dsi->channel = info->reg;
+ strlcpy(dsi->name, info->name, sizeof(dsi->name));
- device_initialize(&dsi->dev);
+ dev_set_name(&dsi->dev, "%s.%d", dev_name(host->dev), info->reg);
- return dsi;
-}
-
-static int mipi_dsi_device_add(struct mipi_dsi_device *dsi)
-{
- struct mipi_dsi_host *host = dsi->host;
+ r = mipi_dsi_check_chan_busy(host, info->reg);
+ if (r)
+ goto err;
- dev_set_name(&dsi->dev, "%s.%d", dev_name(host->dev), dsi->channel);
+ r = device_register(&dsi->dev);
+ if (r)
+ goto err;
- return device_add(&dsi->dev);
+ return dsi;
+err:
+ kfree(dsi);
+ return ERR_PTR(r);
}
static struct mipi_dsi_device *
of_mipi_dsi_device_add(struct mipi_dsi_host *host, struct device_node *node)
{
- struct mipi_dsi_device *dsi;
struct device *dev = host->dev;
+ struct mipi_dsi_device_info info = { };
int ret;
- u32 reg;
- ret = of_property_read_u32(node, "reg", &reg);
+ if (of_modalias_node(node, info.name, sizeof(info.name)) < 0) {
+ dev_err(dev, "modalias failure on %s\n", node->full_name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ ret = of_property_read_u32(node, "reg", &info.reg);
if (ret) {
dev_err(dev, "device node %s has no valid reg property: %d\n",
node->full_name, ret);
return ERR_PTR(-EINVAL);
}
- if (reg > 3) {
- dev_err(dev, "device node %s has invalid reg property: %u\n",
- node->full_name, reg);
- return ERR_PTR(-EINVAL);
- }
+ info.node = of_node_get(node);
- dsi = mipi_dsi_device_alloc(host);
- if (IS_ERR(dsi)) {
- dev_err(dev, "failed to allocate DSI device %s: %ld\n",
- node->full_name, PTR_ERR(dsi));
- return dsi;
- }
+ return mipi_dsi_device_new(host, &info);
+}
+
+static struct mipi_dsi_driver dummy_dsi_driver = {
+ .driver.name = "mipi_dsi_dummy",
+};
- dsi->dev.of_node = of_node_get(node);
- dsi->channel = reg;
+struct mipi_dsi_device *mipi_dsi_new_dummy(struct mipi_dsi_host *host, u32 reg)
+{
+ struct mipi_dsi_device_info info = { "dummy", reg, NULL, };
- ret = mipi_dsi_device_add(dsi);
- if (ret) {
- dev_err(dev, "failed to add DSI device %s: %d\n",
- node->full_name, ret);
- kfree(dsi);
- return ERR_PTR(ret);
+ return mipi_dsi_device_new(host, &info);
+}
+EXPORT_SYMBOL(mipi_dsi_new_dummy);
+
+void mipi_dsi_unregister_device(struct mipi_dsi_device *dsi)
+{
+ if (dsi)
+ device_unregister(&dsi->dev);
+}
+EXPORT_SYMBOL(mipi_dsi_unregister_device);
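
As a usage note, a host driver that needs a peripheral on a channel with no
DT node could pair these helpers roughly as follows; a hedged sketch,
assuming an already registered mipi_dsi_host named host:

	/* create a "dummy" DSI peripheral on virtual channel 0 */
	struct mipi_dsi_device *dsi;

	dsi = mipi_dsi_new_dummy(host, 0);
	if (IS_ERR(dsi))
		return PTR_ERR(dsi);

	/* ... use the device; it is matched by dummy_dsi_driver ... */

	mipi_dsi_unregister_device(dsi);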
+
+static DEFINE_MUTEX(host_lock);
+static LIST_HEAD(host_list);
+
+struct mipi_dsi_host *of_find_mipi_dsi_host_by_node(struct device_node *node)
+{
+ struct mipi_dsi_host *host;
+
+ mutex_lock(&host_lock);
+
+ list_for_each_entry(host, &host_list, list) {
+ if (host->dev->of_node == node) {
+ mutex_unlock(&host_lock);
+ return host;
+ }
}
- return dsi;
+ mutex_unlock(&host_lock);
+
+ return NULL;
}
+EXPORT_SYMBOL(of_find_mipi_dsi_host_by_node);
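
A bridge or encoder driver would typically resolve its host from a DT
phandle; in this hedged sketch the "dsi-host" property name and the 'dev'
context are assumptions, not something defined by this patch:

	struct device_node *host_node;
	struct mipi_dsi_host *host;

	host_node = of_parse_phandle(dev->of_node, "dsi-host", 0);
	if (!host_node)
		return -ENODEV;

	host = of_find_mipi_dsi_host_by_node(host_node);
	of_node_put(host_node);
	if (!host)
		return -EPROBE_DEFER;	/* host not registered yet */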
int mipi_dsi_host_register(struct mipi_dsi_host *host)
{
@@ -182,6 +259,10 @@ int mipi_dsi_host_register(struct mipi_dsi_host *host)
of_mipi_dsi_device_add(host, node);
}
+ mutex_lock(&host_lock);
+ list_add_tail(&host->list, &host_list);
+ mutex_unlock(&host_lock);
+
return 0;
}
EXPORT_SYMBOL(mipi_dsi_host_register);
@@ -198,6 +279,10 @@ static int mipi_dsi_remove_device_fn(struct device *dev, void *priv)
void mipi_dsi_host_unregister(struct mipi_dsi_host *host)
{
device_for_each_child(host->dev, NULL, mipi_dsi_remove_device_fn);
+
+ mutex_lock(&host_lock);
+ list_del_init(&host->list);
+ mutex_unlock(&host_lock);
}
EXPORT_SYMBOL(mipi_dsi_host_unregister);
@@ -924,7 +1009,13 @@ EXPORT_SYMBOL(mipi_dsi_driver_unregister);
static int __init mipi_dsi_bus_init(void)
{
- return bus_register(&mipi_dsi_bus_type);
+ int ret;
+
+ ret = bus_register(&mipi_dsi_bus_type);
+ if (ret < 0)
+ return ret;
+
+ return mipi_dsi_driver_register(&dummy_dsi_driver);
}
postcore_initcall(mipi_dsi_bus_init);
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index c707fa6fca85..e3bdc8b1c32c 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -130,7 +130,7 @@ int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size,
return ret;
}
/* We have the initial and handle reference but need only one now */
- drm_gem_object_unreference(&r->gem);
+ drm_gem_object_unreference_unlocked(&r->gem);
*handlep = handle;
return 0;
}
diff --git a/drivers/gpu/drm/hisilicon/Kconfig b/drivers/gpu/drm/hisilicon/Kconfig
new file mode 100644
index 000000000000..f1c33c26b664
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/Kconfig
@@ -0,0 +1,10 @@
+config DRM_HISI
+ tristate "DRM Support for Hisilicon SoCs Platform"
+ depends on DRM
+ select DRM_KMS_HELPER
+ select DRM_GEM_CMA_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_MIPI_DSI
+ help
+	  Choose this option if you have a Hisilicon chipset (hi6220).
+ If M is selected the module will be called hisi-drm.
diff --git a/drivers/gpu/drm/hisilicon/Makefile b/drivers/gpu/drm/hisilicon/Makefile
new file mode 100644
index 000000000000..5083c1ffd43e
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/Makefile
@@ -0,0 +1,5 @@
+hisi-drm-y := hisi_drm_drv.o \
+ hisi_drm_ade.o \
+ hisi_drm_dsi.o
+
+obj-$(CONFIG_DRM_HISI) += hisi-drm.o
diff --git a/drivers/gpu/drm/hisilicon/hisi_ade_reg.h b/drivers/gpu/drm/hisilicon/hisi_ade_reg.h
new file mode 100644
index 000000000000..1055cce5158e
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hisi_ade_reg.h
@@ -0,0 +1,494 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __HISI_ADE_REG_H__
+#define __HISI_ADE_REG_H__
+
+/*
+ * ADE Registers Offset
+ */
+#define ADE_CTRL (0x4)
+#define ADE_CTRL1 (0x8C)
+#define ADE_ROT_SRC_CFG (0x10)
+#define ADE_DISP_SRC_CFG (0x18)
+#define ADE_WDMA2_SRC_CFG (0x1C)
+#define ADE_SEC_OVLY_SRC_CFG (0x20)
+#define ADE_WDMA3_SRC_CFG (0x24)
+#define ADE_OVLY1_TRANS_CFG (0x2C)
+#define ADE_EN (0x100)
+#define INTR_MASK_CPU_0 (0xC10)
+#define INTR_MASK_CPU_1 (0xC14)
+#define ADE_FRM_DISGARD_CTRL (0xA4)
+/* reset and reload regs */
+#define ADE_SOFT_RST_SEL0 (0x78)
+#define ADE_SOFT_RST_SEL1 (0x7C)
+#define ADE_RELOAD_DIS0 (0xAC)
+#define ADE_RELOAD_DIS1 (0xB0)
+#define ADE_CH_RDMA_BIT_OFST (0)
+#define ADE_CLIP_BIT_OFST (15)
+#define ADE_SCL_BIT_OFST (21)
+#define ADE_CTRAN_BIT_OFST (24)
+#define ADE_OVLY_BIT_OFST (37) /* 32+5 */
+/* channel regs */
+#define RD_CH_PE(x) (0x1000 + (x) * 0x80)
+#define RD_CH_CTRL(x) (0x1004 + (x) * 0x80)
+#define RD_CH_ADDR(x) (0x1008 + (x) * 0x80)
+#define RD_CH_SIZE(x) (0x100C + (x) * 0x80)
+#define RD_CH_STRIDE(x) (0x1010 + (x) * 0x80)
+#define RD_CH_SPACE(x) (0x1014 + (x) * 0x80)
+#define RD_CH_PARTIAL_SIZE(x) (0x1018 + (x) * 0x80)
+#define RD_CH_PARTIAL_SPACE(x) (0x101C + (x) * 0x80)
+#define RD_CH_EN(x) (0x1020 + (x) * 0x80)
+#define RD_CH_STATUS(x) (0x1024 + (x) * 0x80)
+#define RD_CH_DISP_CTRL (0x1404)
+#define RD_CH_DISP_ADDR (0x1408)
+#define RD_CH_DISP_SIZE (0x140C)
+#define RD_CH_DISP_STRIDE (0x1410)
+#define RD_CH_DISP_SPACE (0x1414)
+#define RD_CH_DISP_EN (0x142C)
+/* clip regs */
+#define ADE_CLIP_DISABLE(x) (0x6800 + (x) * 0x100)
+#define ADE_CLIP_SIZE0(x) (0x6804 + (x) * 0x100)
+#define ADE_CLIP_SIZE1(x) (0x6808 + (x) * 0x100)
+#define ADE_CLIP_SIZE2(x) (0x680C + (x) * 0x100)
+#define ADE_CLIP_CFG_OK(x) (0x6810 + (x) * 0x100)
+/* scale regs */
+#define ADE_SCL1_MUX_CFG (0xC)
+#define ADE_SCL2_SRC_CFG (0x14)
+#define ADE_SCL3_MUX_CFG (0x8)
+#define ADE_SCL_CTRL(x) (0x3000 + (x) * 0x800)
+#define ADE_SCL_HSP(x) (0x3004 + (x) * 0x800)
+#define ADE_SCL_UV_HSP(x) (0x3008 + (x) * 0x800)
+#define ADE_SCL_VSP(x) (0x300C + (x) * 0x800)
+#define ADE_SCL_UV_VSP(x) (0x3010 + (x) * 0x800)
+#define ADE_SCL_ORES(x) (0x3014 + (x) * 0x800)
+#define ADE_SCL_IRES(x) (0x3018 + (x) * 0x800)
+#define ADE_SCL_START(x) (0x301C + (x) * 0x800)
+#define ADE_SCL_ERR(x) (0x3020 + (x) * 0x800)
+#define ADE_SCL_PIX_OFST(x) (0x3024 + (x) * 0x800)
+#define ADE_SCL_UV_PIX_OFST(x) (0x3028 + (x) * 0x800)
+#define ADE_SCL_COEF_CLR(x) (0x3030 + (x) * 0x800)
+#define ADE_SCL_HCOEF(x, m, n) (0x3100 + (x) * 0x800 + \
+ 12 * (m) + 4 * (n))
+#define ADE_SCL_VCOEF(x, i, j) (0x340C + (x) * 0x800 + \
+ 12 * (i) + 4 * (j))
+/* ctran regs */
+#define ADE_CTRAN5_TRANS_CFG (0x40)
+#define ADE_CTRAN_DIS(x) (0x5004 + (x) * 0x100)
+#define ADE_CTRAN_MODE_CHOOSE(x) (0x5008 + (x) * 0x100)
+#define ADE_CTRAN_STAT(x) (0x500C + (x) * 0x100)
+#define ADE_CTRAN_CHDC0(x) (0x5010 + (x) * 0x100)
+#define ADE_CTRAN_CHDC1(x) (0x5014 + (x) * 0x100)
+#define ADE_CTRAN_CHDC2(x) (0x5018 + (x) * 0x100)
+#define ADE_CTRAN_CHDC3(x) (0x501C + (x) * 0x100)
+#define ADE_CTRAN_CHDC4(x) (0x5020 + (x) * 0x100)
+#define ADE_CTRAN_CHDC5(x) (0x5024 + (x) * 0x100)
+#define ADE_CTRAN_CSC0(x) (0x5028 + (x) * 0x100)
+#define ADE_CTRAN_CSC1(x) (0x502C + (x) * 0x100)
+#define ADE_CTRAN_CSC2(x) (0x5030 + (x) * 0x100)
+#define ADE_CTRAN_CSC3(x) (0x5034 + (x) * 0x100)
+#define ADE_CTRAN_CSC4(x) (0x5038 + (x) * 0x100)
+#define ADE_CTRAN_IMAGE_SIZE(x) (0x503C + (x) * 0x100)
+#define ADE_CTRAN_CFG_OK(x) (0x5040 + (x) * 0x100)
+/* overlay regs */
+#define ADE_OVLY_ALPHA_ST (0x2000)
+#define ADE_OVLY_CH_XY0(x) (0x2004 + (x) * 4)
+#define ADE_OVLY_CH_XY1(x) (0x2024 + (x) * 4)
+#define ADE_OVLY_CH_CTL(x) (0x204C + (x) * 4)
+#define ADE_OVLY_OUTPUT_SIZE(x) (0x2070 + (x) * 8)
+#define ADE_OVLY_BASE_COLOR(x) (0x2074 + (x) * 8)
+#define ADE_OVLYX_CTL(x) (0x209C + (x) * 4)
+#define ADE_OVLY_CTL (0x98)
+#define ADE_OVLY_CH_ALP_MODE_OFST (0)
+#define ADE_OVLY_CH_ALP_SEL_OFST (2)
+#define ADE_OVLY_CH_UNDER_ALP_SEL_OFST (4)
+#define ADE_OVLY_CH_EN_OFST (6)
+#define ADE_OVLY_CH_ALP_GBL_OFST (15)
+#define ADE_OVLY_CH_SEL_OFST (28)
+
+/*
+ * media regs
+ */
+#define SC_MEDIA_RSTDIS (0x530)
+#define SC_MEDIA_RSTEN (0x52C)
+#define NOC_ADE0_QOSGENERATOR_MODE 0x010C
+#define NOC_ADE0_QOSGENERATOR_EXTCONTROL 0x0118
+#define NOC_ADE1_QOSGENERATOR_MODE 0x020C
+#define NOC_ADE1_QOSGENERATOR_EXTCONTROL 0x0218
+
+/*
+ * regs relevant enum
+ */
+enum {
+ LDI_TEST = 0,
+ LDI_WORK
+};
+
+enum {
+ LDI_ISR_FRAME_END_INT = 0x2,
+ LDI_ISR_UNDER_FLOW_INT = 0x4
+};
+
+enum {
+ ADE_ISR1_RES_SWITCH_CMPL = 0x80000000
+};
+
+enum {
+ LDI_DISP_MODE_NOT_3D_FBF = 0,
+ LDI_DISP_MODE_3D_FBF
+};
+
+enum {
+ ADE_RGB = 0,
+ ADE_BGR
+};
+
+enum {
+ ADE_DISABLE = 0,
+ ADE_ENABLE
+};
+
+enum {
+ ADE_OUT_RGB_565 = 0,
+ ADE_OUT_RGB_666,
+ ADE_OUT_RGB_888
+};
+
+/*
+ * ADE reads as big-endian, so reverse the
+ * RGB order described in the SoC datasheet
+ */
+enum ADE_FORMAT {
+ ADE_RGB_565 = 0,
+ ADE_BGR_565,
+ ADE_XRGB_8888,
+ ADE_XBGR_8888,
+ ADE_ARGB_8888,
+ ADE_ABGR_8888,
+ ADE_RGBA_8888,
+ ADE_BGRA_8888,
+ ADE_RGB_888,
+ ADE_BGR_888 = 9,
+ ADE_FORMAT_NOT_SUPPORT = 800
+};
+
+/* ldi src cfg */
+enum {
+ TOP_DISP_SRC_NONE = 0,
+ TOP_DISP_SRC_OVLY2,
+ TOP_DISP_SRC_DISP,
+ TOP_DISP_SRC_ROT,
+ TOP_DISP_SRC_SCL2
+};
+
+enum {
+ ADE_ISR_DMA_ERROR = 0x2000000
+};
+
+enum ade_channel {
+ ADE_CH1 = 0, /* channel 1 for primary plane */
+ ADE_CH_NUM
+};
+
+enum ade_scale {
+ ADE_SCL1 = 0,
+ ADE_SCL2,
+ ADE_SCL3,
+ ADE_SCL_NUM
+};
+
+enum ade_ctran {
+ ADE_CTRAN1 = 0,
+ ADE_CTRAN2,
+ ADE_CTRAN3,
+ ADE_CTRAN4,
+ ADE_CTRAN5,
+ ADE_CTRAN6,
+ ADE_CTRAN_NUM
+};
+
+enum ade_overlay {
+ ADE_OVLY1 = 0,
+ ADE_OVLY2,
+ ADE_OVLY3,
+ ADE_OVLY_NUM
+};
+
+enum {
+ ADE_ALP_GLOBAL = 0,
+ ADE_ALP_PIXEL,
+ ADE_ALP_PIXEL_AND_GLB
+};
+
+enum {
+ ADE_ALP_MUL_COEFF_0 = 0, /* alpha */
+ ADE_ALP_MUL_COEFF_1, /* 1-alpha */
+ ADE_ALP_MUL_COEFF_2, /* 0 */
+ ADE_ALP_MUL_COEFF_3 /* 1 */
+};
+
+/*
+ * ADE Register Union Struct
+ */
+union U_ADE_CTRL1 {
+struct {
+ unsigned int auto_clk_gate_en :1;
+ unsigned int rot_buf_shr_out :1;
+ unsigned int reserved_44 :30;
+ } bits;
+ unsigned int u32;
+};
+
+union U_ADE_SOFT_RST_SEL0 {
+struct {
+ unsigned int ch1_rdma_srst_sel :1;
+ unsigned int ch2_rdma_srst_sel :1;
+ unsigned int ch3_rdma_srst_sel :1;
+ unsigned int ch4_rdma_srst_sel :1;
+ unsigned int ch5_rdma_srst_sel :1;
+ unsigned int ch6_rdma_srst_sel :1;
+ unsigned int disp_rdma_srst_sel :1;
+ unsigned int cmdq1_rdma_srst_sel :1;
+ unsigned int cmdq2_rdma_srst_sel :1;
+ unsigned int reserved_29 :1;
+ unsigned int ch1_wdma_srst_sel :1;
+ unsigned int ch2_wdma_srst_sel :1;
+ unsigned int ch3_wdma_srst_sel :1;
+ unsigned int reserved_28 :1;
+ unsigned int cmdq_wdma_srst_sel :1;
+ unsigned int clip1_srst_sel :1;
+ unsigned int clip2_srst_sel :1;
+ unsigned int clip3_srst_sel :1;
+ unsigned int clip4_srst_sel :1;
+ unsigned int clip5_srst_sel :1;
+ unsigned int clip6_srst_sel :1;
+ unsigned int scl1_srst_sel :1;
+ unsigned int scl2_srst_sel :1;
+ unsigned int scl3_srst_sel :1;
+ unsigned int ctran1_srst_sel :1;
+ unsigned int ctran2_srst_sel :1;
+ unsigned int ctran3_srst_sel :1;
+ unsigned int ctran4_srst_sel :1;
+ unsigned int ctran5_srst_sel :1;
+ unsigned int ctran6_srst_sel :1;
+ unsigned int rot_srst_sel :1;
+ unsigned int reserved_27 :1;
+ } bits;
+ unsigned int u32;
+};
+
+union U_ADE_CTRL {
+struct {
+ unsigned int frm_end_start :2;
+ unsigned int dfs_buf_cfg :1;
+ unsigned int rot_buf_cfg :3;
+ unsigned int rd_ch5_nv :1;
+ unsigned int rd_ch6_nv :1;
+ unsigned int dfs_buf_unflow_lev1 :13;
+ unsigned int dfs_buf_unflow_lev2 :11;
+ } bits;
+ unsigned int u32;
+};
+
+/*
+ * ADE Register Write/Read functions
+ */
+static inline void set_TOP_CTL_clk_gate_en(u8 *base, u32 val)
+{
+ union U_ADE_CTRL1 ade_ctrl1;
+ u8 *reg_addr = base + ADE_CTRL1;
+
+ ade_ctrl1.u32 = readl(reg_addr);
+ ade_ctrl1.bits.auto_clk_gate_en = val;
+ writel(ade_ctrl1.u32, reg_addr);
+}
+
+static inline void set_TOP_SOFT_RST_SEL0_disp_rdma(u8 *base, u32 val)
+{
+ union U_ADE_SOFT_RST_SEL0 ade_soft_rst;
+ u8 *addr = base + ADE_SOFT_RST_SEL0;
+
+ ade_soft_rst.u32 = readl(addr);
+ ade_soft_rst.bits.disp_rdma_srst_sel = val;
+ writel(ade_soft_rst.u32, addr);
+}
+
+static inline void set_TOP_SOFT_RST_SEL0_ctran5(u8 *base, u32 val)
+{
+ union U_ADE_SOFT_RST_SEL0 ade_soft_rst;
+ u8 *addr = base + ADE_SOFT_RST_SEL0;
+
+ ade_soft_rst.u32 = readl(addr);
+ ade_soft_rst.bits.ctran5_srst_sel = val;
+ writel(ade_soft_rst.u32, addr);
+}
+
+static inline void set_TOP_SOFT_RST_SEL0_ctran6(u8 *base, u32 val)
+{
+ union U_ADE_SOFT_RST_SEL0 ade_soft_rst;
+ u8 *addr = base + ADE_SOFT_RST_SEL0;
+
+ ade_soft_rst.u32 = readl(addr);
+ ade_soft_rst.bits.ctran6_srst_sel = val;
+ writel(ade_soft_rst.u32, addr);
+}
+
+static inline void set_TOP_CTL_frm_end_start(u8 *base, u32 val)
+{
+ union U_ADE_CTRL ade_ctrl;
+ u8 *reg_addr = base + ADE_CTRL;
+
+ ade_ctrl.u32 = readl(reg_addr);
+ ade_ctrl.bits.frm_end_start = val;
+ writel(ade_ctrl.u32, reg_addr);
+}
+
+/*
+ * LDI Registers Offset
+ */
+#define LDI_HRZ_CTRL0 (0x7400)
+#define LDI_HRZ_CTRL1 (0x7404)
+#define LDI_VRT_CTRL0 (0x7408)
+#define LDI_VRT_CTRL1 (0x740C)
+#define LDI_PLR_CTRL (0x7410)
+#define LDI_DSP_SIZE (0x7414)
+#define LDI_INT_EN (0x741C)
+#define LDI_CTRL (0x7420)
+#define LDI_ORG_INT (0x7424)
+#define LDI_MSK_INT (0x7428)
+#define LDI_INT_CLR (0x742C)
+#define LDI_WORK_MODE (0x7430)
+#define LDI_DE_SPACE_LOW (0x7438)
+#define LDI_MCU_INTS (0x7450)
+#define LDI_MCU_INTE (0x7454)
+#define LDI_MCU_INTC (0x7458)
+#define LDI_HDMI_DSI_GT (0x7434)
+
+/*
+ * LDI Timing Polarity defines
+ */
+#define HISI_LDI_FLAG_NVSYNC BIT(0)
+#define HISI_LDI_FLAG_NHSYNC BIT(1)
+#define HISI_LDI_FLAG_NPIXCLK BIT(2)
+#define HISI_LDI_FLAG_NDE BIT(3)
+
+/*
+ * LDI Register Union Struct
+ */
+union U_LDI_CTRL {
+struct {
+ unsigned int ldi_en :1;
+ unsigned int disp_mode_buf :1;
+ unsigned int date_gate_en :1;
+ unsigned int bpp :2;
+ unsigned int wait_vsync_en :1;
+ unsigned int corlorbar_width :7;
+ unsigned int bgr :1;
+ unsigned int color_mode :1;
+ unsigned int shutdown :1;
+ unsigned int vactive_line :12;
+ unsigned int ldi_en_self_clr :1;
+ unsigned int reserved_573 :3;
+ } bits;
+ unsigned int u32;
+};
+
+union U_LDI_WORK_MODE {
+struct {
+ unsigned int work_mode :1;
+ unsigned int wback_en :1;
+ unsigned int colorbar_en :1;
+ unsigned int reserved_577 :29;
+ } bits;
+ unsigned int u32;
+};
+
+/*
+ * LDI Register Write/Read Helper functions
+ */
+static inline void set_reg(u8 *addr, u32 val, u32 bw, u32 bs)
+{
+ u32 mask = (1 << bw) - 1;
+ u32 tmp = readl(addr);
+
+ tmp &= ~(mask << bs);
+ writel(tmp | ((val & mask) << bs), addr);
+}
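
To make the read-modify-write helper concrete, here is the arithmetic for
the set_reg(base + LDI_DE_SPACE_LOW, 0x1, 1, 1) call used later in this
patch (a worked example, not new driver code):

	/* set_reg(addr, 0x1, 1, 1) with a register value of 0:
	 *   mask = (1 << 1) - 1        = 0x1
	 *   tmp &= ~(mask << 1)          -> clears bit 1
	 *   tmp |= (0x1 & mask) << 1     -> writes 0x2, i.e. bit 1 is set
	 */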
+
+static inline void set_LDI_CTRL_ldi_en(u8 *base, u32 val)
+{
+ union U_LDI_CTRL ldi_ctrl;
+ u8 *addr = base + LDI_CTRL;
+
+ ldi_ctrl.u32 = readl(addr);
+ ldi_ctrl.bits.ldi_en = val;
+ writel(ldi_ctrl.u32, addr);
+}
+
+static inline void set_LDI_CTRL_disp_mode(u8 *base, u32 val)
+{
+ union U_LDI_CTRL ldi_ctrl;
+ u8 *addr = base + LDI_CTRL;
+
+ ldi_ctrl.u32 = readl(addr);
+ ldi_ctrl.bits.disp_mode_buf = val;
+ writel(ldi_ctrl.u32, addr);
+}
+
+static inline void set_LDI_CTRL_bpp(u8 *base, u32 val)
+{
+ union U_LDI_CTRL ldi_ctrl;
+ u8 *addr = base + LDI_CTRL;
+
+ ldi_ctrl.u32 = readl(addr);
+ ldi_ctrl.bits.bpp = val;
+ writel(ldi_ctrl.u32, addr);
+}
+
+static inline void set_LDI_CTRL_corlorbar_width(u8 *base, u32 val)
+{
+ union U_LDI_CTRL ldi_ctrl;
+ u8 *addr = base + LDI_CTRL;
+
+ ldi_ctrl.u32 = readl(addr);
+ ldi_ctrl.bits.corlorbar_width = (val > 0) ? val - 1 : 0;
+ writel(ldi_ctrl.u32, addr);
+}
+
+static inline void set_LDI_CTRL_bgr(u8 *base, u32 val)
+{
+ union U_LDI_CTRL ldi_ctrl;
+ u8 *addr = base + LDI_CTRL;
+
+ ldi_ctrl.u32 = readl(addr);
+ ldi_ctrl.bits.bgr = val;
+ writel(ldi_ctrl.u32, addr);
+}
+
+static inline void set_LDI_WORK_MODE_work_mode(u8 *base, u32 val)
+{
+ union U_LDI_WORK_MODE ldi_work_mode;
+ u8 *addr = base + LDI_WORK_MODE;
+
+ ldi_work_mode.u32 = readl(addr);
+ ldi_work_mode.bits.work_mode = val;
+ writel(ldi_work_mode.u32, addr);
+}
+
+static inline void set_LDI_WORK_MODE_colorbar_en(u8 *base, u32 val)
+{
+ union U_LDI_WORK_MODE ldi_work_mode;
+ u8 *addr = base + LDI_WORK_MODE;
+
+ ldi_work_mode.u32 = readl(addr);
+ ldi_work_mode.bits.colorbar_en = val;
+ writel(ldi_work_mode.u32, addr);
+}
+
+#endif
diff --git a/drivers/gpu/drm/hisilicon/hisi_drm_ade.c b/drivers/gpu/drm/hisilicon/hisi_drm_ade.c
new file mode 100644
index 000000000000..64365f5ceaf8
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hisi_drm_ade.c
@@ -0,0 +1,1100 @@
+/*
+ * Hisilicon Hi6220 SoC ADE (Advanced Display Engine) crtc & plane driver
+ *
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ * Author:
+ * Xinliang Liu <xinliang.liu@linaro.org>
+ * Xinliang Liu <z.liuxinliang@hisilicon.com>
+ * Xinwei Kong <kong.kongxinwei@hisilicon.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <video/display_timing.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+
+#include "hisi_drm_drv.h"
+#include "hisi_ade_reg.h"
+
+#define FORCE_PIXEL_CLOCK_SAME_OR_HIGHER 0
+#define PRIMARY_CH (ADE_CH1)
+
+#define to_ade_crtc(crtc) \
+ container_of(crtc, struct ade_crtc, base)
+
+#define to_ade_plane(plane) \
+ container_of(plane, struct ade_plane, base)
+
+struct ade_hw_ctx {
+ void __iomem *base;
+ void __iomem *media_base;
+ void __iomem *media_noc_base;
+
+ int irq;
+ u32 ade_core_rate;
+ u32 media_noc_rate;
+
+ struct clk *ade_core_clk;
+ struct clk *media_noc_clk;
+ struct clk *ade_pix_clk;
+ bool power_on;
+};
+
+struct ade_crtc {
+ struct drm_crtc base;
+ struct ade_hw_ctx *ctx;
+ bool enable;
+ u64 use_mask;
+};
+
+struct ade_plane {
+ struct drm_plane base;
+ void *ctx;
+ u8 ch; /* channel */
+};
+
+struct ade_data {
+ struct ade_crtc acrtc;
+ struct ade_plane aplane[ADE_CH_NUM];
+ struct ade_hw_ctx ctx;
+};
+
+/* ade-format info: */
+struct ade_format {
+ u32 pixel_format;
+ enum ADE_FORMAT ade_format;
+};
+
+static const struct ade_format ade_formats[] = {
+ /* 16bpp RGB: */
+ { DRM_FORMAT_RGB565, ADE_RGB_565 },
+ { DRM_FORMAT_BGR565, ADE_BGR_565 },
+ /* 24bpp RGB: */
+ { DRM_FORMAT_RGB888, ADE_RGB_888 },
+ { DRM_FORMAT_BGR888, ADE_BGR_888 },
+ /* 32bpp [A]RGB: */
+ { DRM_FORMAT_XRGB8888, ADE_XRGB_8888 },
+ { DRM_FORMAT_XBGR8888, ADE_XBGR_8888 },
+ { DRM_FORMAT_RGBA8888, ADE_RGBA_8888 },
+ { DRM_FORMAT_BGRA8888, ADE_BGRA_8888 },
+ { DRM_FORMAT_ARGB8888, ADE_ARGB_8888 },
+ { DRM_FORMAT_ABGR8888, ADE_ABGR_8888 },
+};
+
+static const u32 channel_formats1[] = {
+ /* channel 1,2,3,4 */
+ DRM_FORMAT_RGB565, DRM_FORMAT_BGR565, DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888, DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGBA8888, DRM_FORMAT_BGRA8888, DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888
+};
+
+u32 ade_get_channel_formats(u8 ch, const u32 **formats)
+{
+ switch (ch) {
+ case ADE_CH1:
+ *formats = channel_formats1;
+ return ARRAY_SIZE(channel_formats1);
+ default:
+ DRM_ERROR("no this channel %d\n", ch);
+ *formats = NULL;
+ return 0;
+ }
+}
+
+/* convert from fourcc format to ade format */
+static u32 ade_get_format(u32 pixel_format)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ade_formats); i++)
+ if (ade_formats[i].pixel_format == pixel_format)
+ return ade_formats[i].ade_format;
+
+ /* not found */
+ DRM_ERROR("Not found pixel format!!fourcc_format= %d\n", pixel_format);
+ return ADE_FORMAT_NOT_SUPPORT;
+}
+
+static void ade_init(struct ade_hw_ctx *ctx)
+{
+ void __iomem *base = ctx->base;
+
+ /* enable clk gate */
+ set_TOP_CTL_clk_gate_en(base, 1);
+ /* clear overlay */
+ writel(0, base + ADE_OVLY1_TRANS_CFG);
+ writel(0, base + ADE_OVLY_CTL);
+ writel(0, base + ADE_OVLYX_CTL(ADE_OVLY2));
+ /* clear reset and reload regs */
+ writel(0, base + ADE_SOFT_RST_SEL0);
+ writel(0, base + ADE_SOFT_RST_SEL1);
+ writel(0xFFFFFFFF, base + ADE_RELOAD_DIS0);
+ writel(0xFFFFFFFF, base + ADE_RELOAD_DIS1);
+	/* For video, set to 1: ADE registers become
+	 * effective at frame end
+	 */
+ set_TOP_CTL_frm_end_start(base, 1);
+}
+
+static void ade_ldi_set_mode(struct ade_crtc *acrtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct ade_hw_ctx *ctx = acrtc->ctx;
+ void __iomem *base = ctx->base;
+ u32 out_w = mode->hdisplay;
+ u32 out_h = mode->vdisplay;
+ u32 hfp, hbp, hsw, vfp, vbp, vsw;
+ u32 plr_flags;
+ int ret;
+
+ plr_flags = (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ ? HISI_LDI_FLAG_NVSYNC : 0;
+ plr_flags |= (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ ? HISI_LDI_FLAG_NHSYNC : 0;
+ hfp = mode->hsync_start - mode->hdisplay;
+ hbp = mode->htotal - mode->hsync_end;
+ hsw = mode->hsync_end - mode->hsync_start;
+ vfp = mode->vsync_start - mode->vdisplay;
+ vbp = mode->vtotal - mode->vsync_end;
+ vsw = mode->vsync_end - mode->vsync_start;
+ if (vsw > 15) {
+ DRM_INFO("vsw exceeded 15\n");
+ vsw = 15;
+ }
+
+ writel((hbp << 20) | (hfp << 0), base + LDI_HRZ_CTRL0);
+ /* p3-73 6220V100 pdf:
+ * "The configured value is the actual width - 1"
+ */
+ writel(hsw - 1, base + LDI_HRZ_CTRL1);
+ writel((vbp << 20) | (vfp << 0), base + LDI_VRT_CTRL0);
+ /* p3-74 6220V100 pdf:
+ * "The configured value is the actual width - 1"
+ */
+ writel(vsw - 1, base + LDI_VRT_CTRL1);
+
+ /* p3-75 6220V100 pdf:
+ * "The configured value is the actual width - 1"
+ */
+ writel(((out_h - 1) << 20) | ((out_w - 1) << 0),
+ base + LDI_DSP_SIZE);
+ writel(plr_flags, base + LDI_PLR_CTRL);
+
+ ret = clk_set_rate(ctx->ade_pix_clk, mode->clock * 1000);
+	/* Success should be guaranteed in atomic_check;
+	 * failure shouldn't happen here
+	 */
+ if (ret)
+ DRM_ERROR("set ade_pixel_clk_rate fail\n");
+ adj_mode->clock = clk_get_rate(ctx->ade_pix_clk) / 1000;
+
+ /* ctran6 setting */
+ writel(1, base + ADE_CTRAN_DIS(ADE_CTRAN6));
+ writel(out_w * out_h - 1, base + ADE_CTRAN_IMAGE_SIZE(ADE_CTRAN6));
+ acrtc->use_mask |= BIT(ADE_CTRAN_BIT_OFST + ADE_CTRAN6);
+ DRM_INFO("set mode: %dx%d\n", out_w, out_h);
+
+ /*
+ * other parameters setting
+ */
+ writel(BIT(0), base + LDI_WORK_MODE);
+ writel((0x3c << 6) | (ADE_OUT_RGB_888 << 3) | BIT(2) | BIT(0),
+ base + LDI_CTRL);
+ set_reg(base + LDI_DE_SPACE_LOW, 0x1, 1, 1);
+}
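
A worked example of the timing arithmetic above, using the standard
1920x1080@60 CEA mode (illustration only, not driver code):

	/* hdisplay 1920, hsync_start 2008, hsync_end 2052, htotal 2200,
	 * vdisplay 1080, vsync_start 1084, vsync_end 1089, vtotal 1125:
	 *   hfp = 2008 - 1920 = 88   hsw = 2052 - 2008 = 44   hbp = 2200 - 2052 = 148
	 *   vfp = 1084 - 1080 = 4    vsw = 1089 - 1084 = 5    vbp = 1125 - 1089 = 36
	 * so LDI_HRZ_CTRL0 = (148 << 20) | 88 and LDI_HRZ_CTRL1 = 43 (hsw - 1).
	 */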
+
+static int ade_power_up(struct ade_hw_ctx *ctx)
+{
+ void __iomem *media_base = ctx->media_base;
+ int ret;
+
+ ret = clk_set_rate(ctx->ade_core_clk, ctx->ade_core_rate);
+ if (ret) {
+ DRM_ERROR("clk_set_rate ade_core_rate error\n");
+ return ret;
+ }
+ ret = clk_set_rate(ctx->media_noc_clk, ctx->media_noc_rate);
+ if (ret) {
+ DRM_ERROR("media_noc_clk media_noc_rate error\n");
+ return ret;
+ }
+ ret = clk_prepare_enable(ctx->media_noc_clk);
+ if (ret) {
+ DRM_ERROR("fail to clk_prepare_enable media_noc_clk\n");
+ return ret;
+ }
+
+ writel(0x20, media_base + SC_MEDIA_RSTDIS);
+
+ ret = clk_prepare_enable(ctx->ade_core_clk);
+ if (ret) {
+ DRM_ERROR("fail to clk_prepare_enable ade_core_clk\n");
+ return ret;
+ }
+
+ ade_init(ctx);
+ ctx->power_on = true;
+ return 0;
+}
+
+static void ade_power_down(struct ade_hw_ctx *ctx)
+{
+ void __iomem *base = ctx->base;
+ void __iomem *media_base = ctx->media_base;
+
+ set_LDI_CTRL_ldi_en(base, ADE_DISABLE);
+ /* dsi pixel off */
+ set_reg(base + LDI_HDMI_DSI_GT, 0x1, 1, 0);
+
+ clk_disable_unprepare(ctx->ade_core_clk);
+ writel(0x20, media_base + SC_MEDIA_RSTEN);
+ clk_disable_unprepare(ctx->media_noc_clk);
+ ctx->power_on = false;
+}
+
+static struct drm_crtc *hisi_get_crtc_from_index(struct drm_device *dev,
+ unsigned int index)
+{
+ unsigned int index_tmp = 0;
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (index_tmp == index)
+ return crtc;
+
+ index_tmp++;
+ }
+
+ WARN_ON(true);
+ return NULL;
+}
+
+int ade_enable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+ struct drm_crtc *crtc = hisi_get_crtc_from_index(dev, pipe);
+ struct ade_crtc *acrtc = to_ade_crtc(crtc);
+ struct ade_hw_ctx *ctx = acrtc->ctx;
+ void __iomem *base = ctx->base;
+ u32 intr_en;
+
+ DRM_INFO("enable_vblank enter.\n");
+ if (!ctx->power_on)
+ (void)ade_power_up(ctx);
+
+ intr_en = readl(base + LDI_INT_EN);
+ intr_en |= LDI_ISR_FRAME_END_INT;
+ writel(intr_en, base + LDI_INT_EN);
+
+ return 0;
+}
+
+void ade_disable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+ struct drm_crtc *crtc = hisi_get_crtc_from_index(dev, pipe);
+ struct ade_crtc *acrtc = to_ade_crtc(crtc);
+ struct ade_hw_ctx *ctx = acrtc->ctx;
+ void __iomem *base = ctx->base;
+ u32 intr_en;
+
+ DRM_INFO("disable_vblank enter.\n");
+ if (!ctx->power_on) {
+ DRM_ERROR("power is down! vblank disable fail\n");
+ return;
+ }
+ intr_en = readl(base + LDI_INT_EN);
+ intr_en &= ~LDI_ISR_FRAME_END_INT;
+ writel(intr_en, base + LDI_INT_EN);
+}
+
+static irqreturn_t ade_irq_handler(int irq, void *data)
+{
+ struct ade_crtc *acrtc = data;
+ struct ade_hw_ctx *ctx = acrtc->ctx;
+ struct drm_crtc *crtc = &acrtc->base;
+ struct drm_device *dev = crtc->dev;
+ void __iomem *base = ctx->base;
+ u32 status;
+
+ status = readl(base + LDI_MSK_INT);
+ /* DRM_INFO("LDI IRQ: status=0x%X\n",status); */
+
+ /* vblank irq */
+ if (status & LDI_ISR_FRAME_END_INT) {
+ writel(LDI_ISR_FRAME_END_INT, base + LDI_INT_CLR);
+ drm_handle_vblank(dev, drm_crtc_index(crtc));
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * set modules' reset mode: by software or hardware
+ * set modules' reload enable/disable
+ */
+static void ade_set_reset_and_reload(struct ade_crtc *acrtc)
+{
+ struct ade_hw_ctx *ctx = acrtc->ctx;
+ void __iomem *base = ctx->base;
+ u32 mask0 = (u32)acrtc->use_mask;
+ u32 mask1 = (u32)(acrtc->use_mask >> 32);
+
+ DRM_DEBUG_DRIVER("mask=0x%llX, mask0=0x%X, mask1=0x%X\n",
+ acrtc->use_mask, mask0, mask1);
+
+ writel(mask0, base + ADE_SOFT_RST_SEL0);
+ writel(mask1, base + ADE_SOFT_RST_SEL1);
+ writel(~mask0, base + ADE_RELOAD_DIS0);
+ writel(~mask1, base + ADE_RELOAD_DIS1);
+}
+
+void ade_set_medianoc_qos(struct ade_crtc *acrtc)
+{
+ struct ade_hw_ctx *ctx = acrtc->ctx;
+ void __iomem *base = ctx->media_noc_base;
+ void __iomem *reg;
+ u32 val;
+
+ reg = base + NOC_ADE0_QOSGENERATOR_MODE;
+ val = (readl(reg) & 0xfffffffc) | 0x2;
+ writel(val, reg);
+
+ reg = base + NOC_ADE0_QOSGENERATOR_EXTCONTROL;
+ val = readl(reg) | 0x1;
+ writel(val, reg);
+
+ reg = base + NOC_ADE1_QOSGENERATOR_MODE;
+ val = (readl(reg) & 0xfffffffc) | 0x2;
+ writel(val, reg);
+
+ reg = base + NOC_ADE1_QOSGENERATOR_EXTCONTROL;
+ val = readl(reg) | 0x1;
+ writel(val, reg);
+}
+
+/*
+ * commit to ldi to display
+ */
+static void ade_display_commit(struct ade_crtc *acrtc)
+{
+ struct ade_hw_ctx *ctx = acrtc->ctx;
+ void __iomem *base = ctx->base;
+
+ /* TODO: set rotator after overlay */
+
+ /* TODO: set scale after overlay */
+
+ /* display source setting */
+ writel(TOP_DISP_SRC_OVLY2, base + ADE_DISP_SRC_CFG);
+
+ /* set reset mode:soft or hw, and reload modules */
+ ade_set_reset_and_reload(acrtc);
+
+ DRM_INFO("ADE GO\n");
+ /* enable ade */
+ wmb();
+ writel(ADE_ENABLE, base + ADE_EN);
+ /* enable ldi */
+ wmb();
+ set_LDI_CTRL_ldi_en(base, ADE_ENABLE);
+ /* dsi pixel on */
+ set_reg(base + LDI_HDMI_DSI_GT, 0x0, 1, 0);
+}
+
+static void ade_crtc_enable(struct drm_crtc *crtc)
+{
+ struct ade_crtc *acrtc = to_ade_crtc(crtc);
+ struct ade_hw_ctx *ctx = acrtc->ctx;
+ int ret;
+
+ DRM_DEBUG_DRIVER("enter.\n");
+ if (acrtc->enable)
+ return;
+
+ if (!ctx->power_on) {
+ ret = ade_power_up(ctx);
+ if (ret) {
+ DRM_ERROR("failed to initialize ade clk\n");
+ return;
+ }
+ }
+
+ ade_set_medianoc_qos(acrtc);
+ ade_display_commit(acrtc);
+ acrtc->enable = true;
+
+ DRM_DEBUG_DRIVER("exit success.\n");
+}
+
+static void ade_crtc_disable(struct drm_crtc *crtc)
+{
+ struct ade_crtc *acrtc = to_ade_crtc(crtc);
+ struct ade_hw_ctx *ctx = acrtc->ctx;
+
+ DRM_DEBUG_DRIVER("enter.\n");
+
+ if (!acrtc->enable)
+ return;
+
+ ade_power_down(ctx);
+ acrtc->use_mask = 0;
+ acrtc->enable = false;
+ DRM_DEBUG_DRIVER("exit success.\n");
+}
+
+int ade_crtc_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
+{
+ DRM_DEBUG_DRIVER("enter.\n");
+ DRM_DEBUG_DRIVER("exit success.\n");
+ /* do nothing */
+ return 0;
+}
+
+static void ade_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+ struct ade_crtc *acrtc = to_ade_crtc(crtc);
+ struct ade_hw_ctx *ctx = acrtc->ctx;
+ struct drm_display_mode *mode = &crtc->state->mode;
+ struct drm_display_mode *adj_mode = &crtc->state->adjusted_mode;
+
+ DRM_DEBUG_DRIVER("enter.\n");
+ if (!ctx->power_on)
+ (void)ade_power_up(ctx);
+ ade_ldi_set_mode(acrtc, mode, adj_mode);
+ DRM_DEBUG_DRIVER("exit success.\n");
+}
+
+static void ade_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct ade_crtc *acrtc = to_ade_crtc(crtc);
+ struct ade_hw_ctx *ctx = acrtc->ctx;
+
+ DRM_DEBUG_DRIVER("enter.\n");
+ if (!ctx->power_on)
+ (void)ade_power_up(ctx);
+ DRM_DEBUG_DRIVER("exit success.\n");
+}
+
+static void ade_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+
+{
+ struct ade_crtc *acrtc = to_ade_crtc(crtc);
+ struct ade_hw_ctx *ctx = acrtc->ctx;
+ void __iomem *base = ctx->base;
+
+ DRM_DEBUG_DRIVER("enter.\n");
+ /* commit to display: LDI input setting */
+ if (acrtc->enable) {
+ /* set reset and reload */
+ ade_set_reset_and_reload(acrtc);
+		/* flush ADE registers */
+ wmb();
+ writel(ADE_ENABLE, base + ADE_EN);
+ }
+ DRM_DEBUG_DRIVER("exit success.\n");
+}
+
+static const struct drm_crtc_helper_funcs ade_crtc_helper_funcs = {
+ .enable = ade_crtc_enable,
+ .disable = ade_crtc_disable,
+ .atomic_check = ade_crtc_atomic_check,
+ .mode_set_nofb = ade_crtc_mode_set_nofb,
+ .atomic_begin = ade_crtc_atomic_begin,
+ .atomic_flush = ade_crtc_atomic_flush,
+};
+
+static const struct drm_crtc_funcs ade_crtc_funcs = {
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = drm_atomic_helper_crtc_reset,
+ .set_property = drm_atomic_helper_crtc_set_property,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static int ade_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+ struct drm_plane *plane)
+{
+ int ret;
+
+ ret = drm_crtc_init_with_planes(dev, crtc, plane,
+ NULL, &ade_crtc_funcs);
+ if (ret) {
+ DRM_ERROR("failed to init crtc.\n");
+ return ret;
+ }
+
+ drm_crtc_helper_add(crtc, &ade_crtc_helper_funcs);
+
+ return 0;
+}
+
+static void ade_rdma_set(struct ade_crtc *acrtc, struct drm_framebuffer *fb,
+ u32 ch, u32 y, u32 in_h, u32 fmt)
+{
+ u32 reg_ctrl, reg_addr, reg_size, reg_stride, reg_space, reg_en;
+ struct drm_gem_cma_object *obj = drm_fb_cma_get_gem_obj(fb, 0);
+ struct ade_hw_ctx *ctx = acrtc->ctx;
+ void __iomem *base = ctx->base;
+ u32 stride = fb->pitches[0];
+ u32 addr = (u32)obj->paddr + y * stride;
+
+ DRM_DEBUG_DRIVER("rdma%d: (y=%d, height=%d), stride=%d, paddr=0x%x, \
+ addr=0x%x, fb:%dx%d, pixel_format=%d(%s)\n",
+ ch + 1, y, in_h, stride, (u32)obj->paddr,
+ addr, fb->width, fb->height,
+ fmt, drm_get_format_name(fb->pixel_format));
+
+ /* get reg offset */
+ reg_ctrl = RD_CH_CTRL(ch);
+ reg_addr = RD_CH_ADDR(ch);
+ reg_size = RD_CH_SIZE(ch);
+ reg_stride = RD_CH_STRIDE(ch);
+ reg_space = RD_CH_SPACE(ch);
+ reg_en = RD_CH_EN(ch);
+
+ /*
+ * TODO: set rotation
+ */
+ writel((fmt << 16) & 0x1f0000, base + reg_ctrl);
+ writel(addr, base + reg_addr);
+ writel((in_h << 16) | stride, base + reg_size);
+ writel(stride, base + reg_stride);
+ writel(in_h * stride, base + reg_space);
+ writel(1, base + reg_en);
+
+ acrtc->use_mask |= BIT(ADE_CH_RDMA_BIT_OFST + ch);
+}
+
+static void ade_rdma_disable(struct ade_crtc *acrtc, u32 ch)
+{
+ struct ade_hw_ctx *ctx = acrtc->ctx;
+ void __iomem *base = ctx->base;
+ u32 reg_en;
+
+ /* get reg offset */
+ reg_en = RD_CH_EN(ch);
+
+ writel(0, base + reg_en);
+ acrtc->use_mask &= ~BIT(ADE_CH_RDMA_BIT_OFST + ch);
+}
+
+static void ade_clip_set(struct ade_crtc *acrtc, u32 ch, u32 fb_w, u32 x,
+ u32 in_w, u32 in_h)
+{
+ struct ade_hw_ctx *ctx = acrtc->ctx;
+ void __iomem *base = ctx->base;
+ u32 disable_val;
+ u32 clip_left;
+ u32 clip_right;
+
+ /*
+ * clip width, no need to clip height
+ */
+ if (fb_w == in_w) { /* bypass */
+ disable_val = 1;
+ clip_left = 0;
+ clip_right = 0;
+ } else {
+ disable_val = 0;
+ clip_left = x;
+ clip_right = fb_w - (x + in_w) - 1;
+ }
+
+ DRM_DEBUG_DRIVER("clip%d: clip_left=%d, clip_right=%d\n",
+ ch + 1, clip_left, clip_right);
+
+ writel(disable_val, base + ADE_CLIP_DISABLE(ch));
+ writel((fb_w - 1) << 16 | (in_h - 1), base + ADE_CLIP_SIZE0(ch));
+ writel(clip_left << 16 | clip_right, base + ADE_CLIP_SIZE1(ch));
+
+ acrtc->use_mask |= BIT(ADE_CLIP_BIT_OFST + ch);
+}
+
+static void ade_clip_disable(struct ade_crtc *acrtc, u32 ch)
+{
+ struct ade_hw_ctx *ctx = acrtc->ctx;
+ void __iomem *base = ctx->base;
+
+ writel(1, base + ADE_CLIP_DISABLE(ch));
+ acrtc->use_mask &= ~BIT(ADE_CLIP_BIT_OFST + ch);
+}
+
+static bool has_Alpha_channel(int format)
+{
+ switch (format) {
+ case ADE_ARGB_8888:
+ case ADE_ABGR_8888:
+ case ADE_RGBA_8888:
+ case ADE_BGRA_8888:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void ade_get_blending_params(u32 fmt, u8 glb_alpha, u8 *alp_mode,
+ u8 *alp_sel, u8 *under_alp_sel)
+{
+ bool has_alpha = has_Alpha_channel(fmt);
+
+ /*
+ * get alp_mode
+ */
+ if (has_alpha && glb_alpha < 255)
+ *alp_mode = ADE_ALP_PIXEL_AND_GLB;
+ else if (has_alpha)
+ *alp_mode = ADE_ALP_PIXEL;
+ else
+ *alp_mode = ADE_ALP_GLOBAL;
+
+ /*
+ * get alp sel
+ */
+ *alp_sel = ADE_ALP_MUL_COEFF_3; /* 1 */
+ *under_alp_sel = ADE_ALP_MUL_COEFF_2; /* 0 */
+}
+
+static void ade_overlay_set(struct ade_crtc *acrtc, u8 ch, u32 x0, u32 y0,
+ u32 in_w, u32 in_h, u32 fmt)
+{
+ struct ade_hw_ctx *ctx = acrtc->ctx;
+ void __iomem *base = ctx->base;
+ u8 ovly_ch = 0;
+ u8 x = ADE_OVLY2;
+ u8 glb_alpha = 255;
+ u32 x1 = x0 + in_w - 1;
+ u32 y1 = y0 + in_h - 1;
+ u32 val;
+ u8 alp_sel;
+ u8 under_alp_sel;
+ u8 alp_mode;
+
+ ade_get_blending_params(fmt, glb_alpha, &alp_mode, &alp_sel,
+ &under_alp_sel);
+
+ /* overlay routing setting */
+ writel(x0 << 16 | y0, base + ADE_OVLY_CH_XY0(ovly_ch));
+ writel(x1 << 16 | y1, base + ADE_OVLY_CH_XY1(ovly_ch));
+ val = (ch + 1) << ADE_OVLY_CH_SEL_OFST | BIT(ADE_OVLY_CH_EN_OFST) |
+ alp_sel << ADE_OVLY_CH_ALP_SEL_OFST |
+ under_alp_sel << ADE_OVLY_CH_UNDER_ALP_SEL_OFST |
+ glb_alpha << ADE_OVLY_CH_ALP_GBL_OFST |
+ alp_mode << ADE_OVLY_CH_ALP_MODE_OFST;
+ DRM_DEBUG_DRIVER("ch%d_ctl=0x%X\n", ovly_ch + 1, val);
+ writel(val, base + ADE_OVLY_CH_CTL(ovly_ch));
+ val = (x + 1) << (ovly_ch * 4) | readl(base + ADE_OVLY_CTL);
+ DRM_DEBUG_DRIVER("ovly_ctl=0x%X\n", val);
+ writel(val, base + ADE_OVLY_CTL);
+
+	/* when the primary is enabled, indicate that it's ready to output */
+ if (ch == PRIMARY_CH) {
+ val = (in_w - 1) << 16 | (in_h - 1);
+ writel(val, base + ADE_OVLY_OUTPUT_SIZE(x));
+ writel(1, base + ADE_OVLYX_CTL(x));
+ acrtc->use_mask |= BIT(ADE_OVLY_BIT_OFST + x);
+ }
+}
+
+static void ade_overlay_disable(struct ade_crtc *acrtc, u32 ch)
+{
+ struct ade_hw_ctx *ctx = acrtc->ctx;
+ void __iomem *base = ctx->base;
+ u8 ovly_ch = 0;
+ u32 val;
+
+ val = ~BIT(6) & readl(base + ADE_OVLY_CH_CTL(ovly_ch));
+ DRM_DEBUG_DRIVER("ch%d_ctl=0x%X\n", ovly_ch + 1, val);
+ writel(val, base + ADE_OVLY_CH_CTL(ovly_ch));
+ val = ~(0x3 << (ovly_ch * 4)) & readl(base + ADE_OVLY_CTL);
+
+ DRM_DEBUG_DRIVER("ovly_ctl=0x%X\n", val);
+ writel(val, base + ADE_OVLY_CTL);
+}
+
+/*
+ * Typically, a channel looks like: DMA-->clip-->scale-->ctrans-->overlay
+ */
+static void ade_update_channel(struct ade_plane *aplane, struct ade_crtc *acrtc,
+ struct drm_framebuffer *fb, int crtc_x,
+ int crtc_y, unsigned int crtc_w,
+ unsigned int crtc_h, u32 src_x,
+ u32 src_y, u32 src_w, u32 src_h)
+{
+ u8 ch = aplane->ch;
+ u32 fmt = ade_get_format(fb->pixel_format);
+ u32 in_w;
+ u32 in_h;
+
+ DRM_DEBUG_DRIVER("channel%d: src:(%d, %d)-%dx%d, crtc:(%d, %d)-%dx%d",
+ ch + 1, src_x, src_y, src_w, src_h,
+ crtc_x, crtc_y, crtc_w, crtc_h);
+
+ /* 1) DMA setting */
+ in_w = src_w;
+ in_h = src_h;
+ ade_rdma_set(acrtc, fb, ch, src_y, in_h, fmt);
+
+ /* 2) clip setting */
+ ade_clip_set(acrtc, ch, fb->width, src_x, in_w, in_h);
+
+ /* 3) TODO: scale setting for overlay planes */
+
+ /* 4) TODO: ctran/csc setting for overlay planes */
+
+ /* 5) overlay/compositor routing setting */
+ ade_overlay_set(acrtc, ch, crtc_x, crtc_y, in_w, in_h, fmt);
+
+ DRM_DEBUG_DRIVER("exit success.\n");
+}
+
+static void ade_disable_channel(struct ade_plane *aplane,
+ struct ade_crtc *acrtc)
+{
+ u32 ch = aplane->ch;
+
+ DRM_DEBUG_DRIVER("disable channel%d\n", ch + 1);
+
+ /*
+	 * When the primary is disabled, the power is down,
+	 * so there is no need to disable this channel.
+ */
+ if (ch == PRIMARY_CH)
+ return;
+
+ /* disable read DMA */
+ ade_rdma_disable(acrtc, ch);
+
+ /* disable clip */
+ ade_clip_disable(acrtc, ch);
+
+ /* disable overlay routing */
+ ade_overlay_disable(acrtc, ch);
+
+ DRM_DEBUG_DRIVER("exit success.\n");
+}
+
+static int ade_plane_prepare_fb(struct drm_plane *plane,
+ const struct drm_plane_state *new_state)
+{
+ DRM_DEBUG_DRIVER("enter.\n");
+ DRM_DEBUG_DRIVER("exit success.\n");
+ return 0;
+}
+
+static void ade_plane_cleanup_fb(struct drm_plane *plane,
+ const struct drm_plane_state *old_state)
+{
+ DRM_DEBUG_DRIVER("enter.\n");
+ DRM_DEBUG_DRIVER("exit success.\n");
+}
+
+static int ade_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_crtc_state *crtc_state;
+ u32 src_x = state->src_x >> 16;
+ u32 src_y = state->src_y >> 16;
+ u32 src_w = state->src_w >> 16;
+ u32 src_h = state->src_h >> 16;
+ int crtc_x = state->crtc_x;
+ int crtc_y = state->crtc_y;
+ u32 crtc_w = state->crtc_w;
+ u32 crtc_h = state->crtc_h;
+
+ if (!crtc || !fb)
+ return 0;
+
+ crtc_state = drm_atomic_get_crtc_state(state->state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ if (src_w != crtc_w || src_h != crtc_h) {
+ DRM_ERROR("Scale not support!!!\n");
+ return -EINVAL;
+ }
+
+ if (src_x + src_w > fb->width ||
+ src_y + src_h > fb->height)
+ return -EINVAL;
+
+ if (crtc_x < 0 || crtc_y < 0)
+ return -EINVAL;
+
+ if (crtc_x + crtc_w > crtc_state->adjusted_mode.hdisplay ||
+ crtc_y + crtc_h > crtc_state->adjusted_mode.vdisplay)
+ return -EINVAL;
+
+ return 0;
+}
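
The src_* fields arrive from userspace in 16.16 fixed point, which is why
they are shifted down by 16 before the range checks; for illustration:

	/* A full-screen 1920x1080 source rectangle arrives as:
	 *   state->src_w = 1920 << 16 = 0x07800000
	 *   state->src_h = 1080 << 16 = 0x04380000
	 * and (src_w >> 16) == 1920 recovers the pixel width checked above.
	 */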
+
+static void ade_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct drm_plane_state *state = plane->state;
+ struct ade_plane *aplane = to_ade_plane(plane);
+ struct ade_crtc *acrtc;
+
+ if (!state->crtc)
+ return;
+
+ acrtc = to_ade_crtc(state->crtc);
+ ade_update_channel(aplane, acrtc, state->fb,
+ state->crtc_x, state->crtc_y,
+ state->crtc_w, state->crtc_h,
+ state->src_x >> 16, state->src_y >> 16,
+ state->src_w >> 16, state->src_h >> 16);
+}
+
+static void ade_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct ade_plane *aplane = to_ade_plane(plane);
+ struct ade_crtc *acrtc;
+
+ if (!old_state->crtc)
+ return;
+ acrtc = to_ade_crtc(old_state->crtc);
+ ade_disable_channel(aplane, acrtc);
+}
+
+static const struct drm_plane_helper_funcs ade_plane_helper_funcs = {
+ .prepare_fb = ade_plane_prepare_fb,
+ .cleanup_fb = ade_plane_cleanup_fb,
+ .atomic_check = ade_plane_atomic_check,
+ .atomic_update = ade_plane_atomic_update,
+ .atomic_disable = ade_plane_atomic_disable,
+};
+
+static struct drm_plane_funcs ade_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .set_property = drm_atomic_helper_plane_set_property,
+ .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static int ade_plane_init(struct drm_device *dev, struct ade_plane *aplane,
+ enum drm_plane_type type)
+{
+ const u32 *fmts;
+ u32 fmts_cnt;
+ int ret = 0;
+
+ /* get properties */
+ fmts_cnt = ade_get_channel_formats(aplane->ch, &fmts);
+	if (!fmts_cnt)
+		return -EINVAL;
+
+ ret = drm_universal_plane_init(dev, &aplane->base, 1, &ade_plane_funcs,
+ fmts, fmts_cnt, type);
+ if (ret) {
+ DRM_ERROR("fail to init plane, ch=%d\n", aplane->ch);
+ return ret;
+ }
+
+ drm_plane_helper_add(&aplane->base, &ade_plane_helper_funcs);
+
+ return 0;
+}
+
+static int ade_bind(struct device *dev, struct device *master, void *data)
+{
+ struct ade_data *ade = dev_get_drvdata(dev);
+ struct ade_hw_ctx *ctx = &ade->ctx;
+ struct ade_crtc *acrtc = &ade->acrtc;
+ struct drm_device *drm_dev = (struct drm_device *)data;
+ struct ade_plane *aplane;
+ enum drm_plane_type type;
+ int ret;
+ int i;
+
+ /*
+ * plane init
+	 * TODO: Only the primary plane is supported for now;
+	 * overlay planes still need to be done.
+ */
+ for (i = 0; i < ADE_CH_NUM; i++) {
+ aplane = &ade->aplane[i];
+ aplane->ch = i;
+ aplane->ctx = ctx;
+ type = i == PRIMARY_CH ? DRM_PLANE_TYPE_PRIMARY :
+ DRM_PLANE_TYPE_OVERLAY;
+
+ ret = ade_plane_init(drm_dev, aplane, type);
+ if (ret)
+ return ret;
+ }
+
+ /* crtc init */
+ acrtc->ctx = ctx;
+ ret = ade_crtc_init(drm_dev, &acrtc->base,
+ &ade->aplane[PRIMARY_CH].base);
+ if (ret)
+ return ret;
+
+ /* vblank irq init */
+ ret = request_irq(ctx->irq, ade_irq_handler, DRIVER_IRQ_SHARED,
+ drm_dev->driver->name, acrtc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void ade_unbind(struct device *dev, struct device *master, void *data)
+{
+ /* do nothing */
+}
+
+static const struct component_ops ade_ops = {
+ .bind = ade_bind,
+ .unbind = ade_unbind,
+};
+
+static int ade_dts_parse(struct platform_device *pdev, struct ade_hw_ctx *ctx)
+{
+ struct resource *res;
+ struct device *dev;
+ struct device_node *np;
+ int ret;
+
+ dev = &pdev->dev;
+ np = dev->of_node;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ade_base");
+ ctx->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ctx->base)) {
+ DRM_ERROR("failed to remap ade io base\n");
+ return PTR_ERR(ctx->base);
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "media_base");
+ ctx->media_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ctx->media_base)) {
+ DRM_ERROR("failed to remap media io base\n");
+ return PTR_ERR(ctx->media_base);
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "media_noc_base");
+ ctx->media_noc_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ctx->media_noc_base)) {
+ DRM_ERROR("failed to remap media noc base\n");
+ return PTR_ERR(ctx->media_noc_base);
+ }
+
+ ctx->irq = platform_get_irq(pdev, 0);
+ if (ctx->irq < 0) {
+		DRM_ERROR("failed to get the irq\n");
+		return ctx->irq;
+ }
+
+	ctx->ade_core_clk = devm_clk_get(&pdev->dev, "clk_ade_core");
+	if (IS_ERR(ctx->ade_core_clk)) {
+		DRM_ERROR("failed to get the ADE_CORE clock\n");
+		return PTR_ERR(ctx->ade_core_clk);
+	}
+	ctx->media_noc_clk = devm_clk_get(&pdev->dev,
+					"aclk_codec_jpeg_src");
+	if (IS_ERR(ctx->media_noc_clk)) {
+		DRM_ERROR("failed to get the CODEC_JPEG clock\n");
+		return PTR_ERR(ctx->media_noc_clk);
+	}
+	ctx->ade_pix_clk = devm_clk_get(&pdev->dev, "clk_ade_pix");
+	if (IS_ERR(ctx->ade_pix_clk)) {
+		DRM_ERROR("failed to get the ADE_PIX clock\n");
+		return PTR_ERR(ctx->ade_pix_clk);
+	}
+
+ ret = of_property_read_u32(np, "ade_core_clk_rate",
+ &ctx->ade_core_rate);
+ if (ret) {
+ DRM_ERROR("failed to parse the ade_core_clk_rate\n");
+ return -ENODEV;
+ }
+ ret = of_property_read_u32(np, "media_noc_clk_rate",
+ &ctx->media_noc_rate);
+ if (ret) {
+ DRM_ERROR("failed to parse the media_noc_clk_rate\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int ade_probe(struct platform_device *pdev)
+{
+ struct ade_data *ade;
+ int ret;
+
+ DRM_DEBUG_DRIVER("enter.\n");
+
+ ade = devm_kzalloc(&pdev->dev, sizeof(*ade), GFP_KERNEL);
+ if (!ade) {
+ DRM_ERROR("failed to alloc ade_data\n");
+ return -ENOMEM;
+ }
+
+ ret = ade_dts_parse(pdev, &ade->ctx);
+ if (ret) {
+		DRM_ERROR("failed to parse DT\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, ade);
+
+ return component_add(&pdev->dev, &ade_ops);
+}
+
+static int ade_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &ade_ops);
+
+ return 0;
+}
+
+static const struct of_device_id ade_of_match[] = {
+ { .compatible = "hisilicon,hi6220-ade" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ade_of_match);
+
+static struct platform_driver ade_driver = {
+ .probe = ade_probe,
+ .remove = ade_remove,
+ .driver = {
+ .name = "hisi-ade",
+ .owner = THIS_MODULE,
+ .of_match_table = ade_of_match,
+ },
+};
+
+module_platform_driver(ade_driver);
+
+MODULE_AUTHOR("Xinliang Liu <xinliang.liu@linaro.org>");
+MODULE_AUTHOR("Xinliang Liu <z.liuxinliang@hisilicon.com>");
+MODULE_AUTHOR("Xinwei Kong <kong.kongxinwei@hisilicon.com>");
+MODULE_DESCRIPTION("Hisilicon DRM ADE(crtc/plane) Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/hisilicon/hisi_drm_ade.h b/drivers/gpu/drm/hisilicon/hisi_drm_ade.h
new file mode 100644
index 000000000000..73f09384e2e5
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hisi_drm_ade.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __HISI_DRM_ADE_H__
+#define __HISI_DRM_ADE_H__
+
+int ade_enable_vblank(struct drm_device *dev, unsigned int pipe);
+void ade_disable_vblank(struct drm_device *dev, unsigned int pipe);
+
+#endif
diff --git a/drivers/gpu/drm/hisilicon/hisi_drm_drv.c b/drivers/gpu/drm/hisilicon/hisi_drm_drv.c
new file mode 100644
index 000000000000..76eb7118e731
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hisi_drm_drv.c
@@ -0,0 +1,280 @@
+/*
+ * Hisilicon SoCs drm master driver
+ *
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ * Author:
+ * Xinliang Liu <xinliang.liu@linaro.org>
+ * Xinliang Liu <z.liuxinliang@hisilicon.com>
+ * Xinwei Kong <kong.kongxinwei@hisilicon.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/of_platform.h>
+#include <linux/component.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "hisi_drm_ade.h"
+#include "hisi_drm_drv.h"
+
+#define DRIVER_NAME "hisi-drm"
+
+static int hisi_drm_unload(struct drm_device *dev)
+{
+ struct hisi_drm_private *priv = dev->dev_private;
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+ if (priv->fbdev) {
+ drm_fbdev_cma_fini(priv->fbdev);
+ priv->fbdev = NULL;
+ }
+#endif
+ drm_kms_helper_poll_fini(dev);
+ drm_vblank_cleanup(dev);
+ drm_mode_config_cleanup(dev);
+ devm_kfree(dev->dev, priv);
+ dev->dev_private = NULL;
+
+ return 0;
+}
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+static void hisi_fbdev_output_poll_changed(struct drm_device *dev)
+{
+ struct hisi_drm_private *priv = dev->dev_private;
+
+ if (priv->fbdev) {
+ drm_fbdev_cma_hotplug_event(priv->fbdev);
+ } else {
+ priv->fbdev = drm_fbdev_cma_init(dev, 32,
+ dev->mode_config.num_crtc,
+ dev->mode_config.num_connector);
+ if (IS_ERR(priv->fbdev))
+ priv->fbdev = NULL;
+ }
+}
+#endif
+
+static const struct drm_mode_config_funcs hisi_drm_mode_config_funcs = {
+ .fb_create = drm_fb_cma_create,
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+ .output_poll_changed = hisi_fbdev_output_poll_changed,
+#endif
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static void hisi_drm_mode_config_init(struct drm_device *dev)
+{
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+
+ dev->mode_config.max_width = 2048;
+ dev->mode_config.max_height = 2048;
+
+ dev->mode_config.funcs = &hisi_drm_mode_config_funcs;
+}
+
+static int hisi_drm_load(struct drm_device *dev, unsigned long flags)
+{
+ struct hisi_drm_private *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev->dev_private = priv;
+ dev_set_drvdata(dev->dev, dev);
+
+ /* dev->mode_config initialization */
+ drm_mode_config_init(dev);
+ hisi_drm_mode_config_init(dev);
+
+ /* bind and init sub drivers */
+ ret = component_bind_all(dev->dev, dev);
+ if (ret) {
+ DRM_ERROR("failed to bind all component.\n");
+ goto err_mode_config_cleanup;
+ }
+
+ /* vblank init */
+ ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+ if (ret) {
+ DRM_ERROR("failed to initialize vblank.\n");
+ goto err_unbind_all;
+ }
+ /* with irq_enabled = true, we can use the vblank feature. */
+ dev->irq_enabled = true;
+
+ /* reset all the states of crtc/plane/encoder/connector */
+ drm_mode_config_reset(dev);
+
+ /* init kms poll for handling hpd */
+ drm_kms_helper_poll_init(dev);
+
+ /* force detection after connectors init */
+ (void)drm_helper_hpd_irq_event(dev);
+
+ return 0;
+
+err_unbind_all:
+ component_unbind_all(dev->dev, dev);
+err_mode_config_cleanup:
+ drm_mode_config_cleanup(dev);
+ devm_kfree(dev->dev, priv);
+ dev->dev_private = NULL;
+
+ return ret;
+}
+
+static const struct file_operations hisi_drm_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = no_llseek,
+ .mmap = drm_gem_cma_mmap,
+};
+
+static struct dma_buf *hisi_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj,
+ int flags)
+{
+	/* we want to be able to write to the mmapped buffer */
+ flags |= O_RDWR;
+ return drm_gem_prime_export(dev, obj, flags);
+}
+
+static int hisi_gem_cma_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+
+	/* the Mali GPU needs the pitch 8-byte aligned for 32bpp */
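+	/*
+	 * For example (illustrative numbers): a 963-pixel-wide 32bpp
+	 * buffer has a min_pitch of 3852 bytes, which roundup() bumps
+	 * to 3856 to satisfy the alignment.
+	 */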
+ args->pitch = roundup(min_pitch, 8);
+
+ return drm_gem_cma_dumb_create_internal(file, dev, args);
+}
+
+static struct drm_driver hisi_drm_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
+ DRIVER_ATOMIC | DRIVER_HAVE_IRQ,
+ .load = hisi_drm_load,
+ .unload = hisi_drm_unload,
+ .fops = &hisi_drm_fops,
+ .set_busid = drm_platform_set_busid,
+
+ .gem_free_object = drm_gem_cma_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .dumb_create = hisi_gem_cma_dumb_create,
+ .dumb_map_offset = drm_gem_cma_dumb_map_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = hisi_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+
+ .get_vblank_counter = drm_vblank_count,
+ .enable_vblank = ade_enable_vblank,
+ .disable_vblank = ade_disable_vblank,
+
+ .name = "hisi",
+ .desc = "Hisilicon SoCs' DRM Driver",
+ .date = "20150718",
+ .major = 1,
+ .minor = 0,
+};
+
+static int compare_of(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+static int hisi_drm_bind(struct device *dev)
+{
+ dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ return drm_platform_init(&hisi_drm_driver, to_platform_device(dev));
+}
+
+static void hisi_drm_unbind(struct device *dev)
+{
+ drm_put_dev(dev_get_drvdata(dev));
+}
+
+static const struct component_master_ops hisi_drm_ops = {
+ .bind = hisi_drm_bind,
+ .unbind = hisi_drm_unbind,
+};
+
+static int hisi_drm_platform_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *child_np;
+ struct component_match *match = NULL;
+
+ of_platform_populate(node, NULL, NULL, dev);
+
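+	/*
+	 * Each enabled child node (e.g. the ADE and DSI devices) becomes
+	 * a component; the master only binds once all of them have
+	 * probed and registered themselves.
+	 */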
+ child_np = of_get_next_available_child(node, NULL);
+ while (child_np) {
+ component_match_add(dev, &match, compare_of, child_np);
+ of_node_put(child_np);
+ child_np = of_get_next_available_child(node, child_np);
+ }
+
+	return component_master_add_with_match(dev, &hisi_drm_ops, match);
+}
+
+static int hisi_drm_platform_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &hisi_drm_ops);
+ of_platform_depopulate(&pdev->dev);
+ return 0;
+}
+
+static const struct of_device_id hisi_drm_dt_ids[] = {
+ { .compatible = "hisilicon,hi6220-dss", },
+ { /* end node */ },
+};
+MODULE_DEVICE_TABLE(of, hisi_drm_dt_ids);
+
+static struct platform_driver hisi_drm_platform_driver = {
+ .probe = hisi_drm_platform_probe,
+ .remove = hisi_drm_platform_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ .of_match_table = hisi_drm_dt_ids,
+ },
+};
+
+module_platform_driver(hisi_drm_platform_driver);
+
+MODULE_AUTHOR("Xinliang Liu <xinliang.liu@linaro.org>");
+MODULE_AUTHOR("Xinliang Liu <z.liuxinliang@hisilicon.com>");
+MODULE_AUTHOR("Xinwei Kong <kong.kongxinwei@hisilicon.com>");
+MODULE_DESCRIPTION("Hisilicon SoCs' DRM master driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/hisilicon/hisi_drm_drv.h b/drivers/gpu/drm/hisilicon/hisi_drm_drv.h
new file mode 100644
index 000000000000..984121f45489
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hisi_drm_drv.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __HISI_DRM_DRV_H__
+#define __HISI_DRM_DRV_H__
+
+struct hisi_drm_private {
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+ struct drm_fbdev_cma *fbdev;
+#endif
+};
+
+#endif /* __HISI_DRM_DRV_H__ */
diff --git a/drivers/gpu/drm/hisilicon/hisi_drm_dsi.c b/drivers/gpu/drm/hisilicon/hisi_drm_dsi.c
new file mode 100644
index 000000000000..83c5445c260d
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hisi_drm_dsi.c
@@ -0,0 +1,832 @@
+/*
+ * Hisilicon Hi6220 SoC DSI driver
+ *
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ * Author:
+ * Xinliang Liu <xinliang.liu@linaro.org>
+ * Xinliang Liu <z.liuxinliang@hisilicon.com>
+ * Xinwei Kong <kong.kongxinwei@hisilicon.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/of_graph.h>
+
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_encoder_slave.h>
+#include <drm/drm_atomic_helper.h>
+
+#include "hisi_dsi_reg.h"
+
+#define MAX_TX_ESC_CLK (10)
+#define ROUND(x, y) ((x) / (y) + ((x) % (y) * 10 / (y) >= 5 ? 1 : 0))
+#define DEFAULT_MIPI_CLK_RATE 19200000
+#define DEFAULT_MIPI_CLK_PERIOD_PS (1000000000 / (DEFAULT_MIPI_CLK_RATE / 1000))
+#define R(x) ((u32)((((u64)(x) * (u64)1000 * (u64)mode->clock) / \
+ phy->lane_byte_clk_kHz)))
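+
+/*
+ * Worked example for ROUND() (illustrative values): ROUND(133, 16) is
+ * 133 / 16 = 8 remainder 5, and 5 * 10 / 16 = 3 < 5, so the result
+ * stays 8, i.e. x/y rounded to the nearest integer.  R() converts a
+ * duration in byte-lane clocks back to pixel clocks, scaled by 1000.
+ */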
+
+#define encoder_to_dsi(encoder) \
+ container_of(encoder, struct hisi_dsi, encoder)
+#define host_to_dsi(host) \
+ container_of(host, struct hisi_dsi, host)
+
+struct mipi_phy_register {
+ u32 clk_t_lpx;
+ u32 clk_t_hs_prepare;
+ u32 clk_t_hs_zero;
+ u32 clk_t_hs_trial;
+ u32 clk_t_wakeup;
+ u32 data_t_lpx;
+ u32 data_t_hs_prepare;
+ u32 data_t_hs_zero;
+ u32 data_t_hs_trial;
+ u32 data_t_ta_go;
+ u32 data_t_ta_get;
+ u32 data_t_wakeup;
+ u32 hstx_ckg_sel;
+ u32 pll_fbd_div5f;
+ u32 pll_fbd_div1f;
+ u32 pll_fbd_2p;
+ u32 pll_enbwt;
+ u32 pll_fbd_p;
+ u32 pll_fbd_s;
+ u32 pll_pre_div1p;
+ u32 pll_pre_p;
+ u32 pll_vco_750M;
+ u32 pll_lpf_rs;
+ u32 pll_lpf_cs;
+ u32 clklp2hs_time;
+ u32 clkhs2lp_time;
+ u32 lp2hs_time;
+ u32 hs2lp_time;
+ u32 clk_to_data_delay;
+ u32 data_to_clk_delay;
+ u32 lane_byte_clk_kHz;
+ u32 clk_division;
+};
+
+struct dsi_hw_ctx {
+ void __iomem *base;
+ struct clk *dsi_cfg_clk;
+};
+
+struct hisi_dsi {
+ struct drm_encoder encoder;
+ struct drm_bridge *bridge;
+ struct mipi_dsi_host host;
+ struct drm_display_mode cur_mode;
+ struct dsi_hw_ctx *ctx;
+ struct mipi_phy_register phy;
+
+ u32 lanes;
+ enum mipi_dsi_pixel_format format;
+ unsigned long mode_flags;
+ bool enable;
+};
+
+struct dsi_data {
+ struct hisi_dsi dsi;
+ struct dsi_hw_ctx ctx;
+};
+
+struct dsi_phy_seq_info {
+ u32 min_range_kHz;
+ u32 max_range_kHz;
+ u32 pll_vco_750M;
+ u32 hstx_ckg_sel;
+};
+
+static const struct dsi_phy_seq_info dphy_seq_info[] = {
+ { 46000, 62000, 1, 7 },
+ { 62000, 93000, 0, 7 },
+ { 93000, 125000, 1, 6 },
+ { 125000, 187000, 0, 6 },
+ { 187000, 250000, 1, 5 },
+ { 250000, 375000, 0, 5 },
+ { 375000, 500000, 1, 4 },
+ { 500000, 750000, 0, 4 },
+ { 750000, 1000000, 1, 0 },
+ { 1000000, 1500000, 0, 0 }
+};
+
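+/*
+ * Choose PLL dividers so that the generated D-PHY rate is equal to or
+ * faster than the requested one.  Per the computation at the end of
+ * the loop below, the resulting rate is
+ *
+ *	f_kHz = 10^9 * m_pll / (cfg_clk_ps * n_pll * q_pll)
+ *
+ * and the requested rate is retried in 10 kHz steps until f_kHz
+ * reaches it.
+ */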
+static void set_dsi_phy_rate_equal_or_faster(u32 phy_freq_kHz,
+ struct mipi_phy_register *phy)
+{
+ u32 ui = 0;
+ u32 cfg_clk_ps = DEFAULT_MIPI_CLK_PERIOD_PS;
+ u32 i = 0;
+ u32 q_pll = 1;
+ u32 m_pll = 0;
+ u32 n_pll = 0;
+ u32 r_pll = 1;
+ u32 m_n = 0;
+ u32 m_n_int = 0;
+ u64 f_kHz;
+ u64 temp;
+ u64 tmp_kHz = phy_freq_kHz;
+
+ do {
+ f_kHz = tmp_kHz;
+
+ /* Find the PLL clock range from the table */
+ for (i = 0; i < ARRAY_SIZE(dphy_seq_info); i++)
+ if (f_kHz > dphy_seq_info[i].min_range_kHz &&
+ f_kHz <= dphy_seq_info[i].max_range_kHz)
+ break;
+
+ if (i == ARRAY_SIZE(dphy_seq_info)) {
+			DRM_ERROR("%llukHz out of range\n", f_kHz);
+ return;
+ }
+
+ phy->pll_vco_750M = dphy_seq_info[i].pll_vco_750M;
+ phy->hstx_ckg_sel = dphy_seq_info[i].hstx_ckg_sel;
+
+ if (phy->hstx_ckg_sel <= 7 &&
+ phy->hstx_ckg_sel >= 4)
+ q_pll = 0x10 >> (7 - phy->hstx_ckg_sel);
+
+ temp = f_kHz * (u64)q_pll * (u64)cfg_clk_ps;
+ m_n_int = temp / (u64)1000000000;
+ m_n = (temp % (u64)1000000000) / (u64)100000000;
+
+ if (m_n_int % 2 == 0) {
+ if (m_n * 6 >= 50) {
+ n_pll = 2;
+ m_pll = (m_n_int + 1) * n_pll;
+ } else if (m_n * 6 >= 30) {
+ n_pll = 3;
+ m_pll = m_n_int * n_pll + 2;
+ } else {
+ n_pll = 1;
+ m_pll = m_n_int * n_pll;
+ }
+ } else {
+ if (m_n * 6 >= 50) {
+ n_pll = 1;
+ m_pll = (m_n_int + 1) * n_pll;
+ } else if (m_n * 6 >= 30) {
+ n_pll = 1;
+ m_pll = (m_n_int + 1) * n_pll;
+ } else if (m_n * 6 >= 10) {
+ n_pll = 3;
+ m_pll = m_n_int * n_pll + 1;
+ } else {
+ n_pll = 2;
+ m_pll = m_n_int * n_pll;
+ }
+ }
+
+ if (n_pll == 1) {
+ phy->pll_fbd_p = 0;
+ phy->pll_pre_div1p = 1;
+ } else {
+ phy->pll_fbd_p = n_pll;
+ phy->pll_pre_div1p = 0;
+ }
+
+ if (phy->pll_fbd_2p <= 7 && phy->pll_fbd_2p >= 4)
+ r_pll = 0x10 >> (7 - phy->pll_fbd_2p);
+
+ if (m_pll == 2) {
+ phy->pll_pre_p = 0;
+ phy->pll_fbd_s = 0;
+ phy->pll_fbd_div1f = 0;
+ phy->pll_fbd_div5f = 1;
+ } else if (m_pll >= 2 * 2 * r_pll && m_pll <= 2 * 4 * r_pll) {
+ phy->pll_pre_p = m_pll / (2 * r_pll);
+ phy->pll_fbd_s = 0;
+ phy->pll_fbd_div1f = 1;
+ phy->pll_fbd_div5f = 0;
+ } else if (m_pll >= 2 * 5 * r_pll && m_pll <= 2 * 150 * r_pll) {
+ if (((m_pll / (2 * r_pll)) % 2) == 0) {
+ phy->pll_pre_p =
+ (m_pll / (2 * r_pll)) / 2 - 1;
+ phy->pll_fbd_s =
+ (m_pll / (2 * r_pll)) % 2 + 2;
+ } else {
+ phy->pll_pre_p =
+ (m_pll / (2 * r_pll)) / 2;
+ phy->pll_fbd_s =
+ (m_pll / (2 * r_pll)) % 2;
+ }
+ phy->pll_fbd_div1f = 0;
+ phy->pll_fbd_div5f = 0;
+ } else {
+ phy->pll_pre_p = 0;
+ phy->pll_fbd_s = 0;
+ phy->pll_fbd_div1f = 0;
+ phy->pll_fbd_div5f = 1;
+ }
+
+ f_kHz = (u64)1000000000 * (u64)m_pll /
+ ((u64)cfg_clk_ps * (u64)n_pll * (u64)q_pll);
+
+ if (f_kHz >= phy_freq_kHz)
+ break;
+
+ tmp_kHz += 10;
+
+ } while (1);
+
+ ui = 1000000 / f_kHz;
+
+ phy->clk_t_lpx = ROUND(50, 8 * ui);
+ phy->clk_t_hs_prepare = ROUND(133, 16 * ui) - 1;
+
+ phy->clk_t_hs_zero = ROUND(262, 8 * ui);
+ phy->clk_t_hs_trial = 2 * (ROUND(60, 8 * ui) - 1);
+ phy->clk_t_wakeup = ROUND(1000000, (cfg_clk_ps / 1000) - 1);
+ if (phy->clk_t_wakeup > 0xff)
+ phy->clk_t_wakeup = 0xff;
+ phy->data_t_wakeup = phy->clk_t_wakeup;
+ phy->data_t_lpx = phy->clk_t_lpx;
+ phy->data_t_hs_prepare = ROUND(125 + 10 * ui, 16 * ui) - 1;
+ phy->data_t_hs_zero = ROUND(105 + 6 * ui, 8 * ui);
+ phy->data_t_hs_trial = 2 * (ROUND(60 + 4 * ui, 8 * ui) - 1);
+ phy->data_t_ta_go = 3;
+ phy->data_t_ta_get = 4;
+
+ phy->pll_enbwt = 1;
+ phy->clklp2hs_time = ROUND(407, 8 * ui) + 12;
+ phy->clkhs2lp_time = ROUND(105 + 12 * ui, 8 * ui);
+ phy->lp2hs_time = ROUND(240 + 12 * ui, 8 * ui) + 1;
+ phy->hs2lp_time = phy->clkhs2lp_time;
+ phy->clk_to_data_delay = 1 + phy->clklp2hs_time;
+ phy->data_to_clk_delay = ROUND(60 + 52 * ui, 8 * ui) +
+ phy->clkhs2lp_time;
+
+ phy->lane_byte_clk_kHz = f_kHz / 8;
+ phy->clk_division = phy->lane_byte_clk_kHz / MAX_TX_ESC_CLK;
+ if (phy->lane_byte_clk_kHz % MAX_TX_ESC_CLK)
+ phy->clk_division++;
+}
+
+static u32 dsi_get_dpi_color_coding(enum mipi_dsi_pixel_format format)
+{
+ u32 val;
+
+	/* TODO: only RGB888 is supported now; add more formats */
+ switch (format) {
+ case MIPI_DSI_FMT_RGB888:
+ val = DSI_24BITS_1;
+ break;
+ default:
+ val = DSI_24BITS_1;
+ break;
+ }
+
+ return val;
+}
+
+static void dsi_mipi_phy_clks(void __iomem *base,
+ struct mipi_phy_register *phy,
+ u32 lanes)
+{
+ u32 delay_count;
+ bool is_ready;
+ u32 val;
+ u32 i;
+
+ /* set lanes value */
+ val = (lanes - 1) | (PHY_STOP_WAIT_TIME << 8);
+ writel(val, base + PHY_IF_CFG);
+
+ /* set phy clk division */
+ val = readl(base + CLKMGR_CFG) | phy->clk_division;
+ writel(val, base + CLKMGR_CFG);
+
+ /* clean up phy set param */
+ writel(0, base + PHY_RSTZ);
+ writel(0, base + PHY_TST_CTRL0);
+ writel(1, base + PHY_TST_CTRL0);
+ writel(0, base + PHY_TST_CTRL0);
+
+ /* clock lane Timing control - TLPX */
+ dsi_phy_tst_set(base, 0x10010, phy->clk_t_lpx);
+
+ /* clock lane Timing control - THS-PREPARE */
+ dsi_phy_tst_set(base, 0x10011, phy->clk_t_hs_prepare);
+
+ /* clock lane Timing control - THS-ZERO */
+ dsi_phy_tst_set(base, 0x10012, phy->clk_t_hs_zero);
+
+ /* clock lane Timing control - THS-TRAIL */
+ dsi_phy_tst_set(base, 0x10013, phy->clk_t_hs_trial);
+
+ /* clock lane Timing control - TWAKEUP */
+ dsi_phy_tst_set(base, 0x10014, phy->clk_t_wakeup);
+
+ /* data lane */
+ for (i = 0; i < lanes; i++) {
+ /* Timing control - TLPX*/
+ dsi_phy_tst_set(base, 0x10020 + (i << 4), phy->data_t_lpx);
+
+ /* Timing control - THS-PREPARE */
+ dsi_phy_tst_set(base, 0x10021 + (i << 4),
+ phy->data_t_hs_prepare);
+
+ /* Timing control - THS-ZERO */
+ dsi_phy_tst_set(base, 0x10022 + (i << 4), phy->data_t_hs_zero);
+
+ /* Timing control - THS-TRAIL */
+ dsi_phy_tst_set(base, 0x10023 + (i << 4), phy->data_t_hs_trial);
+
+ /* Timing control - TTA-GO */
+ dsi_phy_tst_set(base, 0x10024 + (i << 4), phy->data_t_ta_go);
+
+ /* Timing control - TTA-GET */
+ dsi_phy_tst_set(base, 0x10025 + (i << 4), phy->data_t_ta_get);
+
+ /* Timing control - TWAKEUP */
+ dsi_phy_tst_set(base, 0x10026 + (i << 4), phy->data_t_wakeup);
+ }
+
+ /* physical configuration I */
+ dsi_phy_tst_set(base, 0x10060, phy->hstx_ckg_sel);
+
+ /* physical configuration pll II */
+ val = (phy->pll_fbd_div5f << 5) + (phy->pll_fbd_div1f << 4) +
+ (phy->pll_fbd_2p << 1) + phy->pll_enbwt;
+ dsi_phy_tst_set(base, 0x10063, val);
+
+ /* physical configuration pll II */
+ dsi_phy_tst_set(base, 0x10064, phy->pll_fbd_p);
+
+ /* physical configuration pll III */
+ dsi_phy_tst_set(base, 0x10065, phy->pll_fbd_s);
+
+ /*physical configuration pll IV*/
+ val = (phy->pll_pre_div1p << 7) + phy->pll_pre_p;
+ dsi_phy_tst_set(base, 0x10066, val);
+
+ /*physical configuration pll V*/
+ val = (phy->pll_vco_750M << 4) + (phy->pll_lpf_rs << 2) +
+ phy->pll_lpf_cs + BIT(5);
+ dsi_phy_tst_set(base, 0x10067, val);
+
+ writel(BIT(2), base + PHY_RSTZ);
+ udelay(1);
+ writel(BIT(2) | BIT(0), base + PHY_RSTZ);
+ udelay(1);
+ writel(BIT(2) | BIT(1) | BIT(0), base + PHY_RSTZ);
+ usleep_range(1000, 1500);
+
+ /* wait for phy's clock ready */
+ delay_count = 0;
+ is_ready = false;
+ while (1) {
+ val = readl(base + PHY_STATUS);
+ if (((BIT(0) | BIT(2)) & val) || delay_count > 100) {
+			is_ready = delay_count < 100;
+ delay_count = 0;
+ break;
+ }
+
+ udelay(1);
+ ++delay_count;
+ }
+
+ if (!is_ready)
+		DRM_INFO("phylock and phystopstateclklane are not ready.\n");
+}
+
+static void dsi_set_mode_timing(void __iomem *base,
+ struct mipi_phy_register *phy,
+ struct drm_display_mode *mode,
+ enum mipi_dsi_pixel_format format)
+{
+ u32 hfp, hbp, hsw, vfp, vbp, vsw;
+ u32 hline_time;
+ u32 hsa_time;
+ u32 hbp_time;
+ u32 pixel_clk_kHz;
+ int htot, vtot;
+ u32 val;
+
+ /* DSI color coding setting */
+ val = dsi_get_dpi_color_coding(format);
+ writel(val, base + DPI_COLOR_CODING);
+
+ /* DSI format and pol setting */
+ val = (mode->flags & DRM_MODE_FLAG_NHSYNC ? 1 : 0) << 2;
+ val |= (mode->flags & DRM_MODE_FLAG_NVSYNC ? 1 : 0) << 1;
+ writel(val, base + DPI_CFG_POL);
+
+ /*
+ * The DSI IP accepts vertical timing using lines as normal,
+ * but horizontal timing is a mixture of pixel-clocks for the
+ * active region and byte-lane clocks for the blanking-related
+ * timings. hfp is specified as the total hline_time in byte-
+ * lane clocks minus hsa, hbp and active.
+ */
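+	/*
+	 * For example (illustrative 1080p numbers): a 148500 kHz pixel
+	 * clock on four RGB888 lanes gives a 111375 kHz byte-lane clock,
+	 * so a 2200-pixel htotal maps to 2200 * 111375 / 148500 = 1650
+	 * byte-lane clocks of hline_time.
+	 */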
+ pixel_clk_kHz = mode->clock;
+ htot = mode->htotal;
+ vtot = mode->vtotal;
+ hfp = mode->hsync_start - mode->hdisplay;
+ hbp = mode->htotal - mode->hsync_end;
+ hsw = mode->hsync_end - mode->hsync_start;
+ vfp = mode->vsync_start - mode->vdisplay;
+ vbp = mode->vtotal - mode->vsync_end;
+ vsw = mode->vsync_end - mode->vsync_start;
+ if (vsw > 15) {
+		DRM_INFO("vsw exceeds 15, clamping\n");
+ vtot -= vsw - 15;
+ vsw = 15;
+ }
+
+ hsa_time = (hsw * phy->lane_byte_clk_kHz) / pixel_clk_kHz;
+ hbp_time = (hbp * phy->lane_byte_clk_kHz) / pixel_clk_kHz;
+ hline_time = (((u64)htot * (u64)phy->lane_byte_clk_kHz)) /
+ pixel_clk_kHz;
+
+ if ((R(hline_time) / 1000) > htot) {
+		DRM_INFO("rounding hline_time down: %u\n", hline_time);
+ hline_time--;
+ }
+
+ if ((R(hline_time) / 1000) < htot) {
+		DRM_INFO("rounding hline_time up: %u\n", hline_time);
+ hline_time++;
+ }
+
+ /* all specified in byte-lane clocks */
+ writel(hsa_time, base + VID_HSA_TIME);
+ writel(hbp_time, base + VID_HBP_TIME);
+ writel(hline_time, base + VID_HLINE_TIME);
+
+ writel(vsw, base + VID_VSA_LINES);
+ writel(vbp, base + VID_VBP_LINES);
+ writel(vfp, base + VID_VFP_LINES);
+ writel(mode->vdisplay, base + VID_VACTIVE_LINES);
+ writel(mode->hdisplay, base + VID_PKT_SIZE);
+}
+
+static void dsi_set_video_mode_type(void __iomem *base,
+ struct mipi_phy_register *phy,
+ unsigned long flags)
+{
+ u32 val;
+ u32 mode_mask = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
+ u32 non_burst_sync_pulse = MIPI_DSI_MODE_VIDEO |
+ MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
+ u32 non_burst_sync_event = MIPI_DSI_MODE_VIDEO;
+
+ /*
+ * choose video type
+ */
+ if ((flags & mode_mask) == non_burst_sync_pulse)
+ val = DSI_NON_BURST_SYNC_PULSES;
+ else if ((flags & mode_mask) == non_burst_sync_event)
+ val = DSI_NON_BURST_SYNC_EVENTS;
+ else
+ val = DSI_BURST_SYNC_PULSES_1;
+
+ writel(val, base + VID_MODE_CFG);
+	/* TODO: configure LP command transfer to support LCD panels */
+}
+
+static void dsi_mipi_init(struct hisi_dsi *dsi)
+{
+ struct dsi_hw_ctx *ctx = dsi->ctx;
+ struct mipi_phy_register *phy = &dsi->phy;
+ struct drm_display_mode *mode = &dsi->cur_mode;
+ void __iomem *base = ctx->base;
+ u32 dphy_freq_kHz;
+
+	/* compute the phy params */
+ dphy_freq_kHz = mode->clock * 24 / dsi->lanes;
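+	/*
+	 * E.g. RGB888 (24 bpp) at a 148500 kHz pixel clock on 4 lanes
+	 * needs a D-PHY rate of at least 148500 * 24 / 4 = 891000 kHz.
+	 */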
+ set_dsi_phy_rate_equal_or_faster(dphy_freq_kHz, phy);
+
+ /* reset Core */
+ writel(0, base + PWR_UP);
+
+ /* set phy clocks */
+ dsi_mipi_phy_clks(base, phy, dsi->lanes);
+
+ /* set dsi mode */
+ dsi_set_mode_timing(base, phy, mode, dsi->format);
+
+ /* set video mode type and low power */
+ dsi_set_video_mode_type(base, phy, dsi->mode_flags);
+
+ /* DSI and D-PHY Initialization */
+ writel(DSI_VIDEO_MODE, base + MODE_CFG);
+ writel(BIT(0), base + LPCLK_CTRL);
+ writel(BIT(0), base + PWR_UP);
+
+ DRM_INFO("lanes=%d, pixel_clk=%d kHz, bytes_freq=%d kHz\n",
+ dsi->lanes, mode->clock, phy->lane_byte_clk_kHz);
+}
+
+static void dsi_encoder_disable(struct drm_encoder *encoder)
+{
+ struct hisi_dsi *dsi = encoder_to_dsi(encoder);
+ struct dsi_hw_ctx *ctx = dsi->ctx;
+ void __iomem *base = ctx->base;
+
+ DRM_DEBUG_DRIVER("enter\n");
+ if (!dsi->enable)
+ return;
+
+ writel(0, base + PWR_UP);
+ writel(0, base + LPCLK_CTRL);
+ writel(0, base + PHY_RSTZ);
+ clk_disable_unprepare(ctx->dsi_cfg_clk);
+
+ dsi->enable = false;
+ DRM_DEBUG_DRIVER("exit success.\n");
+}
+
+static void dsi_encoder_enable(struct drm_encoder *encoder)
+{
+ struct hisi_dsi *dsi = encoder_to_dsi(encoder);
+ struct dsi_hw_ctx *ctx = dsi->ctx;
+ int ret;
+
+ DRM_DEBUG_DRIVER("enter.\n");
+ if (dsi->enable)
+ return;
+
+ /* mipi dphy clock enable */
+ ret = clk_prepare_enable(ctx->dsi_cfg_clk);
+ if (ret) {
+ DRM_ERROR("fail to enable dsi_cfg_clk: %d\n", ret);
+ return;
+ }
+
+ dsi_mipi_init(dsi);
+
+ dsi->enable = true;
+ DRM_DEBUG_DRIVER("exit success.\n");
+}
+
+static void dsi_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct hisi_dsi *dsi = encoder_to_dsi(encoder);
+
+ DRM_DEBUG_DRIVER("enter.\n");
+ drm_mode_copy(&dsi->cur_mode, adj_mode);
+ DRM_DEBUG_DRIVER("exit success.\n");
+}
+
+static int dsi_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_display_mode *mode = &crtc_state->mode;
+
+ DRM_DEBUG_DRIVER("enter.\n");
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+		DRM_ERROR("INTERLACE mode is not supported\n");
+ return MODE_NO_INTERLACE;
+ }
+
+	/* the supported pixel clock range is (1190494208/64, 1190494208) Hz */
+	if (mode->clock < 18602 || mode->clock > 1190494) {
+		DRM_ERROR("mode clock not supported\n");
+ return MODE_CLOCK_RANGE;
+ }
+
+ DRM_DEBUG_DRIVER("exit success.\n");
+ return 0;
+}
+
+static const struct drm_encoder_helper_funcs hisi_encoder_helper_funcs = {
+ .atomic_check = dsi_encoder_atomic_check,
+ .mode_set = dsi_encoder_mode_set,
+ .enable = dsi_encoder_enable,
+ .disable = dsi_encoder_disable
+};
+
+static const struct drm_encoder_funcs hisi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static int hisi_drm_encoder_init(struct drm_device *dev,
+ struct drm_encoder *encoder)
+{
+ int ret;
+
+ encoder->possible_crtcs = 1;
+ ret = drm_encoder_init(dev, encoder, &hisi_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ if (ret) {
+ DRM_ERROR("failed to init dsi encoder\n");
+ return ret;
+ }
+
+ drm_encoder_helper_add(encoder, &hisi_encoder_helper_funcs);
+
+ return 0;
+}
+
+static int dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *mdsi)
+{
+ struct hisi_dsi *dsi = host_to_dsi(host);
+
+ if (mdsi->lanes < 1 || mdsi->lanes > 4) {
+ DRM_ERROR("dsi device params invalid\n");
+ return -EINVAL;
+ }
+
+ dsi->lanes = mdsi->lanes;
+ dsi->format = mdsi->format;
+ dsi->mode_flags = mdsi->mode_flags;
+
+ return 0;
+}
+
+static int dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *mdsi)
+{
+ /* do nothing */
+ return 0;
+}
+
+static struct mipi_dsi_host_ops dsi_host_ops = {
+ .attach = dsi_host_attach,
+ .detach = dsi_host_detach,
+};
+
+static int dsi_host_init(struct device *dev, struct hisi_dsi *dsi)
+{
+ struct mipi_dsi_host *host = &dsi->host;
+ int ret;
+
+ host->dev = dev;
+ host->ops = &dsi_host_ops;
+ ret = mipi_dsi_host_register(host);
+ if (ret) {
+ DRM_ERROR("failed to register dsi host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dsi_bridge_init(struct drm_device *dev, struct hisi_dsi *dsi)
+{
+ struct drm_encoder *encoder = &dsi->encoder;
+ struct drm_bridge *bridge = dsi->bridge;
+ int ret;
+
+ /* associate the bridge to dsi encoder */
+ encoder->bridge = bridge;
+ bridge->encoder = encoder;
+
+ ret = drm_bridge_attach(dev, bridge);
+ if (ret) {
+		DRM_ERROR("failed to attach external bridge\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dsi_bind(struct device *dev, struct device *master, void *data)
+{
+ struct dsi_data *ddata = dev_get_drvdata(dev);
+ struct hisi_dsi *dsi = &ddata->dsi;
+ struct drm_device *drm_dev = data;
+ int ret;
+
+ ret = hisi_drm_encoder_init(drm_dev, &dsi->encoder);
+ if (ret)
+ return ret;
+
+ ret = dsi_host_init(dev, dsi);
+ if (ret)
+ return ret;
+
+ ret = dsi_bridge_init(drm_dev, dsi);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void dsi_unbind(struct device *dev, struct device *master, void *data)
+{
+ /* do nothing */
+}
+
+static const struct component_ops dsi_ops = {
+ .bind = dsi_bind,
+ .unbind = dsi_unbind,
+};
+
+static int dsi_parse_dt(struct platform_device *pdev, struct hisi_dsi *dsi)
+{
+ struct dsi_hw_ctx *ctx = dsi->ctx;
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *endpoint, *bridge_node;
+ struct drm_bridge *bridge;
+ struct resource *res;
+
+ /*
+ * Get the endpoint node. In our case, dsi has one output port
+ * to which the external HDMI bridge is connected.
+ */
+	endpoint = of_graph_get_next_endpoint(np, NULL);
+	if (!endpoint) {
+		DRM_ERROR("no valid endpoint node\n");
+		return -ENODEV;
+	}
+
+	bridge_node = of_graph_get_remote_port_parent(endpoint);
+	of_node_put(endpoint);
+	if (!bridge_node) {
+		DRM_ERROR("no valid bridge node\n");
+		return -ENODEV;
+	}
+
+	bridge = of_drm_find_bridge(bridge_node);
+	of_node_put(bridge_node);
+	if (!bridge) {
+		DRM_INFO("waiting for the external HDMI bridge driver\n");
+		return -EPROBE_DEFER;
+	}
+	dsi->bridge = bridge;
+
+ ctx->dsi_cfg_clk = devm_clk_get(&pdev->dev, "pclk_dsi");
+ if (IS_ERR(ctx->dsi_cfg_clk)) {
+		DRM_ERROR("failed to get the dsi pclk clock\n");
+ return PTR_ERR(ctx->dsi_cfg_clk);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ctx->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ctx->base)) {
+ DRM_ERROR("failed to remap dsi io region\n");
+ return PTR_ERR(ctx->base);
+ }
+
+ return 0;
+}
+
+static int dsi_probe(struct platform_device *pdev)
+{
+ struct dsi_data *data;
+ struct hisi_dsi *dsi;
+ struct dsi_hw_ctx *ctx;
+ int ret;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ DRM_ERROR("failed to allocate dsi data.\n");
+ return -ENOMEM;
+ }
+ dsi = &data->dsi;
+ ctx = &data->ctx;
+ dsi->ctx = ctx;
+
+ ret = dsi_parse_dt(pdev, dsi);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, data);
+
+ return component_add(&pdev->dev, &dsi_ops);
+}
+
+static int dsi_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dsi_ops);
+
+ return 0;
+}
+
+static const struct of_device_id dsi_of_match[] = {
+ {.compatible = "hisilicon,hi6220-dsi"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, dsi_of_match);
+
+static struct platform_driver dsi_driver = {
+ .probe = dsi_probe,
+ .remove = dsi_remove,
+ .driver = {
+ .name = "hisi-dsi",
+ .owner = THIS_MODULE,
+ .of_match_table = dsi_of_match,
+ },
+};
+
+module_platform_driver(dsi_driver);
+
+MODULE_AUTHOR("Xinliang Liu <xinliang.liu@linaro.org>");
+MODULE_AUTHOR("Xinliang Liu <z.liuxinliang@hisilicon.com>");
+MODULE_AUTHOR("Xinwei Kong <kong.kongxinwei@hisilicon.com>");
+MODULE_DESCRIPTION("Hisilicon Hi6220 SoC DSI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/hisilicon/hisi_dsi_reg.h b/drivers/gpu/drm/hisilicon/hisi_dsi_reg.h
new file mode 100644
index 000000000000..db8f9df5822f
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hisi_dsi_reg.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __HISI_DSI_REG_H__
+#define __HISI_DSI_REG_H__
+
+/*
+ * regs
+ */
+#define PWR_UP (0x4) /* Core power-up */
+#define PHY_IF_CFG (0xA4) /* D-PHY interface configuration */
+#define CLKMGR_CFG (0x8) /* the internal clock dividers */
+#define PHY_RSTZ (0xA0) /* D-PHY reset control */
+#define PHY_TST_CTRL0 (0xB4) /* D-PHY test interface control 0 */
+#define PHY_TST_CTRL1 (0xB8) /* D-PHY test interface control 1 */
+#define DPI_VCID (0xC) /* DPI virtual channel id */
+#define DPI_COLOR_CODING (0x10) /* DPI color coding */
+#define DPI_CFG_POL (0x14) /* DPI polarity configuration */
+#define VID_HSA_TIME (0x48) /* Horizontal Sync Active time */
+#define VID_HBP_TIME (0x4C) /* Horizontal Back Porch time */
+#define VID_HLINE_TIME (0x50) /* Line time */
+#define VID_VSA_LINES (0x54) /* Vertical Sync Active period */
+#define VID_VBP_LINES (0x58) /* Vertical Back Porch period */
+#define VID_VFP_LINES (0x5C) /* Vertical Front Porch period */
+#define VID_VACTIVE_LINES (0x60) /* Vertical resolution */
+#define VID_PKT_SIZE (0x3C) /* Video packet size */
+#define VID_MODE_CFG (0x38) /* Video mode configuration */
+#define DPI_LP_CMD_TIM (0x18) /* Low-power command timing config */
+#define PHY_TMR_CFG (0x9C) /* Data lanes timing configuration */
+#define BTA_TO_CNT (0x8C) /* Response timeout definition */
+#define PHY_TMR_LPCLK_CFG (0x98) /* clock lane timing configuration */
+#define CLK_DATA_TMR_CFG (0xCC)
+#define LPCLK_CTRL (0x94) /* Low-power in clock lane */
+#define PCKHDL_CFG (0x2C) /* Packet handler configuration */
+#define EDPI_CMD_SIZE (0x64) /* Size for eDPI packets */
+#define MODE_CFG (0x34) /* Video or Command mode selection */
+#define PHY_STATUS (0xB0) /* D-PHY PPI status interface */
+
+#define PHY_STOP_WAIT_TIME (0x30)
+
+/*
+ * regs relevant enum
+ */
+enum dpi_color_coding {
+ DSI_24BITS_1 = 5,
+};
+
+enum dsi_video_mode_type {
+ DSI_NON_BURST_SYNC_PULSES = 0,
+ DSI_NON_BURST_SYNC_EVENTS,
+ DSI_BURST_SYNC_PULSES_1,
+ DSI_BURST_SYNC_PULSES_2
+};
+
+enum dsi_work_mode {
+ DSI_VIDEO_MODE = 0,
+ DSI_COMMAND_MODE
+};
+
+/*
+ * regs Write/Read functions
+ */
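+/*
+ * Write one D-PHY test register: the register address is first latched
+ * from PHY_TST_CTRL1 by toggling PHY_TST_CTRL0, then the data word is
+ * latched the same way.  E.g. dsi_phy_tst_set(base, 0x10010, 10) would
+ * program the clock lane TLPX to 10 (see dsi_mipi_phy_clks()).
+ */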
+static inline void dsi_phy_tst_set(void __iomem *base, u32 reg, u32 val)
+{
+ writel(reg, base + PHY_TST_CTRL1);
+ /* reg addr written at first */
+ wmb();
+ writel(0x02, base + PHY_TST_CTRL0);
+ /* cmd1 sent for write */
+ wmb();
+ writel(0x00, base + PHY_TST_CTRL0);
+ /* cmd2 sent for write */
+ wmb();
+ writel(val, base + PHY_TST_CTRL1);
+ /* Then write data */
+ wmb();
+ writel(0x02, base + PHY_TST_CTRL0);
+ /* cmd2 sent for write */
+ wmb();
+ writel(0x00, base + PHY_TST_CTRL0);
+}
+
+#endif /* __HISI_DSI_REG_H__ */
diff --git a/drivers/gpu/drm/i2c/Makefile b/drivers/gpu/drm/i2c/Makefile
index 2c72eb584ab7..46cc9fb18367 100644
--- a/drivers/gpu/drm/i2c/Makefile
+++ b/drivers/gpu/drm/i2c/Makefile
@@ -1,6 +1,6 @@
ccflags-y := -Iinclude/drm
-obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511.o
+obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511.o adv7511_audio.o
ch7006-y := ch7006_drv.o ch7006_mode.o
obj-$(CONFIG_DRM_I2C_CH7006) += ch7006.o
diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c
index 00416f23b5cb..6b80bfc266fa 100644
--- a/drivers/gpu/drm/i2c/adv7511.c
+++ b/drivers/gpu/drm/i2c/adv7511.c
@@ -12,41 +12,19 @@
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/slab.h>
+#include <linux/of_graph.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder_slave.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_mipi_dsi.h>
#include "adv7511.h"
-struct adv7511 {
- struct i2c_client *i2c_main;
- struct i2c_client *i2c_edid;
-
- struct regmap *regmap;
- struct regmap *packet_memory_regmap;
- enum drm_connector_status status;
- bool powered;
-
- unsigned int f_tmds;
-
- unsigned int current_edid_segment;
- uint8_t edid_buf[256];
- bool edid_read;
-
- wait_queue_head_t wq;
- struct drm_encoder *encoder;
-
- bool embedded_sync;
- enum adv7511_sync_polarity vsync_polarity;
- enum adv7511_sync_polarity hsync_polarity;
- bool rgb;
-
- struct edid *edid;
-
- struct gpio_desc *gpio_pd;
-};
+#define HPD_ENABLE 1
static struct adv7511 *encoder_to_adv7511(struct drm_encoder *encoder)
{
@@ -66,6 +44,24 @@ static const struct reg_sequence adv7511_fixed_registers[] = {
{ 0x55, 0x02 },
};
+/* ADI recommended values for proper operation. */
+static const struct reg_sequence adv7533_fixed_registers[] = {
+ { 0x16, 0x20 },
+ { 0x9a, 0xe0 },
+ { 0xba, 0x70 },
+ { 0xde, 0x82 },
+ { 0xe4, 0x40 },
+ { 0xe5, 0x80 },
+};
+
+static const struct reg_sequence adv7533_cec_fixed_registers[] = {
+ { 0x15, 0xd0 },
+ { 0x17, 0xd0 },
+ { 0x24, 0x20 },
+ { 0x57, 0x11 },
+ { 0x05, 0xc8 },
+};
+
/* -----------------------------------------------------------------------------
* Register access
*/
@@ -158,6 +154,15 @@ static const struct regmap_config adv7511_regmap_config = {
.volatile_reg = adv7511_register_volatile,
};
+static const struct regmap_config adv7533_cec_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+ .cache_type = REGCACHE_RBTREE,
+};
+
/* -----------------------------------------------------------------------------
* Hardware configuration
*/
@@ -193,7 +198,7 @@ static void adv7511_set_colormap(struct adv7511 *adv7511, bool enable,
ADV7511_CSC_UPDATE_MODE, 0);
}
-static int adv7511_packet_enable(struct adv7511 *adv7511, unsigned int packet)
+int adv7511_packet_enable(struct adv7511 *adv7511, unsigned int packet)
{
if (packet & 0xff)
regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE0,
@@ -208,7 +213,7 @@ static int adv7511_packet_enable(struct adv7511 *adv7511, unsigned int packet)
return 0;
}
-static int adv7511_packet_disable(struct adv7511 *adv7511, unsigned int packet)
+int adv7511_packet_disable(struct adv7511 *adv7511, unsigned int packet)
{
if (packet & 0xff)
regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE0,
@@ -358,6 +363,79 @@ static void adv7511_set_link_config(struct adv7511 *adv7511,
adv7511->rgb = config->input_colorspace == HDMI_COLORSPACE_RGB;
}
+static void adv7511_dsi_config_tgen(struct adv7511 *adv7511)
+{
+ struct mipi_dsi_device *dsi = adv7511->dsi;
+ struct drm_display_mode *mode = &adv7511->curr_mode;
+ u8 clock_div_by_lanes[] = { 6, 4, 3 }; /* 2, 3, 4 lanes */
+ unsigned int hsw, hfp, hbp, vsw, vfp, vbp;
+
+ hsw = mode->hsync_end - mode->hsync_start;
+ hfp = mode->hsync_start - mode->hdisplay;
+ hbp = mode->htotal - mode->hsync_end;
+ vsw = mode->vsync_end - mode->vsync_start;
+ vfp = mode->vsync_start - mode->vdisplay;
+ vbp = mode->vtotal - mode->vsync_end;
+
+ /* set pixel clock divider mode */
+ regmap_write(adv7511->regmap_cec, 0x16,
+ clock_div_by_lanes[dsi->lanes - 2] << 3);
+
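+	/*
+	 * Each timing value below is 12 bits wide, split across two
+	 * registers: the first holds bits [11:4], the second holds bits
+	 * [3:0] in its upper nibble, hence the >> 4 / << 4 pairs.  E.g.
+	 * an htotal of 2200 (0x898) is written as 0x89 and 0x80.
+	 */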
+ /* horizontal porch params */
+ regmap_write(adv7511->regmap_cec, 0x28, mode->htotal >> 4);
+ regmap_write(adv7511->regmap_cec, 0x29, (mode->htotal << 4) & 0xff);
+ regmap_write(adv7511->regmap_cec, 0x2a, hsw >> 4);
+ regmap_write(adv7511->regmap_cec, 0x2b, (hsw << 4) & 0xff);
+ regmap_write(adv7511->regmap_cec, 0x2c, hfp >> 4);
+ regmap_write(adv7511->regmap_cec, 0x2d, (hfp << 4) & 0xff);
+ regmap_write(adv7511->regmap_cec, 0x2e, hbp >> 4);
+ regmap_write(adv7511->regmap_cec, 0x2f, (hbp << 4) & 0xff);
+
+ /* vertical porch params */
+ regmap_write(adv7511->regmap_cec, 0x30, mode->vtotal >> 4);
+ regmap_write(adv7511->regmap_cec, 0x31, (mode->vtotal << 4) & 0xff);
+ regmap_write(adv7511->regmap_cec, 0x32, vsw >> 4);
+ regmap_write(adv7511->regmap_cec, 0x33, (vsw << 4) & 0xff);
+ regmap_write(adv7511->regmap_cec, 0x34, vfp >> 4);
+ regmap_write(adv7511->regmap_cec, 0x35, (vfp << 4) & 0xff);
+ regmap_write(adv7511->regmap_cec, 0x36, vbp >> 4);
+ regmap_write(adv7511->regmap_cec, 0x37, (vbp << 4) & 0xff);
+}
+
+static void adv7511_dsi_receiver_dpms(struct adv7511 *adv7511)
+{
+ if (adv7511->type != ADV7533)
+ return;
+
+ if (adv7511->powered) {
+ struct mipi_dsi_device *dsi = adv7511->dsi;
+
+ adv7511_dsi_config_tgen(adv7511);
+
+ /* set number of dsi lanes */
+ regmap_write(adv7511->regmap_cec, 0x1c, dsi->lanes << 4);
+
+#if 0
+ /* reset internal timing generator */
+ regmap_write(adv7511->regmap_cec, 0x27, 0xcb);
+ regmap_write(adv7511->regmap_cec, 0x27, 0x8b);
+ regmap_write(adv7511->regmap_cec, 0x27, 0xcb);
+#else
+ /* disable internal timing generator */
+ regmap_write(adv7511->regmap_cec, 0x27, 0x0b);
+#endif
+
+ /* enable hdmi */
+ regmap_write(adv7511->regmap_cec, 0x03, 0x89);
+ /* disable test mode */
+ regmap_write(adv7511->regmap_cec, 0x55, 0x00);
+ } else {
+ regmap_write(adv7511->regmap_cec, 0x03, 0x0b);
+ regmap_write(adv7511->regmap_cec, 0x27, 0x0b);
+ }
+}
+
static void adv7511_power_on(struct adv7511 *adv7511)
{
adv7511->current_edid_segment = -1;
@@ -386,7 +464,13 @@ static void adv7511_power_on(struct adv7511 *adv7511)
*/
regcache_sync(adv7511->regmap);
+ if (adv7511->type == ADV7533)
+ regmap_register_patch(adv7511->regmap_cec,
+ adv7533_cec_fixed_registers,
+ ARRAY_SIZE(adv7533_cec_fixed_registers));
adv7511->powered = true;
+
+ adv7511_dsi_receiver_dpms(adv7511);
}
static void adv7511_power_off(struct adv7511 *adv7511)
@@ -398,12 +482,15 @@ static void adv7511_power_off(struct adv7511 *adv7511)
regcache_mark_dirty(adv7511->regmap);
adv7511->powered = false;
+
+ adv7511_dsi_receiver_dpms(adv7511);
}
/* -----------------------------------------------------------------------------
* Interrupt and hotplug detection
*/
+#if HPD_ENABLE
static bool adv7511_hpd(struct adv7511 *adv7511)
{
unsigned int irq0;
@@ -421,8 +508,9 @@ static bool adv7511_hpd(struct adv7511 *adv7511)
return false;
}
+#endif
-static int adv7511_irq_process(struct adv7511 *adv7511)
+static int adv7511_irq_process(struct adv7511 *adv7511, bool process_hpd)
{
unsigned int irq0, irq1;
int ret;
@@ -438,7 +526,7 @@ static int adv7511_irq_process(struct adv7511 *adv7511)
regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0);
regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);
- if (irq0 & ADV7511_INT0_HDP && adv7511->encoder)
+ if (process_hpd && irq0 & ADV7511_INT0_HDP && adv7511->encoder)
drm_helper_hpd_irq_event(adv7511->encoder->dev);
if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) {
@@ -456,7 +544,7 @@ static irqreturn_t adv7511_irq_handler(int irq, void *devid)
struct adv7511 *adv7511 = devid;
int ret;
- ret = adv7511_irq_process(adv7511);
+ ret = adv7511_irq_process(adv7511, true);
return ret < 0 ? IRQ_NONE : IRQ_HANDLED;
}
@@ -473,7 +561,7 @@ static int adv7511_wait_for_edid(struct adv7511 *adv7511, int timeout)
adv7511->edid_read, msecs_to_jiffies(timeout));
} else {
for (; timeout > 0; timeout -= 25) {
- ret = adv7511_irq_process(adv7511);
+ ret = adv7511_irq_process(adv7511, false);
if (ret < 0)
break;
@@ -555,18 +643,19 @@ static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block,
}
/* -----------------------------------------------------------------------------
- * Encoder operations
+ * ADV75xx helpers
*/
-
-static int adv7511_get_modes(struct drm_encoder *encoder,
- struct drm_connector *connector)
+static int adv7511_get_modes(struct adv7511 *adv7511,
+ struct drm_connector *connector)
{
- struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
struct edid *edid;
unsigned int count;
/* Reading the EDID only works if the device is powered */
if (!adv7511->powered) {
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
+ ADV7511_REG_POWER2_HDP_SRC_MASK,
+ ADV7511_REG_POWER2_HDP_SRC_NONE);
regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
ADV7511_INT0_EDID_READY);
regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
@@ -574,6 +663,8 @@ static int adv7511_get_modes(struct drm_encoder *encoder,
regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
ADV7511_POWER_POWER_DOWN, 0);
adv7511->current_edid_segment = -1;
+		/* wait some time until the EDID is ready */
+ msleep(200);
}
edid = drm_do_get_edid(connector, adv7511_get_edid_block, adv7511);
@@ -596,24 +687,15 @@ static int adv7511_get_modes(struct drm_encoder *encoder,
return count;
}
-static void adv7511_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
- struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
-
- if (mode == DRM_MODE_DPMS_ON)
- adv7511_power_on(adv7511);
- else
- adv7511_power_off(adv7511);
-}
-
static enum drm_connector_status
-adv7511_encoder_detect(struct drm_encoder *encoder,
+adv7511_detect(struct adv7511 *adv7511,
struct drm_connector *connector)
{
- struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
enum drm_connector_status status;
unsigned int val;
+#if HPD_ENABLE
bool hpd;
+#endif
int ret;
ret = regmap_read(adv7511->regmap, ADV7511_REG_STATUS, &val);
@@ -625,6 +707,7 @@ adv7511_encoder_detect(struct drm_encoder *encoder,
else
status = connector_status_disconnected;
+#if HPD_ENABLE
hpd = adv7511_hpd(adv7511);
/* The chip resets itself when the cable is disconnected, so in case
@@ -634,7 +717,7 @@ adv7511_encoder_detect(struct drm_encoder *encoder,
if (status == connector_status_connected && hpd && adv7511->powered) {
regcache_mark_dirty(adv7511->regmap);
adv7511_power_on(adv7511);
- adv7511_get_modes(encoder, connector);
+ adv7511_get_modes(adv7511, connector);
if (adv7511->status == connector_status_connected)
status = connector_status_disconnected;
} else {
@@ -643,25 +726,41 @@ adv7511_encoder_detect(struct drm_encoder *encoder,
ADV7511_REG_POWER2_HDP_SRC_MASK,
ADV7511_REG_POWER2_HDP_SRC_BOTH);
}
+#endif
adv7511->status = status;
return status;
}
-static int adv7511_encoder_mode_valid(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
+static int adv7511_mode_valid(struct adv7511 *adv7511,
+ struct drm_display_mode *mode)
{
if (mode->clock > 165000)
return MODE_CLOCK_HIGH;
-
- return MODE_OK;
+	/*
+	 * Modes known to work well are marked preferred so that they
+	 * are put at the front of the mode list.
+	 */
+ DRM_DEBUG("Checking mode %ix%i@%i clock: %i...",
+ mode->hdisplay, mode->vdisplay, drm_mode_vrefresh(mode), mode->clock);
+ if ((mode->hdisplay == 1920 && mode->vdisplay == 1080 && mode->clock == 148500) ||
+ (mode->hdisplay == 1280 && mode->vdisplay == 800 && mode->clock == 83496) ||
+ (mode->hdisplay == 1280 && mode->vdisplay == 720 && mode->clock == 74440) ||
+ (mode->hdisplay == 1280 && mode->vdisplay == 720 && mode->clock == 74250) ||
+ (mode->hdisplay == 1024 && mode->vdisplay == 768 && mode->clock == 75000) ||
+ (mode->hdisplay == 1024 && mode->vdisplay == 768 && mode->clock == 81833) ||
+ (mode->hdisplay == 800 && mode->vdisplay == 600 && mode->clock == 40000)) {
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+ DRM_DEBUG("OK\n");
+ return MODE_OK;
+ }
+ DRM_DEBUG("BAD\n");
+ return MODE_BAD;
}
-static void adv7511_encoder_mode_set(struct drm_encoder *encoder,
+static void adv7511_mode_set(struct adv7511 *adv7511,
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
{
- struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
unsigned int low_refresh_rate;
unsigned int hsync_polarity = 0;
unsigned int vsync_polarity = 0;
@@ -744,6 +843,28 @@ static void adv7511_encoder_mode_set(struct drm_encoder *encoder,
regmap_update_bits(adv7511->regmap, 0x17,
0x60, (vsync_polarity << 6) | (hsync_polarity << 5));
+ if (adv7511->type == ADV7533 && adv7511->num_dsi_lanes == 4) {
+ struct mipi_dsi_device *dsi = adv7511->dsi;
+ int lanes, ret;
+
+ if (adj_mode->clock > 80000)
+ lanes = 4;
+ else
+ lanes = 3;
+
+ if (lanes != dsi->lanes) {
+ mipi_dsi_detach(dsi);
+ dsi->lanes = lanes;
+ ret = mipi_dsi_attach(dsi);
+ if (ret) {
+ DRM_ERROR("Failed to change host lanes\n");
+ return;
+ }
+ }
+ }
+
+ drm_mode_copy(&adv7511->curr_mode, adj_mode);
+
/*
* TODO Test first order 4:2:2 to 4:4:4 up conversion method, which is
* supposed to give better results.
@@ -752,12 +873,247 @@ static void adv7511_encoder_mode_set(struct drm_encoder *encoder,
adv7511->f_tmds = mode->clock;
}
+/* -----------------------------------------------------------------------------
+ * Encoder operations
+ */
+
+static int adv7511_encoder_get_modes(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
+
+ return adv7511_get_modes(adv7511, connector);
+}
+
+static void adv7511_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
+
+ if (mode == DRM_MODE_DPMS_ON)
+ adv7511_power_on(adv7511);
+ else
+ adv7511_power_off(adv7511);
+}
+
+static enum drm_connector_status
+adv7511_encoder_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
+
+ return adv7511_detect(adv7511, connector);
+}
+
+static int adv7511_encoder_mode_valid(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
+
+ return adv7511_mode_valid(adv7511, mode);
+}
+
+static void adv7511_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
+
+ adv7511_mode_set(adv7511, mode, adj_mode);
+}
+
static struct drm_encoder_slave_funcs adv7511_encoder_funcs = {
.dpms = adv7511_encoder_dpms,
.mode_valid = adv7511_encoder_mode_valid,
.mode_set = adv7511_encoder_mode_set,
.detect = adv7511_encoder_detect,
- .get_modes = adv7511_get_modes,
+ .get_modes = adv7511_encoder_get_modes,
+};
+
+/* -----------------------------------------------------------------------------
+ * Bridge and connector functions
+ */
+
+static struct adv7511 *connector_to_adv7511(struct drm_connector *connector)
+{
+ return container_of(connector, struct adv7511, connector);
+}
+
+/* Connector helper functions */
+static int adv7533_connector_get_modes(struct drm_connector *connector)
+{
+ struct adv7511 *adv = connector_to_adv7511(connector);
+
+ return adv7511_get_modes(adv, connector);
+}
+
+static struct drm_encoder *
+adv7533_connector_best_encoder(struct drm_connector *connector)
+{
+ struct adv7511 *adv = connector_to_adv7511(connector);
+
+ return adv->bridge.encoder;
+}
+
+static enum drm_mode_status
+adv7533_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct adv7511 *adv = connector_to_adv7511(connector);
+
+ return adv7511_mode_valid(adv, mode);
+}
+
+static struct drm_connector_helper_funcs adv7533_connector_helper_funcs = {
+ .get_modes = adv7533_connector_get_modes,
+ .best_encoder = adv7533_connector_best_encoder,
+ .mode_valid = adv7533_connector_mode_valid,
+};
+
+static enum drm_connector_status
+adv7533_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct adv7511 *adv = connector_to_adv7511(connector);
+
+ return adv7511_detect(adv, connector);
+}
+
+static struct drm_connector_funcs adv7533_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = adv7533_connector_detect,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+/* Bridge funcs */
+static struct adv7511 *bridge_to_adv7511(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct adv7511, bridge);
+}
+
+static void adv7533_bridge_pre_enable(struct drm_bridge *bridge)
+{
+ struct adv7511 *adv = bridge_to_adv7511(bridge);
+
+ adv7511_power_on(adv);
+}
+
+static void adv7533_bridge_post_disable(struct drm_bridge *bridge)
+{
+ struct adv7511 *adv = bridge_to_adv7511(bridge);
+
+#if HPD_ENABLE
+ if (!adv->powered)
+ return;
+#endif
+
+ adv7511_power_off(adv);
+}
+
+static void adv7533_bridge_enable(struct drm_bridge *bridge)
+{
+}
+
+static void adv7533_bridge_disable(struct drm_bridge *bridge)
+{
+}
+
+static void adv7533_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct adv7511 *adv = bridge_to_adv7511(bridge);
+
+ adv7511_mode_set(adv, mode, adj_mode);
+}
+
+static int adv7533_attach_dsi(struct adv7511 *adv7511)
+{
+ struct device *dev = &adv7511->i2c_main->dev;
+ struct mipi_dsi_device *dsi;
+ struct mipi_dsi_host *host;
+ int ret;
+
+ host = of_find_mipi_dsi_host_by_node(adv7511->host_node);
+ if (!host) {
+ dev_err(dev, "failed to find dsi host\n");
+ return -EPROBE_DEFER;
+ }
+
+ /* can adv7533 virtual channel be non-zero? */
+ dsi = mipi_dsi_new_dummy(host, 0);
+ if (IS_ERR(dsi)) {
+ dev_err(dev, "failed to create dummy dsi device\n");
+ ret = PTR_ERR(dsi);
+ goto err_dsi_device;
+ }
+
+ adv7511->dsi = dsi;
+
+ dsi->lanes = adv7511->num_dsi_lanes;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE
+ | MIPI_DSI_MODE_EOT_PACKET | MIPI_DSI_MODE_VIDEO_HSE;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "failed to attach dsi to host\n");
+ goto err_dsi_attach;
+ }
+
+ return 0;
+
+err_dsi_attach:
+ mipi_dsi_unregister_device(dsi);
+err_dsi_device:
+ return ret;
+}
+
+static int adv7533_bridge_attach(struct drm_bridge *bridge)
+{
+ struct adv7511 *adv = bridge_to_adv7511(bridge);
+ int ret;
+
+ adv->encoder = bridge->encoder;
+
+ if (!bridge->encoder) {
+ DRM_ERROR("Parent encoder object not found");
+ return -ENODEV;
+ }
+
+#if HPD_ENABLE
+ adv->connector.polled = DRM_CONNECTOR_POLL_HPD;
+#endif
+
+ ret = drm_connector_init(bridge->dev, &adv->connector,
+ &adv7533_connector_funcs, DRM_MODE_CONNECTOR_HDMIA);
+ if (ret) {
+ DRM_ERROR("Failed to initialize connector with drm\n");
+ return ret;
+ }
+ drm_connector_helper_add(&adv->connector,
+ &adv7533_connector_helper_funcs);
+ drm_connector_register(&adv->connector);
+ drm_mode_connector_attach_encoder(&adv->connector, adv->encoder);
+
+#if HPD_ENABLE
+ drm_helper_hpd_irq_event(adv->connector.dev);
+#endif
+
+	ret = adv7533_attach_dsi(adv);
+
+ return ret;
+}
+
+static struct drm_bridge_funcs adv7533_bridge_funcs = {
+ .pre_enable = adv7533_bridge_pre_enable,
+ .enable = adv7533_bridge_enable,
+ .disable = adv7533_bridge_disable,
+ .post_disable = adv7533_bridge_post_disable,
+ .mode_set = adv7533_bridge_mode_set,
+ .attach = adv7533_bridge_attach,
};
/* -----------------------------------------------------------------------------
@@ -770,8 +1126,6 @@ static int adv7511_parse_dt(struct device_node *np,
const char *str;
int ret;
- memset(config, 0, sizeof(*config));
-
of_property_read_u32(np, "adi,input-depth", &config->input_color_depth);
if (config->input_color_depth != 8 && config->input_color_depth != 10 &&
config->input_color_depth != 12)
@@ -849,10 +1203,54 @@ static int adv7511_parse_dt(struct device_node *np,
return 0;
}
+static int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv7511)
+{
+ u32 num_lanes;
+ struct device_node *endpoint;
+
+ of_property_read_u32(np, "adi,dsi-lanes", &num_lanes);
+
+ if (num_lanes < 1 || num_lanes > 4)
+ return -EINVAL;
+
+ adv7511->num_dsi_lanes = num_lanes;
+
+ endpoint = of_graph_get_next_endpoint(np, NULL);
+ if (!endpoint) {
+ DRM_ERROR("adv dsi input endpoint not found\n");
+ return -ENODEV;
+ }
+
+ adv7511->host_node = of_graph_get_remote_port_parent(endpoint);
+ if (!adv7511->host_node) {
+ DRM_ERROR("dsi host node not found\n");
+ of_node_put(endpoint);
+ return -ENODEV;
+ }
+
+ of_node_put(endpoint);
+ of_node_put(adv7511->host_node);
+
+	/* TODO: check whether these should be parsed from DT */
+ adv7511->rgb = true;
+ adv7511->embedded_sync = false;
+
+ return 0;
+}
+
static const int edid_i2c_addr = 0x7e;
static const int packet_i2c_addr = 0x70;
static const int cec_i2c_addr = 0x78;
+static const struct of_device_id adv7511_of_ids[] = {
+ { .compatible = "adi,adv7511", .data = (void *) ADV7511 },
+ { .compatible = "adi,adv7511w", .data = (void *) ADV7511 },
+ { .compatible = "adi,adv7513", .data = (void *) ADV7511 },
+ { .compatible = "adi,adv7533", .data = (void *) ADV7533 },
+ { }
+};
+MODULE_DEVICE_TABLE(of, adv7511_of_ids);
+
static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
{
struct adv7511_link_config link_config;
@@ -871,7 +1269,21 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
adv7511->powered = false;
adv7511->status = connector_status_disconnected;
- ret = adv7511_parse_dt(dev->of_node, &link_config);
+ if (dev->of_node) {
+ const struct of_device_id *of_id;
+
+ of_id = of_match_node(adv7511_of_ids, dev->of_node);
+ adv7511->type = (enum adv7511_type) of_id->data;
+ } else {
+ adv7511->type = id->driver_data;
+ }
+
+ memset(&link_config, 0, sizeof(link_config));
+
+ if (adv7511->type == ADV7511)
+ ret = adv7511_parse_dt(dev->of_node, &link_config);
+ else
+ ret = adv7533_parse_dt(dev->of_node, adv7511);
if (ret)
return ret;
@@ -897,10 +1309,19 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
return ret;
dev_dbg(dev, "Rev. %d\n", val);
- ret = regmap_register_patch(adv7511->regmap, adv7511_fixed_registers,
- ARRAY_SIZE(adv7511_fixed_registers));
- if (ret)
- return ret;
+ if (adv7511->type == ADV7511) {
+ ret = regmap_register_patch(adv7511->regmap,
+ adv7511_fixed_registers,
+ ARRAY_SIZE(adv7511_fixed_registers));
+ if (ret)
+ return ret;
+ } else {
+ ret = regmap_register_patch(adv7511->regmap,
+ adv7533_fixed_registers,
+ ARRAY_SIZE(adv7533_fixed_registers));
+ if (ret)
+ return ret;
+ }
regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR, edid_i2c_addr);
regmap_write(adv7511->regmap, ADV7511_REG_PACKET_I2C_ADDR,
@@ -913,6 +1334,27 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
if (!adv7511->i2c_edid)
return -ENOMEM;
+ adv7511->i2c_cec = i2c_new_dummy(i2c->adapter, cec_i2c_addr >> 1);
+ if (!adv7511->i2c_cec) {
+ ret = -ENOMEM;
+ goto err_i2c_unregister_edid;
+ }
+
+ adv7511->regmap_cec = devm_regmap_init_i2c(adv7511->i2c_cec,
+ &adv7533_cec_regmap_config);
+ if (IS_ERR(adv7511->regmap_cec)) {
+ ret = PTR_ERR(adv7511->regmap_cec);
+ goto err_i2c_unregister_cec;
+ }
+
+ if (adv7511->type == ADV7533) {
+ ret = regmap_register_patch(adv7511->regmap_cec,
+ adv7533_cec_fixed_registers,
+ ARRAY_SIZE(adv7533_cec_fixed_registers));
+		if (ret)
+			goto err_i2c_unregister_cec;
+ }
+
if (i2c->irq) {
init_waitqueue_head(&adv7511->wq);
@@ -921,7 +1363,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
IRQF_ONESHOT, dev_name(dev),
adv7511);
if (ret)
- goto err_i2c_unregister_device;
+ goto err_i2c_unregister_cec;
}
/* CEC is unused for now */
@@ -932,11 +1374,27 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
i2c_set_clientdata(i2c, adv7511);
- adv7511_set_link_config(adv7511, &link_config);
+ if (adv7511->type == ADV7511)
+ adv7511_set_link_config(adv7511, &link_config);
+
+ if (adv7511->type == ADV7533) {
+ adv7511->bridge.funcs = &adv7533_bridge_funcs;
+ adv7511->bridge.of_node = dev->of_node;
+
+ ret = drm_bridge_add(&adv7511->bridge);
+ if (ret) {
+ dev_err(dev, "failed to add adv7533 bridge\n");
+ goto err_i2c_unregister_cec;
+ }
+ }
+
+ adv7511_audio_init(dev);
return 0;
-err_i2c_unregister_device:
+err_i2c_unregister_cec:
+ i2c_unregister_device(adv7511->i2c_cec);
+err_i2c_unregister_edid:
i2c_unregister_device(adv7511->i2c_edid);
return ret;
@@ -946,10 +1404,18 @@ static int adv7511_remove(struct i2c_client *i2c)
{
struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
+ adv7511_audio_exit(&i2c->dev);
+ i2c_unregister_device(adv7511->i2c_cec);
i2c_unregister_device(adv7511->i2c_edid);
kfree(adv7511->edid);
+ if (adv7511->type == ADV7533) {
+ mipi_dsi_detach(adv7511->dsi);
+ mipi_dsi_unregister_device(adv7511->dsi);
+ drm_bridge_remove(&adv7511->bridge);
+ }
+
return 0;
}
@@ -959,6 +1425,9 @@ static int adv7511_encoder_init(struct i2c_client *i2c, struct drm_device *dev,
struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
+ if (adv7511->type == ADV7533)
+ return -ENODEV;
+
encoder->slave_priv = adv7511;
encoder->slave_funcs = &adv7511_encoder_funcs;
@@ -968,21 +1437,14 @@ static int adv7511_encoder_init(struct i2c_client *i2c, struct drm_device *dev,
}
static const struct i2c_device_id adv7511_i2c_ids[] = {
- { "adv7511", 0 },
- { "adv7511w", 0 },
- { "adv7513", 0 },
+ { "adv7511", ADV7511 },
+ { "adv7511w", ADV7511 },
+ { "adv7513", ADV7511 },
+ { "adv7533", ADV7533 },
{ }
};
MODULE_DEVICE_TABLE(i2c, adv7511_i2c_ids);
-static const struct of_device_id adv7511_of_ids[] = {
- { .compatible = "adi,adv7511", },
- { .compatible = "adi,adv7511w", },
- { .compatible = "adi,adv7513", },
- { }
-};
-MODULE_DEVICE_TABLE(of, adv7511_of_ids);
-
static struct drm_i2c_encoder_driver adv7511_driver = {
.i2c_driver = {
.driver = {
diff --git a/drivers/gpu/drm/i2c/adv7511.h b/drivers/gpu/drm/i2c/adv7511.h
index 6599ed538426..e9008bf69caa 100644
--- a/drivers/gpu/drm/i2c/adv7511.h
+++ b/drivers/gpu/drm/i2c/adv7511.h
@@ -10,6 +10,16 @@
#define __DRM_I2C_ADV7511_H__
#include <linux/hdmi.h>
+#include <drm/drm_crtc_helper.h>
+
+struct regmap;
+struct adv7511;
+
+int adv7511_packet_enable(struct adv7511 *adv7511, unsigned int packet);
+int adv7511_packet_disable(struct adv7511 *adv7511, unsigned int packet);
+
+int adv7511_audio_init(struct device *dev);
+void adv7511_audio_exit(struct device *dev);
#define ADV7511_REG_CHIP_REVISION 0x00
#define ADV7511_REG_N0 0x01
@@ -229,6 +239,54 @@ enum adv7511_sync_polarity {
ADV7511_SYNC_POLARITY_HIGH,
};
+enum adv7511_type {
+ ADV7511,
+ ADV7533,
+};
+
+struct adv7511 {
+ struct i2c_client *i2c_main;
+ struct i2c_client *i2c_edid;
+ struct i2c_client *i2c_cec;
+
+ struct regmap *regmap;
+ struct regmap *regmap_cec;
+ enum drm_connector_status status;
+ bool powered;
+
+ struct drm_display_mode curr_mode;
+
+ unsigned int f_tmds;
+ unsigned int f_audio;
+ unsigned int audio_source;
+
+ unsigned int current_edid_segment;
+ uint8_t edid_buf[256];
+ bool edid_read;
+
+ wait_queue_head_t wq;
+ struct drm_encoder *encoder;
+
+ struct drm_connector connector;
+ struct drm_bridge bridge;
+
+ bool embedded_sync;
+ enum adv7511_sync_polarity vsync_polarity;
+ enum adv7511_sync_polarity hsync_polarity;
+ bool rgb;
+
+ struct edid *edid;
+
+ struct gpio_desc *gpio_pd;
+
+ /* ADV7533 DSI RX related params */
+ struct device_node *host_node;
+ struct mipi_dsi_device *dsi;
+ u8 num_dsi_lanes;
+
+ enum adv7511_type type;
+};
+
/**
* struct adv7511_link_config - Describes adv7511 hardware configuration
* @input_color_depth: Number of bits per color component (8, 10 or 12)
diff --git a/drivers/gpu/drm/i2c/adv7511_audio.c b/drivers/gpu/drm/i2c/adv7511_audio.c
new file mode 100644
index 000000000000..52019e95d007
--- /dev/null
+++ b/drivers/gpu/drm/i2c/adv7511_audio.c
@@ -0,0 +1,312 @@
+/*
+ * Analog Devices ADV7511 HDMI transmitter driver
+ *
+ * Copyright 2012 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include "adv7511.h"
+
+static const struct snd_soc_dapm_widget adv7511_dapm_widgets[] = {
+ SND_SOC_DAPM_OUTPUT("TMDS"),
+ SND_SOC_DAPM_AIF_IN("AIFIN", "Playback", 0, SND_SOC_NOPM, 0, 0),
+};
+
+static const struct snd_soc_dapm_route adv7511_routes[] = {
+ { "TMDS", NULL, "AIFIN" },
+};
+
+static void adv7511_calc_cts_n(unsigned int f_tmds, unsigned int fs,
+ unsigned int *cts, unsigned int *n)
+{
+	/* HDMI spec recommended N values; the DAI advertises all of these rates */
+	switch (fs) {
+	case 32000:
+		*n = 4096;
+		break;
+	case 44100:
+		*n = 6272;
+		break;
+	case 48000:
+		*n = 6144;
+		break;
+	case 88200:
+		*n = 12544;
+		break;
+	case 96000:
+		*n = 12288;
+		break;
+	case 176400:
+		*n = 25088;
+		break;
+	case 192000:
+		*n = 24576;
+		break;
+	}
+
+ *cts = ((f_tmds * *n) / (128 * fs)) * 1000;
+}
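
The helper above implements the HDMI audio clock regeneration relation CTS = f_TMDS * N / (128 * fs): N comes from the HDMI spec's recommended table, f_tmds is held in kHz (hence the final * 1000), and the sink recovers the audio clock as fs = f_TMDS * N / (128 * CTS). A standalone sketch of the same arithmetic for 44.1 kHz audio on a 148.5 MHz TMDS link (the harness is illustrative, not part of the patch):

#include <stdio.h>

/* Same arithmetic as adv7511_calc_cts_n(): f_tmds in kHz, N from the
 * HDMI-recommended table for the sample rate.
 */
int main(void)
{
	unsigned int f_tmds = 148500;	/* kHz, 1080p60 TMDS clock */
	unsigned int fs = 44100;	/* Hz */
	unsigned int n = 6272;		/* HDMI spec value for 44.1 kHz */
	unsigned int cts = ((f_tmds * n) / (128 * fs)) * 1000;

	printf("N=%u CTS=%u\n", n, cts);	/* N=6272 CTS=165000 */
	return 0;
}
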
+
+static int adv7511_update_cts_n(struct adv7511 *adv7511)
+{
+ unsigned int cts = 0;
+ unsigned int n = 0;
+
+ adv7511_calc_cts_n(adv7511->f_tmds, adv7511->f_audio, &cts, &n);
+
+ regmap_write(adv7511->regmap, ADV7511_REG_N0, (n >> 16) & 0xf);
+ regmap_write(adv7511->regmap, ADV7511_REG_N1, (n >> 8) & 0xff);
+ regmap_write(adv7511->regmap, ADV7511_REG_N2, n & 0xff);
+
+ regmap_write(adv7511->regmap, ADV7511_REG_CTS_MANUAL0,
+ (cts >> 16) & 0xf);
+ regmap_write(adv7511->regmap, ADV7511_REG_CTS_MANUAL1,
+ (cts >> 8) & 0xff);
+ regmap_write(adv7511->regmap, ADV7511_REG_CTS_MANUAL2,
+ cts & 0xff);
+
+ return 0;
+}
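
N and CTS are 20-bit quantities split across three registers each (a high nibble plus two full bytes), exactly as the writes above show. A quick illustrative check of the split for N = 6272 (0x1880):

#include <assert.h>

int main(void)
{
	unsigned int n = 6272;	/* 0x1880 */

	assert(((n >> 16) & 0xf) == 0x00);	/* ADV7511_REG_N0 */
	assert(((n >> 8) & 0xff) == 0x18);	/* ADV7511_REG_N1 */
	assert((n & 0xff) == 0x80);		/* ADV7511_REG_N2 */
	return 0;
}
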
+
+static int adv7511_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_codec *codec = rtd->codec;
+ struct adv7511 *adv7511 = snd_soc_codec_get_drvdata(codec);
+ unsigned int rate;
+ unsigned int len;
+ switch (params_rate(params)) {
+ case 32000:
+ rate = ADV7511_SAMPLE_FREQ_32000;
+ break;
+ case 44100:
+ rate = ADV7511_SAMPLE_FREQ_44100;
+ break;
+ case 48000:
+ rate = ADV7511_SAMPLE_FREQ_48000;
+ break;
+ case 88200:
+ rate = ADV7511_SAMPLE_FREQ_88200;
+ break;
+ case 96000:
+ rate = ADV7511_SAMPLE_FREQ_96000;
+ break;
+ case 176400:
+ rate = ADV7511_SAMPLE_FREQ_176400;
+ break;
+ case 192000:
+ rate = ADV7511_SAMPLE_FREQ_192000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ len = ADV7511_I2S_SAMPLE_LEN_16;
+ break;
+ case SNDRV_PCM_FORMAT_S18_3LE:
+ len = ADV7511_I2S_SAMPLE_LEN_18;
+ break;
+ case SNDRV_PCM_FORMAT_S20_3LE:
+ len = ADV7511_I2S_SAMPLE_LEN_20;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ len = ADV7511_I2S_SAMPLE_LEN_24;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ adv7511->f_audio = params_rate(params);
+
+ adv7511_update_cts_n(adv7511);
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CFG3,
+ ADV7511_AUDIO_CFG3_LEN_MASK, len);
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_I2C_FREQ_ID_CFG,
+ ADV7511_I2C_FREQ_ID_CFG_RATE_MASK, rate << 4);
+ regmap_write(adv7511->regmap, 0x73, 0x1);
+
+ return 0;
+}
+
+static int adv7511_set_dai_fmt(struct snd_soc_dai *codec_dai,
+ unsigned int fmt)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct adv7511 *adv7511 = snd_soc_codec_get_drvdata(codec);
+ unsigned int audio_source, i2s_format = 0;
+ unsigned int invert_clock;
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ audio_source = ADV7511_AUDIO_SOURCE_I2S;
+ i2s_format = ADV7511_I2S_FORMAT_I2S;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ audio_source = ADV7511_AUDIO_SOURCE_I2S;
+ i2s_format = ADV7511_I2S_FORMAT_RIGHT_J;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ audio_source = ADV7511_AUDIO_SOURCE_I2S;
+ i2s_format = ADV7511_I2S_FORMAT_LEFT_J;
+ break;
+	/*
+	 * case SND_SOC_DAIFMT_SPDIF:
+	 *	audio_source = ADV7511_AUDIO_SOURCE_SPDIF;
+	 *	break;
+	 */
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ invert_clock = 0;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ invert_clock = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_SOURCE, 0x70,
+ audio_source << 4);
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG, BIT(6),
+ invert_clock << 6);
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_I2S_CONFIG, 0x03,
+ i2s_format);
+
+ adv7511->audio_source = audio_source;
+
+ return 0;
+}
+
+static int adv7511_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ struct adv7511 *adv7511 = snd_soc_codec_get_drvdata(codec);
+ struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ switch (adv7511->audio_source) {
+ case ADV7511_AUDIO_SOURCE_I2S:
+ break;
+ case ADV7511_AUDIO_SOURCE_SPDIF:
+ regmap_update_bits(adv7511->regmap,
+ ADV7511_REG_AUDIO_CONFIG, BIT(7),
+ BIT(7));
+ break;
+ }
+ break;
+ case SND_SOC_BIAS_PREPARE:
+ if (dapm->bias_level == SND_SOC_BIAS_STANDBY) {
+ adv7511_packet_enable(adv7511,
+ ADV7511_PACKET_ENABLE_AUDIO_SAMPLE);
+ adv7511_packet_enable(adv7511,
+ ADV7511_PACKET_ENABLE_AUDIO_INFOFRAME);
+ adv7511_packet_enable(adv7511,
+ ADV7511_PACKET_ENABLE_N_CTS);
+ } else {
+ adv7511_packet_disable(adv7511,
+ ADV7511_PACKET_ENABLE_AUDIO_SAMPLE);
+ adv7511_packet_disable(adv7511,
+ ADV7511_PACKET_ENABLE_AUDIO_INFOFRAME);
+ adv7511_packet_disable(adv7511,
+ ADV7511_PACKET_ENABLE_N_CTS);
+ }
+ break;
+ case SND_SOC_BIAS_STANDBY:
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
+ BIT(7), 0);
+ break;
+ case SND_SOC_BIAS_OFF:
+ break;
+ }
+ dapm->bias_level = level;
+ return 0;
+}
+
+#define ADV7511_RATES (SNDRV_PCM_RATE_32000 |\
+ SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |\
+ SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 |\
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_192000)
+
+#define ADV7511_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S18_3LE |\
+ SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE)
+
+static const struct snd_soc_dai_ops adv7511_dai_ops = {
+ .hw_params = adv7511_hw_params,
+ /*.set_sysclk = adv7511_set_dai_sysclk,*/
+ .set_fmt = adv7511_set_dai_fmt,
+};
+
+static struct snd_soc_dai_driver adv7511_dai = {
+ .name = "adv7511",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = ADV7511_RATES,
+ .formats = ADV7511_FORMATS,
+ },
+ .ops = &adv7511_dai_ops,
+};
+
+static int adv7511_suspend(struct snd_soc_codec *codec)
+{
+ return adv7511_set_bias_level(codec, SND_SOC_BIAS_OFF);
+}
+
+static int adv7511_resume(struct snd_soc_codec *codec)
+{
+ return adv7511_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+}
+
+static int adv7511_probe(struct snd_soc_codec *codec)
+{
+ return adv7511_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+}
+
+static int adv7511_remove(struct snd_soc_codec *codec)
+{
+ adv7511_set_bias_level(codec, SND_SOC_BIAS_OFF);
+ return 0;
+}
+
+static struct snd_soc_codec_driver adv7511_codec_driver = {
+ .probe = adv7511_probe,
+ .remove = adv7511_remove,
+ .suspend = adv7511_suspend,
+ .resume = adv7511_resume,
+ .set_bias_level = adv7511_set_bias_level,
+
+ .dapm_widgets = adv7511_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(adv7511_dapm_widgets),
+ .dapm_routes = adv7511_routes,
+ .num_dapm_routes = ARRAY_SIZE(adv7511_routes),
+};
+
+int adv7511_audio_init(struct device *dev)
+{
+ return snd_soc_register_codec(dev, &adv7511_codec_driver,
+ &adv7511_dai, 1);
+}
+
+void adv7511_audio_exit(struct device *dev)
+{
+ snd_soc_unregister_codec(dev);
+}
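
The codec registered above is only half of the audio path; a machine driver still has to tie it to a CPU DAI. A hypothetical dai_link sketch — all names except "adv7511" (which matches adv7511_dai.name) are board-specific placeholders:

/* Hypothetical machine-driver link; cpu_dai_name and codec_name are
 * illustrative and depend on the board and i2c bus/address.
 */
static struct snd_soc_dai_link hdmi_audio_link = {
	.name		= "HDMI",
	.stream_name	= "HDMI Playback",
	.cpu_dai_name	= "i2s0",		/* placeholder */
	.codec_dai_name	= "adv7511",
	.codec_name	= "adv7511.2-0039",	/* i2c bus/address dependent */
	.dai_fmt	= SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
			  SND_SOC_DAIFMT_CBS_CFS,
};

Note that adv7511_set_dai_fmt() above only accepts SND_SOC_DAIFMT_CBS_CFS, which the sketch respects.
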
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index b4741d121a74..61fcb3b22297 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -402,6 +402,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
goto cleanup_gem_stolen;
+ intel_setup_gmbus(dev);
+
/* Important: The output setup functions called by modeset_init need
* working irqs for e.g. gmbus and dp aux transfers. */
intel_modeset_init(dev);
@@ -451,6 +453,7 @@ cleanup_gem:
cleanup_irq:
intel_guc_ucode_fini(dev);
drm_irq_uninstall(dev);
+ intel_teardown_gmbus(dev);
cleanup_gem_stolen:
i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
@@ -1028,7 +1031,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
/* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev);
- intel_setup_gmbus(dev);
intel_opregion_setup(dev);
i915_gem_load(dev);
@@ -1099,7 +1101,6 @@ out_gem_unload:
if (dev->pdev->msi_enabled)
pci_disable_msi(dev->pdev);
- intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
pm_qos_remove_request(&dev_priv->pm_qos);
destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
@@ -1198,7 +1199,6 @@ int i915_driver_unload(struct drm_device *dev)
intel_csr_ucode_fini(dev);
- intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
destroy_workqueue(dev_priv->hotplug.dp_wq);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 760e0ce4aa26..a6ad938f44a6 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -531,7 +531,10 @@ void intel_detect_pch(struct drm_device *dev)
dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
WARN_ON(!IS_SKYLAKE(dev));
- } else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE) {
+ } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
+ ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
+ pch->subsystem_vendor == 0x1af4 &&
+ pch->subsystem_device == 0x1100)) {
dev_priv->pch_type = intel_virt_detect_pch(dev);
} else
continue;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f4af19a0d569..d3ce4da6a6ad 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2614,6 +2614,7 @@ struct drm_i915_cmd_table {
#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
+#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 02ceb7a4b481..0433d25f9d23 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -340,6 +340,10 @@ void i915_gem_context_reset(struct drm_device *dev)
i915_gem_context_unreference(lctx);
ring->last_context = NULL;
}
+
+ /* Force the GPU state to be reinitialised on enabling */
+ if (ring->default_context)
+ ring->default_context->legacy_hw_ctx.initialized = false;
}
}
@@ -708,7 +712,7 @@ static int do_switch(struct drm_i915_gem_request *req)
if (ret)
goto unpin_out;
- if (!to->legacy_hw_ctx.initialized) {
+ if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) {
hw_flags |= MI_RESTORE_INHIBIT;
/* NB: If we inhibit the restore, the context is not allowed to
* die because future work may end up depending on valid address
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 0d228f909dcb..0f42a2782afc 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2354,9 +2354,13 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
spt_irq_handler(dev, pch_iir);
else
cpt_irq_handler(dev, pch_iir);
- } else
- DRM_ERROR("The master control interrupt lied (SDE)!\n");
-
+ } else {
+ /*
+ * Like on previous PCH there seems to be something
+ * fishy going on with forwarding PCH interrupts.
+ */
+ DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
+ }
}
I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index a6752a61d99f..7e6158b889da 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1582,7 +1582,8 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
wrpll_params.central_freq;
- } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
+ } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+ intel_encoder->type == INTEL_OUTPUT_DP_MST) {
switch (crtc_state->port_clock / 2) {
case 81000:
ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 32cf97346978..f859a5b87ed4 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11930,11 +11930,21 @@ connected_sink_compute_bpp(struct intel_connector *connector,
pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
}
- /* Clamp bpp to 8 on screens without EDID 1.4 */
- if (connector->base.display_info.bpc == 0 && bpp > 24) {
- DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
- bpp);
- pipe_config->pipe_bpp = 24;
+ /* Clamp bpp to default limit on screens without EDID 1.4 */
+ if (connector->base.display_info.bpc == 0) {
+ int type = connector->base.connector_type;
+ int clamp_bpp = 24;
+
+ /* Fall back to 18 bpp when DP sink capability is unknown. */
+ if (type == DRM_MODE_CONNECTOR_DisplayPort ||
+ type == DRM_MODE_CONNECTOR_eDP)
+ clamp_bpp = 18;
+
+ if (bpp > clamp_bpp) {
+ DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
+ bpp, clamp_bpp);
+ pipe_config->pipe_bpp = clamp_bpp;
+ }
}
}
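
The new clamp logic is easy to verify in isolation: with no EDID bpc information, DP and eDP sinks fall back to 18 bpp (6 bpc), everything else to 24 bpp, and sinks with a known bpc are left alone. A standalone sketch mirroring the hunk (helper name and harness are illustrative):

#include <assert.h>

static int clamp_pipe_bpp(int bpp, int bpc, int is_dp)
{
	int clamp_bpp = 24;

	if (bpc != 0)
		return bpp;		/* EDID 1.4 told us the depth */
	if (is_dp)
		clamp_bpp = 18;		/* DP sink capability unknown */
	return bpp > clamp_bpp ? clamp_bpp : bpp;
}

int main(void)
{
	assert(clamp_pipe_bpp(30, 0, 1) == 18);	/* unknown DP sink */
	assert(clamp_pipe_bpp(30, 0, 0) == 24);	/* unknown HDMI sink */
	assert(clamp_pipe_bpp(30, 10, 1) == 30);	/* bpc known from EDID */
	return 0;
}
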
@@ -13537,11 +13547,12 @@ intel_check_primary_plane(struct drm_plane *plane,
int max_scale = DRM_PLANE_HELPER_NO_SCALING;
bool can_position = false;
- /* use scaler when colorkey is not required */
- if (INTEL_INFO(plane->dev)->gen >= 9 &&
- state->ckey.flags == I915_SET_COLORKEY_NONE) {
- min_scale = 1;
- max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
+ if (INTEL_INFO(plane->dev)->gen >= 9) {
+ /* use scaler when colorkey is not required */
+ if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
+ min_scale = 1;
+ max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
+ }
can_position = true;
}
@@ -15565,6 +15576,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
intel_cleanup_gt_powersave(dev);
mutex_unlock(&dev->struct_mutex);
+
+ intel_teardown_gmbus(dev);
}
/*
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index a5e99ac305da..a8912aecc31f 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -207,7 +207,12 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
gpio = *data++;
/* pull up/down */
- action = *data++;
+ action = *data++ & 1;
+
+ if (gpio >= ARRAY_SIZE(gtable)) {
+ DRM_DEBUG_KMS("unknown gpio %u\n", gpio);
+ goto out;
+ }
function = gtable[gpio].function_reg;
pad = gtable[gpio].pad_reg;
@@ -226,6 +231,7 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
vlv_gpio_nc_write(dev_priv, pad, val);
mutex_unlock(&dev_priv->sb_lock);
+out:
return data;
}
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index b17785719598..d7a6437d9da2 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -468,9 +468,14 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
list_for_each_entry(connector, &mode_config->connector_list, head) {
struct intel_connector *intel_connector = to_intel_connector(connector);
connector->polled = intel_connector->polled;
- if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
- connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+	/* MST has a dynamic intel_connector->encoder and its reprobing
+	 * is all handled by the MST helpers. */
if (intel_connector->mst_port)
+ continue;
+
+ if (!connector->polled && I915_HAS_HOTPLUG(dev) &&
+ intel_connector->encoder->hpd_pin > HPD_NONE)
connector->polled = DRM_CONNECTOR_POLL_HPD;
}
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 8324654037b6..f3bee54c414f 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -675,7 +675,7 @@ int intel_setup_gmbus(struct drm_device *dev)
return 0;
err:
- while (--pin) {
+ while (pin--) {
if (!intel_gmbus_is_valid_pin(dev_priv, pin))
continue;
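
The `while (pin--)` change fixes a classic unwind off-by-one: `while (--pin)` never cleans up index 0 and, when the very first iteration fails, misbehaves instead of doing nothing. The identical fix reappears in radeon_ttm_tt_populate() further down. A standalone illustration:

#include <stdio.h>

int main(void)
{
	int i = 3;	/* setup of entry 3 failed; 0..2 must be undone */

	while (i--)
		printf("teardown %d\n", i);	/* prints 2, 1, 0 */
	return 0;
}
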
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 88e12bdf79e2..d69547a65dbb 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1706,6 +1706,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
if (flush_domains) {
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
flags |= PIPE_CONTROL_FLUSH_ENABLE;
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 9461a238f5d5..f6b2a814e629 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -347,6 +347,7 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req,
if (flush_domains) {
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
flags |= PIPE_CONTROL_FLUSH_ENABLE;
}
if (invalidate_domains) {
@@ -419,6 +420,7 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
if (flush_domains) {
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
flags |= PIPE_CONTROL_FLUSH_ENABLE;
}
if (invalidate_domains) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 2e7cbe933533..2a5ed7460354 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -969,10 +969,13 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name);
+ mutex_lock(&drm->dev->mode_config.mutex);
if (plugged)
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
else
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ mutex_unlock(&drm->dev->mode_config.mutex);
+
drm_helper_hpd_irq_event(connector->dev);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 64c8d932d5f1..58a3f7cf2fb3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -634,10 +634,6 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
nv_crtc->lut.depth = 0;
}
- /* Make sure that drm and hw vblank irqs get resumed if needed. */
- for (head = 0; head < dev->mode_config.num_crtc; head++)
- drm_vblank_on(dev, head);
-
/* This should ensure we don't hit a locking problem when someone
* wakes us up via a connector. We should never go into suspend
* while the display is on anyways.
@@ -647,6 +643,10 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
drm_helper_resume_force_mode(dev);
+ /* Make sure that drm and hw vblank irqs get resumed if needed. */
+ for (head = 0; head < dev->mode_config.num_crtc; head++)
+ drm_vblank_on(dev, head);
+
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 60e32c4e4e49..35ecc0d0458f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -24,7 +24,7 @@
static int nouveau_platform_probe(struct platform_device *pdev)
{
const struct nvkm_device_tegra_func *func;
- struct nvkm_device *device;
+ struct nvkm_device *device = NULL;
struct drm_device *drm;
int ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 7f8a42721eb2..e7e581d6a8ff 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -252,32 +252,40 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
return -ENOMEM;
- *pdevice = &tdev->device;
+
tdev->func = func;
tdev->pdev = pdev;
tdev->irq = -1;
tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
- if (IS_ERR(tdev->vdd))
- return PTR_ERR(tdev->vdd);
+ if (IS_ERR(tdev->vdd)) {
+ ret = PTR_ERR(tdev->vdd);
+ goto free;
+ }
tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
- if (IS_ERR(tdev->rst))
- return PTR_ERR(tdev->rst);
+ if (IS_ERR(tdev->rst)) {
+ ret = PTR_ERR(tdev->rst);
+ goto free;
+ }
tdev->clk = devm_clk_get(&pdev->dev, "gpu");
- if (IS_ERR(tdev->clk))
- return PTR_ERR(tdev->clk);
+ if (IS_ERR(tdev->clk)) {
+ ret = PTR_ERR(tdev->clk);
+ goto free;
+ }
tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
- if (IS_ERR(tdev->clk_pwr))
- return PTR_ERR(tdev->clk_pwr);
+ if (IS_ERR(tdev->clk_pwr)) {
+ ret = PTR_ERR(tdev->clk_pwr);
+ goto free;
+ }
nvkm_device_tegra_probe_iommu(tdev);
ret = nvkm_device_tegra_power_up(tdev);
if (ret)
- return ret;
+ goto remove;
tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
@@ -285,9 +293,19 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
cfg, dbg, detect, mmio, subdev_mask,
&tdev->device);
if (ret)
- return ret;
+ goto powerdown;
+
+ *pdevice = &tdev->device;
return 0;
+
+powerdown:
+ nvkm_device_tegra_power_down(tdev);
+remove:
+ nvkm_device_tegra_remove_iommu(tdev);
+free:
+ kfree(tdev);
+ return ret;
}
#else
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
index 74e2f7c6c07e..9688970eca47 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
@@ -328,6 +328,7 @@ nvkm_dp_train(struct work_struct *w)
.outp = outp,
}, *dp = &_dp;
u32 datarate = 0;
+ u8 pwr;
int ret;
if (!outp->base.info.location && disp->func->sor.magic)
@@ -355,6 +356,15 @@ nvkm_dp_train(struct work_struct *w)
/* disable link interrupt handling during link training */
nvkm_notify_put(&outp->irq);
+ /* ensure sink is not in a low-power state */
+ if (!nvkm_rdaux(outp->aux, DPCD_SC00, &pwr, 1)) {
+ if ((pwr & DPCD_SC00_SET_POWER) != DPCD_SC00_SET_POWER_D0) {
+ pwr &= ~DPCD_SC00_SET_POWER;
+ pwr |= DPCD_SC00_SET_POWER_D0;
+ nvkm_wraux(outp->aux, DPCD_SC00, &pwr, 1);
+ }
+ }
+
/* enable down-spreading and execute pre-train script from vbios */
dp_link_train_init(dp, outp->dpcd[3] & 0x01);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
index 9596290329c7..6e10c5e0ef11 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
@@ -71,5 +71,11 @@
#define DPCD_LS0C_LANE1_POST_CURSOR2 0x0c
#define DPCD_LS0C_LANE0_POST_CURSOR2 0x03
+/* DPCD Sink Control */
+#define DPCD_SC00 0x00600
+#define DPCD_SC00_SET_POWER 0x03
+#define DPCD_SC00_SET_POWER_D0 0x01
+#define DPCD_SC00_SET_POWER_D3 0x03
+
void nvkm_dp_train(struct work_struct *);
#endif
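
The new DPCD_SC00 definitions back a read-modify-write in nvkm_dp_train(): the 2-bit SET_POWER field is cleared and reprogrammed to D0 without disturbing the rest of the byte. The same pattern in isolation (the starting value is illustrative):

#include <assert.h>
#include <stdint.h>

#define DPCD_SC00_SET_POWER	0x03
#define DPCD_SC00_SET_POWER_D0	0x01
#define DPCD_SC00_SET_POWER_D3	0x03

int main(void)
{
	uint8_t pwr = DPCD_SC00_SET_POWER_D3;	/* sink asleep */

	if ((pwr & DPCD_SC00_SET_POWER) != DPCD_SC00_SET_POWER_D0) {
		pwr &= ~DPCD_SC00_SET_POWER;
		pwr |= DPCD_SC00_SET_POWER_D0;
	}
	assert(pwr == DPCD_SC00_SET_POWER_D0);
	return 0;
}
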
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 2ae8577497ca..7c2e78201ead 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -168,7 +168,8 @@ static int qxl_process_single_command(struct qxl_device *qdev,
cmd->command_size))
return -EFAULT;
- reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL);
+ reloc_info = kmalloc_array(cmd->relocs_num,
+ sizeof(struct qxl_reloc_info), GFP_KERNEL);
if (!reloc_info)
return -ENOMEM;
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index bb292143997e..adf74f4366bb 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -892,8 +892,6 @@ atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_m
else
args.v1.ucLaneNum = 4;
- if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
- args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
@@ -910,6 +908,10 @@ atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_m
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
else
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
+
+ if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+
break;
case 2:
case 3:
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 752072771388..367a916f364e 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -301,6 +301,14 @@ void dce6_dp_audio_set_dto(struct radeon_device *rdev,
* is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
*/
if (ASIC_IS_DCE8(rdev)) {
+ unsigned int div = (RREG32(DENTIST_DISPCLK_CNTL) &
+ DENTIST_DPREFCLK_WDIVIDER_MASK) >>
+ DENTIST_DPREFCLK_WDIVIDER_SHIFT;
+ div = radeon_audio_decode_dfs_div(div);
+
+ if (div)
+ clock = clock * 100 / div;
+
WREG32(DCE8_DCCG_AUDIO_DTO1_PHASE, 24000);
WREG32(DCE8_DCCG_AUDIO_DTO1_MODULE, clock);
} else {
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 9953356fe263..3cf04a2f44bb 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -289,6 +289,16 @@ void dce4_dp_audio_set_dto(struct radeon_device *rdev,
 * number (a ratio of two integers: DCCG_AUDIO_DTOx_PHASE is the
 * numerator, DCCG_AUDIO_DTOx_MODULE the denominator).
*/
+ if (ASIC_IS_DCE41(rdev)) {
+ unsigned int div = (RREG32(DCE41_DENTIST_DISPCLK_CNTL) &
+ DENTIST_DPREFCLK_WDIVIDER_MASK) >>
+ DENTIST_DPREFCLK_WDIVIDER_SHIFT;
+ div = radeon_audio_decode_dfs_div(div);
+
+ if (div)
+ clock = 100 * clock / div;
+ }
+
WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);
WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
}
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 4aa5f755572b..13b6029d65cc 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -511,6 +511,11 @@
#define DCCG_AUDIO_DTO1_CNTL 0x05cc
# define DCCG_AUDIO_DTO1_USE_512FBR_DTO (1 << 3)
+#define DCE41_DENTIST_DISPCLK_CNTL 0x049c
+# define DENTIST_DPREFCLK_WDIVIDER(x) (((x) & 0x7f) << 24)
+# define DENTIST_DPREFCLK_WDIVIDER_MASK (0x7f << 24)
+# define DENTIST_DPREFCLK_WDIVIDER_SHIFT 24
+
/* DCE 4.0 AFMT */
#define HDMI_CONTROL 0x7030
# define HDMI_KEEPOUT_MODE (1 << 0)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 87db64983ea8..5580568088bb 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -268,6 +268,7 @@ struct radeon_clock {
uint32_t current_dispclk;
uint32_t dp_extclk;
uint32_t max_pixel_clock;
+ uint32_t vco_freq;
};
/*
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 8f285244c839..de9a2ffcf5f7 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -437,7 +437,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
}
/* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
- if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) &&
+ if (((dev->pdev->device == 0x9802) ||
+ (dev->pdev->device == 0x9805) ||
+ (dev->pdev->device == 0x9806)) &&
(dev->pdev->subsystem_vendor == 0x1734) &&
(dev->pdev->subsystem_device == 0x11bd)) {
if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
@@ -448,14 +450,6 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
}
}
- /* Fujitsu D3003-S2 board lists DVI-I as DVI-I and VGA */
- if ((dev->pdev->device == 0x9805) &&
- (dev->pdev->subsystem_vendor == 0x1734) &&
- (dev->pdev->subsystem_device == 0x11bd)) {
- if (*connector_type == DRM_MODE_CONNECTOR_VGA)
- return false;
- }
-
return true;
}
@@ -1112,6 +1106,31 @@ union firmware_info {
ATOM_FIRMWARE_INFO_V2_2 info_22;
};
+union igp_info {
+ struct _ATOM_INTEGRATED_SYSTEM_INFO info;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
+};
+
+static void radeon_atombios_get_dentist_vco_freq(struct radeon_device *rdev)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+ union igp_info *igp_info;
+ u8 frev, crev;
+ u16 data_offset;
+
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ igp_info = (union igp_info *)(mode_info->atom_context->bios +
+ data_offset);
+ rdev->clock.vco_freq =
+ le32_to_cpu(igp_info->info_6.ulDentistVCOFreq);
+ }
+}
+
bool radeon_atom_get_clock_info(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
@@ -1263,20 +1282,25 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
rdev->mode_info.firmware_flags =
le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess);
+ if (ASIC_IS_DCE8(rdev))
+ rdev->clock.vco_freq =
+ le32_to_cpu(firmware_info->info_22.ulGPUPLL_OutputFreq);
+ else if (ASIC_IS_DCE5(rdev))
+ rdev->clock.vco_freq = rdev->clock.current_dispclk;
+ else if (ASIC_IS_DCE41(rdev))
+ radeon_atombios_get_dentist_vco_freq(rdev);
+ else
+ rdev->clock.vco_freq = rdev->clock.current_dispclk;
+
+ if (rdev->clock.vco_freq == 0)
+ rdev->clock.vco_freq = 360000; /* 3.6 GHz */
+
return true;
}
return false;
}
-union igp_info {
- struct _ATOM_INTEGRATED_SYSTEM_INFO info;
- struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
- struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
- struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
- struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
-};
-
bool radeon_atombios_sideport_present(struct radeon_device *rdev)
{
struct radeon_mode_info *mode_info = &rdev->mode_info;
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index c4b4f298a283..9bc408c9f9f6 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -62,6 +62,10 @@ bool radeon_has_atpx(void) {
return radeon_atpx_priv.atpx_detected;
}
+bool radeon_has_atpx_dgpu_power_cntl(void) {
+ return radeon_atpx_priv.atpx.functions.power_cntl;
+}
+
/**
* radeon_atpx_call - call an ATPX method
*
@@ -141,10 +145,6 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas
*/
static int radeon_atpx_validate(struct radeon_atpx *atpx)
{
- /* make sure required functions are enabled */
- /* dGPU power control is required */
- atpx->functions.power_cntl = true;
-
if (atpx->functions.px_params) {
union acpi_object *info;
struct atpx_px_params output;
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index 2c02e99b5f95..b214663b370d 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -739,9 +739,6 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- struct radeon_connector_atom_dig *dig_connector =
- radeon_connector->con_priv;
if (!dig || !dig->afmt)
return;
@@ -753,10 +750,7 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
radeon_audio_write_speaker_allocation(encoder);
radeon_audio_write_sad_regs(encoder);
radeon_audio_write_latency_fields(encoder, mode);
- if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
- radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
- else
- radeon_audio_set_dto(encoder, dig_connector->dp_clock);
+ radeon_audio_set_dto(encoder, rdev->clock.vco_freq * 10);
radeon_audio_set_audio_packet(encoder);
radeon_audio_select_pin(encoder);
@@ -781,3 +775,15 @@ void radeon_audio_dpms(struct drm_encoder *encoder, int mode)
if (radeon_encoder->audio && radeon_encoder->audio->dpms)
radeon_encoder->audio->dpms(encoder, mode == DRM_MODE_DPMS_ON);
}
+
+unsigned int radeon_audio_decode_dfs_div(unsigned int div)
+{
+ if (div >= 8 && div < 64)
+ return (div - 8) * 25 + 200;
+ else if (div >= 64 && div < 96)
+ return (div - 64) * 50 + 1600;
+ else if (div >= 96 && div < 128)
+ return (div - 96) * 100 + 3200;
+ else
+ return 0;
+}
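
The decode above inverts the piecewise register encoding of the DENTIST display-FIFO divider: raw values 8-63 step by 0.25, 64-95 by 0.50, 96-127 by 1.00, with the result scaled by 100. Its consumers in dce6_dp_audio_set_dto() and dce4_dp_audio_set_dto() then apply clock = clock * 100 / div. A standalone check of a few points (harness is illustrative):

#include <assert.h>

static unsigned int decode_dfs_div(unsigned int div)
{
	if (div >= 8 && div < 64)
		return (div - 8) * 25 + 200;
	else if (div >= 64 && div < 96)
		return (div - 64) * 50 + 1600;
	else if (div >= 96 && div < 128)
		return (div - 96) * 100 + 3200;
	return 0;	/* out of range; callers skip the correction */
}

int main(void)
{
	assert(decode_dfs_div(8) == 200);	/* divider 2.00 */
	assert(decode_dfs_div(64) == 1600);	/* divider 16.00 */
	assert(decode_dfs_div(100) == 3600);	/* divider 36.00 */
	/* e.g. a 100000-unit clock behind a 2.00 divider: */
	assert(100000 * 100 / decode_dfs_div(8) == 50000);
	return 0;
}
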
diff --git a/drivers/gpu/drm/radeon/radeon_audio.h b/drivers/gpu/drm/radeon/radeon_audio.h
index 059cc3012062..5c70cceaa4a6 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.h
+++ b/drivers/gpu/drm/radeon/radeon_audio.h
@@ -79,5 +79,6 @@ void radeon_audio_fini(struct radeon_device *rdev);
void radeon_audio_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode);
void radeon_audio_dpms(struct drm_encoder *encoder, int mode);
+unsigned int radeon_audio_decode_dfs_div(unsigned int div);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index c566993a2ec3..f78f111e68de 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -103,6 +103,12 @@ static const char radeon_family_name[][16] = {
"LAST",
};
+#if defined(CONFIG_VGA_SWITCHEROO)
+bool radeon_has_atpx_dgpu_power_cntl(void);
+#else
+static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
+#endif
+
#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
@@ -1433,7 +1439,7 @@ int radeon_device_init(struct radeon_device *rdev,
* ignore it */
vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
- if (rdev->flags & RADEON_IS_PX)
+ if ((rdev->flags & RADEON_IS_PX) && radeon_has_atpx_dgpu_power_cntl())
runtime = true;
vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
if (runtime)
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 1eca0acac016..3645b223aa37 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -403,7 +403,8 @@ static void radeon_flip_work_func(struct work_struct *__work)
struct drm_crtc *crtc = &radeon_crtc->base;
unsigned long flags;
int r;
- int vpos, hpos, stat, min_udelay;
+ int vpos, hpos, stat, min_udelay = 0;
+ unsigned repcnt = 4;
struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
down_read(&rdev->exclusive_lock);
@@ -454,7 +455,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
* In practice this won't execute very often unless on very fast
* machines because the time window for this to happen is very small.
*/
- for (;;) {
+ while (radeon_crtc->enabled && --repcnt) {
/* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
* start in hpos, and to the "fudged earlier" vblank start in
* vpos.
@@ -470,12 +471,24 @@ static void radeon_flip_work_func(struct work_struct *__work)
break;
/* Sleep at least until estimated real start of hw vblank */
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
+ if (min_udelay > vblank->framedur_ns / 2000) {
+ /* Don't wait ridiculously long - something is wrong */
+ repcnt = 0;
+ break;
+ }
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
usleep_range(min_udelay, 2 * min_udelay);
spin_lock_irqsave(&crtc->dev->event_lock, flags);
}
+ if (!repcnt)
+ DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
+ "framedur %d, linedur %d, stat %d, vpos %d, "
+ "hpos %d\n", work->crtc_id, min_udelay,
+ vblank->framedur_ns / 1000,
+ vblank->linedur_ns / 1000, stat, vpos, hpos);
+
/* do the flip (mmio) */
radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 84d45633d28c..fb6ad143873f 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -33,6 +33,7 @@
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
+#include <drm/drm_cache.h>
#include "radeon.h"
#include "radeon_trace.h"
@@ -245,6 +246,12 @@ int radeon_bo_create(struct radeon_device *rdev,
DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
"better performance thanks to write-combining\n");
bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
+#else
+ /* For architectures that don't support WC memory,
+ * mask out the WC flag from the BO
+ */
+ if (!drm_arch_can_wc_memory())
+ bo->flags &= ~RADEON_GEM_GTT_WC;
#endif
radeon_ttm_placement_from_domain(bo, domain);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 59abebd6b5dc..60ab31517153 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1078,10 +1078,6 @@ force:
/* update displays */
radeon_dpm_display_configuration_changed(rdev);
- rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
- rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
- rdev->pm.dpm.single_display = single_display;
-
/* wait for the rings to drain */
for (i = 0; i < RADEON_NUM_RINGS; i++) {
struct radeon_ring *ring = &rdev->ring[i];
@@ -1097,6 +1093,10 @@ force:
radeon_dpm_post_set_power_state(rdev);
+ rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
+ rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
+ rdev->pm.dpm.single_display = single_display;
+
if (rdev->asic->dpm.force_performance_level) {
if (rdev->pm.dpm.thermal_active) {
enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index c507896aca45..197b157b73d0 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -349,8 +349,13 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
/* see if we can skip over some allocations */
} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
+ for (i = 0; i < RADEON_NUM_RINGS; ++i)
+ radeon_fence_ref(fences[i]);
+
spin_unlock(&sa_manager->wq.lock);
r = radeon_fence_wait_any(rdev, fences, false);
+ for (i = 0; i < RADEON_NUM_RINGS; ++i)
+ radeon_fence_unref(&fences[i]);
spin_lock(&sa_manager->wq.lock);
/* if we have nothing to wait for block */
if (r == -ENOENT) {
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index e34307459e50..e06ac546a90f 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -758,7 +758,7 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
0, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
- while (--i) {
+ while (i--) {
pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
gtt->ttm.dma_address[i] = 0;
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 48d97c040f49..3979632b9225 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -455,15 +455,15 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
if (soffset) {
/* make sure object fit at this offset */
- eoffset = soffset + size;
+ eoffset = soffset + size - 1;
if (soffset >= eoffset) {
r = -EINVAL;
goto error_unreserve;
}
last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
- if (last_pfn > rdev->vm_manager.max_pfn) {
- dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
+ if (last_pfn >= rdev->vm_manager.max_pfn) {
+ dev_err(rdev->dev, "va above limit (0x%08X >= 0x%08X)\n",
last_pfn, rdev->vm_manager.max_pfn);
r = -EINVAL;
goto error_unreserve;
@@ -478,7 +478,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
eoffset /= RADEON_GPU_PAGE_SIZE;
if (soffset || eoffset) {
struct interval_tree_node *it;
- it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1);
+ it = interval_tree_iter_first(&vm->va, soffset, eoffset);
if (it && it != &bo_va->it) {
struct radeon_bo_va *tmp;
tmp = container_of(it, struct radeon_bo_va, it);
@@ -518,7 +518,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
if (soffset || eoffset) {
spin_lock(&vm->status_lock);
bo_va->it.start = soffset;
- bo_va->it.last = eoffset - 1;
+ bo_va->it.last = eoffset;
list_add(&bo_va->vm_status, &vm->cleared);
spin_unlock(&vm->status_lock);
interval_tree_insert(&bo_va->it, &vm->va);
@@ -888,7 +888,7 @@ static void radeon_vm_fence_pts(struct radeon_vm *vm,
unsigned i;
start >>= radeon_vm_block_size;
- end >>= radeon_vm_block_size;
+ end = (end - 1) >> radeon_vm_block_size;
for (i = start; i <= end; ++i)
radeon_bo_fence(vm->page_tables[i].bo, fence, true);
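
These hunks switch the VA bookkeeping to inclusive end offsets, which is what both the interval tree and the pfn limit check expect. A worked example of the arithmetic (values are illustrative):

#include <assert.h>

#define GPU_PAGE_SIZE 4096

int main(void)
{
	unsigned long soffset = 0x1000, size = 0x2000;
	unsigned long eoffset = soffset + size - 1;	/* inclusive last byte */
	unsigned long last_pfn = eoffset / GPU_PAGE_SIZE;

	assert(eoffset == 0x2fff);
	assert(last_pfn == 2);	/* pages 1 and 2; needs last_pfn < max_pfn */
	return 0;
}
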
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index a82b891ae1fe..7285adb27099 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2926,9 +2926,11 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
/* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0x2015, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
{ 0, 0, 0, 0 },
};
@@ -3008,6 +3010,10 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
}
++p;
}
+ /* limit mclk on all R7 370 parts for stability */
+ if (rdev->pdev->device == 0x6811 &&
+ rdev->pdev->revision == 0x81)
+ max_mclk = 120000;
if (rps->vce_active) {
rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 4c4a7218a3bd..d1a7b58dd291 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -915,6 +915,11 @@
#define DCCG_AUDIO_DTO1_PHASE 0x05c0
#define DCCG_AUDIO_DTO1_MODULE 0x05c4
+#define DENTIST_DISPCLK_CNTL 0x0490
+# define DENTIST_DPREFCLK_WDIVIDER(x) (((x) & 0x7f) << 24)
+# define DENTIST_DPREFCLK_WDIVIDER_MASK (0x7f << 24)
+# define DENTIST_DPREFCLK_WDIVIDER_SHIFT 24
+
#define AFMT_AUDIO_SRC_CONTROL 0x713c
#define AFMT_AUDIO_SRC_SELECT(x) (((x) & 7) << 0)
/* AFMT_AUDIO_SRC_SELECT
diff --git a/drivers/gpu/drm/radeon/vce_v1_0.c b/drivers/gpu/drm/radeon/vce_v1_0.c
index 07a0d378e122..a01efe39a820 100644
--- a/drivers/gpu/drm/radeon/vce_v1_0.c
+++ b/drivers/gpu/drm/radeon/vce_v1_0.c
@@ -178,12 +178,12 @@ int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data)
return -EINVAL;
}
- for (i = 0; i < sign->num; ++i) {
- if (sign->val[i].chip_id == chip_id)
+ for (i = 0; i < le32_to_cpu(sign->num); ++i) {
+ if (le32_to_cpu(sign->val[i].chip_id) == chip_id)
break;
}
- if (i == sign->num)
+ if (i == le32_to_cpu(sign->num))
return -EINVAL;
data += (256 - 64) / 4;
@@ -191,18 +191,18 @@ int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data)
data[1] = sign->val[i].nonce[1];
data[2] = sign->val[i].nonce[2];
data[3] = sign->val[i].nonce[3];
- data[4] = sign->len + 64;
+ data[4] = cpu_to_le32(le32_to_cpu(sign->len) + 64);
memset(&data[5], 0, 44);
memcpy(&data[16], &sign[1], rdev->vce_fw->size - sizeof(*sign));
- data += data[4] / 4;
+ data += le32_to_cpu(data[4]) / 4;
data[0] = sign->val[i].sigval[0];
data[1] = sign->val[i].sigval[1];
data[2] = sign->val[i].sigval[2];
data[3] = sign->val[i].sigval[3];
- rdev->vce.keyselect = sign->val[i].keyselect;
+ rdev->vce.keyselect = le32_to_cpu(sign->val[i].keyselect);
return 0;
}
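
The le32_to_cpu()/cpu_to_le32() conversions above matter because VCE firmware headers are little-endian on disk, so raw loads return byte-swapped values on big-endian hosts. A portable userspace equivalent of the decode (kernel helpers replaced by explicit byte assembly):

#include <stdio.h>
#include <stdint.h>

static uint32_t le32_decode(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	const uint8_t raw[4] = { 0x02, 0x00, 0x00, 0x00 };	/* stored LE */

	printf("num = %u\n", le32_decode(raw));	/* 2 on any host */
	return 0;
}
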
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 62c7b1dafaa4..73e41a8613da 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -539,7 +539,7 @@ static int udlfb_create(struct drm_fb_helper *helper,
out_destroy_fbi:
drm_fb_helper_release_fbi(helper);
out_gfree:
- drm_gem_object_unreference(&ufbdev->ufb.obj->base);
+ drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
out:
return ret;
}
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 2a0a784ab6ee..d7528e0d8442 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -52,7 +52,7 @@ udl_gem_create(struct drm_file *file,
return ret;
}
- drm_gem_object_unreference(&obj->base);
+ drm_gem_object_unreference_unlocked(&obj->base);
*handle_p = handle;
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 6377e8151000..67cebb23c940 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -247,7 +247,7 @@ static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
struct vmw_cmdbuf_man *man = header->man;
- BUG_ON(!spin_is_locked(&man->lock));
+ lockdep_assert_held_once(&man->lock);
if (header->inline_space) {
vmw_cmdbuf_header_inline_free(header);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index c49812b80dd0..24fb348a44e1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -25,6 +25,7 @@
*
**************************************************************************/
#include <linux/module.h>
+#include <linux/console.h>
#include <drm/drmP.h>
#include "vmwgfx_drv.h"
@@ -1538,6 +1539,12 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
static int __init vmwgfx_init(void)
{
int ret;
+
+#ifdef CONFIG_VGA_CONSOLE
+ if (vgacon_text_force())
+ return -EINVAL;
+#endif
+
ret = drm_pci_init(&driver, &vmw_pci_driver);
if (ret)
DRM_ERROR("Failed initializing DRM.\n");
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 9b4bb9e74d73..7c2e118a77b0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -763,21 +763,25 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
uint32_t format;
struct drm_vmw_size content_base_size;
struct vmw_resource *res;
+ unsigned int bytes_pp;
int ret;
switch (mode_cmd->depth) {
case 32:
case 24:
format = SVGA3D_X8R8G8B8;
+ bytes_pp = 4;
break;
case 16:
case 15:
format = SVGA3D_R5G6B5;
+ bytes_pp = 2;
break;
case 8:
format = SVGA3D_P8;
+ bytes_pp = 1;
break;
default:
@@ -785,7 +789,7 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
return -EINVAL;
}
- content_base_size.width = mode_cmd->width;
+ content_base_size.width = mode_cmd->pitch / bytes_pp;
content_base_size.height = mode_cmd->height;
content_base_size.depth = 1;
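
Sizing the proxy surface from the pitch instead of the logical width matters whenever the scanline is padded for alignment. A worked example (the padded pitch is illustrative):

#include <assert.h>

int main(void)
{
	unsigned int pitch = 5504;	/* bytes/scanline: 1366px * 4, padded */
	unsigned int bytes_pp = 4;	/* 32bpp -> SVGA3D_X8R8G8B8 */

	/* the surface width must cover the full pitch, not just 1366 px */
	assert(pitch / bytes_pp == 1376);
	return 0;
}
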
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index f2e13eb8339f..a0e28f3a278d 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -1050,6 +1050,17 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
const struct ipu_platform_reg *reg = &client_reg[i];
struct platform_device *pdev;
+ struct device_node *of_node;
+
+ /* Associate subdevice with the corresponding port node */
+ of_node = of_graph_get_port_by_id(dev->of_node, i);
+ if (!of_node) {
+ dev_info(dev,
+ "no port@%d node in %s, not using %s%d\n",
+ i, dev->of_node->full_name,
+ (i / 2) ? "DI" : "CSI", i % 2);
+ continue;
+ }
pdev = platform_device_alloc(reg->name, id++);
if (!pdev) {
@@ -1057,17 +1068,9 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
goto err_register;
}
+ pdev->dev.of_node = of_node;
pdev->dev.parent = dev;
- /* Associate subdevice with the corresponding port node */
- pdev->dev.of_node = of_graph_get_port_by_id(dev->of_node, i);
- if (!pdev->dev.of_node) {
- dev_err(dev, "missing port@%d node in %s\n", i,
- dev->of_node->full_name);
- ret = -ENODEV;
- goto err_register;
- }
-
ret = platform_device_add_data(pdev, &reg->pdata,
sizeof(reg->pdata));
if (!ret)
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index c6f7a694f67a..ec791e169f8f 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1897,6 +1897,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DUAL_ACTION) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD_CORD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) },
@@ -2615,9 +2616,10 @@ int hid_add_device(struct hid_device *hdev)
/*
* Scan generic devices for group information
*/
- if (hid_ignore_special_drivers ||
- (!hdev->group &&
- !hid_match_id(hdev, hid_have_special_driver))) {
+ if (hid_ignore_special_drivers) {
+ hdev->group = HID_GROUP_GENERIC;
+ } else if (!hdev->group &&
+ !hid_match_id(hdev, hid_have_special_driver)) {
ret = hid_scan_report(hdev);
if (ret)
hid_warn(hdev, "bad device descriptor (%d)\n", ret);
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 3d664d01305e..c5ec4f915594 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -357,8 +357,19 @@ static void mt_feature_mapping(struct hid_device *hdev,
break;
}
- td->inputmode = field->report->id;
- td->inputmode_index = usage->usage_index;
+ if (td->inputmode < 0) {
+ td->inputmode = field->report->id;
+ td->inputmode_index = usage->usage_index;
+ } else {
+ /*
+ * Some Elan panels wrongly declare two input mode
+ * features, and silently ignore the value we set in
+ * the second field. Skip the second feature
+ * and hope for the best.
+ */
+ dev_info(&hdev->dev,
+ "Ignoring the extra HID_DG_INPUTMODE\n");
+ }
break;
case HID_DG_CONTACTMAX:
@@ -385,6 +396,11 @@ static void mt_feature_mapping(struct hid_device *hdev,
td->is_buttonpad = true;
break;
+ case 0xff0000c5:
+ /* Retrieve the Win8 blob once to enable some devices */
+ if (usage->usage_index == 0)
+ mt_get_feature(hdev, field->report);
+ break;
}
}
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 10bd8e6e4c9c..0b80633bae91 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -282,17 +282,22 @@ static int i2c_hid_set_or_send_report(struct i2c_client *client, u8 reportType,
u16 dataRegister = le16_to_cpu(ihid->hdesc.wDataRegister);
u16 outputRegister = le16_to_cpu(ihid->hdesc.wOutputRegister);
u16 maxOutputLength = le16_to_cpu(ihid->hdesc.wMaxOutputLength);
+ u16 size;
+ int args_len;
+ int index = 0;
+
+ i2c_hid_dbg(ihid, "%s\n", __func__);
+
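+ /* reject reports larger than the preallocated transfer buffer */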
+ if (data_len > ihid->bufsize)
+ return -EINVAL;
- /* hid_hw_* already checked that data_len < HID_MAX_BUFFER_SIZE */
- u16 size = 2 /* size */ +
+ size = 2 /* size */ +
(reportID ? 1 : 0) /* reportID */ +
data_len /* buf */;
- int args_len = (reportID >= 0x0F ? 1 : 0) /* optional third byte */ +
+ args_len = (reportID >= 0x0F ? 1 : 0) /* optional third byte */ +
2 /* dataRegister */ +
size /* args */;
- int index = 0;
-
- i2c_hid_dbg(ihid, "%s\n", __func__);
if (!use_data && maxOutputLength == 0)
return -ENOSYS;
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 36712e9f56c2..0df32fe0e345 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -477,8 +477,6 @@ static void hid_ctrl(struct urb *urb)
struct usbhid_device *usbhid = hid->driver_data;
int unplug = 0, status = urb->status;
- spin_lock(&usbhid->lock);
-
switch (status) {
case 0: /* success */
if (usbhid->ctrl[usbhid->ctrltail].dir == USB_DIR_IN)
@@ -498,6 +496,8 @@ static void hid_ctrl(struct urb *urb)
hid_warn(urb->dev, "ctrl urb status %d received\n", status);
}
+ spin_lock(&usbhid->lock);
+
if (unplug) {
usbhid->ctrltail = usbhid->ctrlhead;
} else {
@@ -951,14 +951,6 @@ static int usbhid_output_report(struct hid_device *hid, __u8 *buf, size_t count)
return ret;
}
-static void usbhid_restart_queues(struct usbhid_device *usbhid)
-{
- if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl))
- usbhid_restart_out_queue(usbhid);
- if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
- usbhid_restart_ctrl_queue(usbhid);
-}
-
static void hid_free_buffers(struct usb_device *dev, struct hid_device *hid)
{
struct usbhid_device *usbhid = hid->driver_data;
@@ -1404,6 +1396,37 @@ static void hid_cease_io(struct usbhid_device *usbhid)
usb_kill_urb(usbhid->urbout);
}
+static void hid_restart_io(struct hid_device *hid)
+{
+ struct usbhid_device *usbhid = hid->driver_data;
+ int clear_halt = test_bit(HID_CLEAR_HALT, &usbhid->iofl);
+ int reset_pending = test_bit(HID_RESET_PENDING, &usbhid->iofl);
+
+ spin_lock_irq(&usbhid->lock);
+ clear_bit(HID_SUSPENDED, &usbhid->iofl);
+ usbhid_mark_busy(usbhid);
+
+ if (clear_halt || reset_pending)
+ schedule_work(&usbhid->reset_work);
+ usbhid->retry_delay = 0;
+ spin_unlock_irq(&usbhid->lock);
+
+ if (reset_pending || !test_bit(HID_STARTED, &usbhid->iofl))
+ return;
+
+ if (!clear_halt) {
+ if (hid_start_in(hid) < 0)
+ hid_io_error(hid);
+ }
+
+ spin_lock_irq(&usbhid->lock);
+ if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl))
+ usbhid_restart_out_queue(usbhid);
+ if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
+ usbhid_restart_ctrl_queue(usbhid);
+ spin_unlock_irq(&usbhid->lock);
+}
+
/* Treat USB reset pretty much the same as suspend/resume */
static int hid_pre_reset(struct usb_interface *intf)
{
@@ -1453,14 +1476,14 @@ static int hid_post_reset(struct usb_interface *intf)
return 1;
}
+ /* No need to do another reset or clear a halted endpoint */
spin_lock_irq(&usbhid->lock);
clear_bit(HID_RESET_PENDING, &usbhid->iofl);
+ clear_bit(HID_CLEAR_HALT, &usbhid->iofl);
spin_unlock_irq(&usbhid->lock);
hid_set_idle(dev, intf->cur_altsetting->desc.bInterfaceNumber, 0, 0);
- status = hid_start_in(hid);
- if (status < 0)
- hid_io_error(hid);
- usbhid_restart_queues(usbhid);
+
+ hid_restart_io(hid);
return 0;
}
@@ -1483,25 +1506,9 @@ void usbhid_put_power(struct hid_device *hid)
#ifdef CONFIG_PM
static int hid_resume_common(struct hid_device *hid, bool driver_suspended)
{
- struct usbhid_device *usbhid = hid->driver_data;
- int status;
-
- spin_lock_irq(&usbhid->lock);
- clear_bit(HID_SUSPENDED, &usbhid->iofl);
- usbhid_mark_busy(usbhid);
-
- if (test_bit(HID_CLEAR_HALT, &usbhid->iofl) ||
- test_bit(HID_RESET_PENDING, &usbhid->iofl))
- schedule_work(&usbhid->reset_work);
- usbhid->retry_delay = 0;
-
- usbhid_restart_queues(usbhid);
- spin_unlock_irq(&usbhid->lock);
-
- status = hid_start_in(hid);
- if (status < 0)
- hid_io_error(hid);
+ int status = 0;
+ hid_restart_io(hid);
if (driver_suspended && hid->driver && hid->driver->resume)
status = hid->driver->resume(hid);
return status;
@@ -1570,12 +1577,8 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
static int hid_resume(struct usb_interface *intf)
{
struct hid_device *hid = usb_get_intfdata (intf);
- struct usbhid_device *usbhid = hid->driver_data;
int status;
- if (!test_bit(HID_STARTED, &usbhid->iofl))
- return 0;
-
status = hid_resume_common(hid, true);
dev_dbg(&intf->dev, "resume status %d\n", status);
return 0;
@@ -1584,10 +1587,8 @@ static int hid_resume(struct usb_interface *intf)
static int hid_reset_resume(struct usb_interface *intf)
{
struct hid_device *hid = usb_get_intfdata(intf);
- struct usbhid_device *usbhid = hid->driver_data;
int status;
- clear_bit(HID_SUSPENDED, &usbhid->iofl);
status = hid_post_reset(intf);
if (status >= 0 && hid->driver && hid->driver->reset_resume) {
int ret = hid->driver->reset_resume(hid);
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 01a4f05c1642..3c0f47ac8e53 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2493,6 +2493,17 @@ void wacom_setup_device_quirks(struct wacom *wacom)
}
/*
+ * Hack for the Bamboo One:
+ * the device presents a PAD/Touch interface like most Bamboos and even
+ * sends ghost PAD data on it. However, we must later disable this
+ * ghost interface, and we cannot detect it unless we set it here
+ * to WACOM_DEVICETYPE_PAD or WACOM_DEVICETYPE_TOUCH.
+ */
+ if (features->type == BAMBOO_PEN &&
+ features->pktlen == WACOM_PKGLEN_BBTOUCH3)
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+
+ /*
* Raw Wacom-mode pen and touch events both come from interface
* 0, whose HID descriptor has an application usage of 0xFF0D
* (i.e., WACOM_VENDORDEFINED_PEN). We route pen packets back
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index c4dcab048cb8..9098f13f2f44 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -630,10 +630,19 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
* on the ring. We will not signal if more data is
* to be placed.
*
+ * Based on the channel signal state, we will decide
+ * which signaling policy will be applied.
+ *
* If we cannot write to the ring-buffer; signal the host
* even if we may not have written anything. This is a rare
* enough condition that it should not matter.
*/
+
+ if (channel->signal_policy)
+ signal = true;
+ else
+ kick_q = true;
+
if (((ret == 0) && kick_q && signal) || (ret))
vmbus_setevent(channel);
@@ -733,10 +742,19 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
* on the ring. We will not signal if more data is
* to be placed.
*
+ * Based on the channel signal state, we will decide
+ * which signaling policy will be applied.
+ *
* If we cannot write to the ring-buffer; signal the host
* even if we may not have written anything. This is a rare
* enough condition that it should not matter.
*/
+
+ if (channel->signal_policy)
+ signal = true;
+ else
+ kick_q = true;
+
if (((ret == 0) && kick_q && signal) || (ret))
vmbus_setevent(channel);
diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c
index f155b8380481..2b3105c8aed3 100644
--- a/drivers/hwmon/ads1015.c
+++ b/drivers/hwmon/ads1015.c
@@ -126,7 +126,7 @@ static int ads1015_reg_to_mv(struct i2c_client *client, unsigned int channel,
struct ads1015_data *data = i2c_get_clientdata(client);
unsigned int pga = data->channel_data[channel].pga;
int fullscale = fullscale_table[pga];
- const unsigned mask = data->id == ads1115 ? 0x7fff : 0x7ff0;
+ const int mask = data->id == ads1115 ? 0x7fff : 0x7ff0;
return DIV_ROUND_CLOSEST(reg * fullscale, mask);
}
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index c8487894b312..c43318d3416e 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -932,6 +932,17 @@ MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
{
/*
+ * CPU fan speed going up and down on Dell Studio XPS 8000
+ * for unknown reasons.
+ */
+ .ident = "Dell Studio XPS 8000",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8000"),
+ },
+ },
+ {
+ /*
* CPU fan speed going up and down on Dell Studio XPS 8100
* for unknown reasons.
*/
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index 82de3deeb18a..685568b1236d 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -406,16 +406,11 @@ static int gpio_fan_get_cur_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
struct gpio_fan_data *fan_data = cdev->devdata;
- int r;
if (!fan_data)
return -EINVAL;
- r = get_fan_speed_index(fan_data);
- if (r < 0)
- return r;
-
- *state = r;
+ *state = fan_data->speed_index;
return 0;
}
diff --git a/drivers/hwmon/max1111.c b/drivers/hwmon/max1111.c
index 36544c4f653c..303d0c9df907 100644
--- a/drivers/hwmon/max1111.c
+++ b/drivers/hwmon/max1111.c
@@ -85,6 +85,10 @@ static struct max1111_data *the_max1111;
int max1111_read_channel(int channel)
{
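+ /* the device may not have probed yet, or may already be gone */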
+ if (!the_max1111 || !the_max1111->spi)
+ return -ENODEV;
+
return max1111_read(&the_max1111->spi->dev, channel);
}
EXPORT_SYMBOL(max1111_read_channel);
@@ -258,6 +262,9 @@ static int max1111_remove(struct spi_device *spi)
{
struct max1111_data *data = spi_get_drvdata(spi);
+#ifdef CONFIG_SHARPSL_PM
+ the_max1111 = NULL;
+#endif
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&spi->dev.kobj, &max1110_attr_group);
sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group);
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
index 52f708bcf77f..d50c701b19d6 100644
--- a/drivers/hwspinlock/hwspinlock_core.c
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -313,6 +313,11 @@ int of_hwspin_lock_get_id(struct device_node *np, int index)
hwlock = radix_tree_deref_slot(slot);
if (unlikely(!hwlock))
continue;
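+ /* the slot was moved by a concurrent tree update; retry the walk */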
+ if (radix_tree_is_indirect_ptr(hwlock)) {
+ slot = radix_tree_iter_retry(&iter);
+ continue;
+ }
if (hwlock->bank->dev->of_node == args.np) {
ret = 0;
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index e25492137d8b..93738dfbf631 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -548,7 +548,7 @@ static int coresight_name_match(struct device *dev, void *data)
to_match = data;
i_csdev = to_coresight_device(dev);
- if (!strcmp(to_match, dev_name(&i_csdev->dev)))
+ if (to_match && !strcmp(to_match, dev_name(&i_csdev->dev)))
return 1;
return 0;
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index 8e9637eea512..81115abf3c1f 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -562,8 +562,7 @@ static int brcmstb_i2c_probe(struct platform_device *pdev)
if (!dev)
return -ENOMEM;
- dev->bsc_regmap = devm_kzalloc(&pdev->dev, sizeof(struct bsc_regs *),
- GFP_KERNEL);
+ dev->bsc_regmap = devm_kzalloc(&pdev->dev, sizeof(*dev->bsc_regmap), GFP_KERNEL);
if (!dev->bsc_regmap)
return -ENOMEM;
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index f62d69799a9c..27fa0cb09538 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1271,6 +1271,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
switch (dev->device) {
case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS:
case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS:
+ case PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS:
+ case PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS:
case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
priv->features |= FEATURE_I2C_BLOCK_READ;
priv->features |= FEATURE_IRQ;
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index cd4510a63375..146eed70bdf4 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -65,7 +65,7 @@
#include <asm/mwait.h>
#include <asm/msr.h>
-#define INTEL_IDLE_VERSION "0.4"
+#define INTEL_IDLE_VERSION "0.4.1"
#define PREFIX "intel_idle: "
static struct cpuidle_driver intel_idle_driver = {
@@ -994,36 +994,91 @@ static void intel_idle_cpuidle_devices_uninit(void)
}
/*
- * intel_idle_state_table_update()
- *
- * Update the default state_table for this CPU-id
+ * ivt_idle_state_table_update(void)
*
- * Currently used to access tuned IVT multi-socket targets
+ * Tune IVT multi-socket targets
* Assumption: num_sockets == (max_package_num + 1)
*/
-void intel_idle_state_table_update(void)
+static void ivt_idle_state_table_update(void)
{
/* IVT uses a different table for 1-2, 3-4, and > 4 sockets */
- if (boot_cpu_data.x86_model == 0x3e) { /* IVT */
- int cpu, package_num, num_sockets = 1;
-
- for_each_online_cpu(cpu) {
- package_num = topology_physical_package_id(cpu);
- if (package_num + 1 > num_sockets) {
- num_sockets = package_num + 1;
-
- if (num_sockets > 4) {
- cpuidle_state_table = ivt_cstates_8s;
- return;
- }
+ int cpu, package_num, num_sockets = 1;
+
+ for_each_online_cpu(cpu) {
+ package_num = topology_physical_package_id(cpu);
+ if (package_num + 1 > num_sockets) {
+ num_sockets = package_num + 1;
+
+ if (num_sockets > 4) {
+ cpuidle_state_table = ivt_cstates_8s;
+ return;
}
}
+ }
+
+ if (num_sockets > 2)
+ cpuidle_state_table = ivt_cstates_4s;
+
+ /* else, 1 and 2 socket systems use default ivt_cstates */
+}
+/*
+ * sklh_idle_state_table_update(void)
+ *
+ * On SKL-H (model 0x5e) disable C8 and C9 if
+ * C10 is enabled and SGX is disabled.
+ */
+static void sklh_idle_state_table_update(void)
+{
+ unsigned long long msr;
+ unsigned int eax, ebx, ecx, edx;
+
+ /* if PC10 disabled via cmdline intel_idle.max_cstate=7 or shallower */
+ if (max_cstate <= 7)
+ return;
+
+ /* if PC10 not present in CPUID.MWAIT.EDX */
+ if ((mwait_substates & (0xF << 28)) == 0)
+ return;
+
+ rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr);
+
+ /* PC10 is not enabled in PKG C-state limit */
+ if ((msr & 0xF) != 8)
+ return;
+
+ ecx = 0;
+ cpuid(7, &eax, &ebx, &ecx, &edx);
+
+ /* if SGX is present */
+ if (ebx & (1 << 2)) {
- if (num_sockets > 2)
- cpuidle_state_table = ivt_cstates_4s;
- /* else, 1 and 2 socket systems use default ivt_cstates */
+ rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
+
+ /* if SGX is enabled */
+ if (msr & (1 << 18))
+ return;
+ }
+
+ skl_cstates[5].disabled = 1; /* C8-SKL */
+ skl_cstates[6].disabled = 1; /* C9-SKL */
+}
+/*
+ * intel_idle_state_table_update()
+ *
+ * Update the default state_table for this CPU-id
+ */
+
+static void intel_idle_state_table_update(void)
+{
+ switch (boot_cpu_data.x86_model) {
+
+ case 0x3e: /* IVT */
+ ivt_idle_state_table_update();
+ break;
+ case 0x5e: /* SKL-H */
+ sklh_idle_state_table_update();
+ break;
}
- return;
}
/*
@@ -1063,6 +1118,13 @@ static int __init intel_idle_cpuidle_driver_init(void)
if (num_substates == 0)
continue;
+ /* if state marked as disabled, skip it */
+ if (cpuidle_state_table[cstate].disabled != 0) {
+ pr_debug(PREFIX "state %s is disabled\n",
+ cpuidle_state_table[cstate].name);
+ continue;
+ }
+
if (((mwait_cstate + 1) > 2) &&
!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
mark_tsc_unstable("TSC halts in idle"
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index 969428dd6329..16cc5c691a55 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -173,6 +173,7 @@ config STK8312
config STK8BA50
tristate "Sensortek STK8BA50 3-Axis Accelerometer Driver"
depends on I2C
+ depends on IIO_TRIGGER
help
Say yes here to get support for the Sensortek STK8BA50 3-axis
accelerometer.
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index 2d33f1e821db..291c61a41c9a 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -547,7 +547,7 @@ static int bmc150_accel_get_axis(struct bmc150_accel_data *data,
{
int ret;
int axis = chan->scan_index;
- unsigned int raw_val;
+ __le16 raw_val;
mutex_lock(&data->mutex);
ret = bmc150_accel_set_power_state(data, true);
@@ -557,14 +557,14 @@ static int bmc150_accel_get_axis(struct bmc150_accel_data *data,
}
ret = regmap_bulk_read(data->regmap, BMC150_ACCEL_AXIS_TO_REG(axis),
- &raw_val, 2);
+ &raw_val, sizeof(raw_val));
if (ret < 0) {
dev_err(data->dev, "Error reading axis %d\n", axis);
bmc150_accel_set_power_state(data, false);
mutex_unlock(&data->mutex);
return ret;
}
- *val = sign_extend32(raw_val >> chan->scan_type.shift,
+ *val = sign_extend32(le16_to_cpu(raw_val) >> chan->scan_type.shift,
chan->scan_type.realbits - 1);
ret = bmc150_accel_set_power_state(data, false);
mutex_unlock(&data->mutex);
@@ -988,6 +988,7 @@ static const struct iio_event_spec bmc150_accel_event = {
.realbits = (bits), \
.storagebits = 16, \
.shift = 16 - (bits), \
+ .endianness = IIO_LE, \
}, \
.event_spec = &bmc150_accel_event, \
.num_event_specs = 1 \
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 7868c744fd4b..1e7aded53117 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -372,6 +372,7 @@ config TWL6030_GPADC
config VF610_ADC
tristate "Freescale vf610 ADC driver"
depends on OF
+ depends on HAS_IOMEM
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index 942320e32753..c1e05532d437 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -289,7 +289,7 @@ static int tiadc_iio_buffered_hardware_setup(struct iio_dev *indio_dev,
goto error_kfifo_free;
indio_dev->setup_ops = setup_ops;
- indio_dev->modes |= INDIO_BUFFER_HARDWARE;
+ indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
return 0;
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index 43d14588448d..b4dde8315210 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -300,6 +300,7 @@ static int mcp4725_probe(struct i2c_client *client,
data->client = client;
indio_dev->dev.parent = &client->dev;
+ indio_dev->name = id->name;
indio_dev->info = &mcp4725_info;
indio_dev->channels = &mcp4725_channel;
indio_dev->num_channels = 1;
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index 02ff789852a0..acb3b303d800 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -452,7 +452,7 @@ static int bmg160_get_temp(struct bmg160_data *data, int *val)
static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
{
int ret;
- unsigned int raw_val;
+ __le16 raw_val;
mutex_lock(&data->mutex);
ret = bmg160_set_power_state(data, true);
@@ -462,7 +462,7 @@ static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
}
ret = regmap_bulk_read(data->regmap, BMG160_AXIS_TO_REG(axis), &raw_val,
- 2);
+ sizeof(raw_val));
if (ret < 0) {
dev_err(data->dev, "Error reading axis %d\n", axis);
bmg160_set_power_state(data, false);
@@ -470,7 +470,7 @@ static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
return ret;
}
- *val = sign_extend32(raw_val, 15);
+ *val = sign_extend32(le16_to_cpu(raw_val), 15);
ret = bmg160_set_power_state(data, false);
mutex_unlock(&data->mutex);
if (ret < 0)
@@ -733,6 +733,7 @@ static const struct iio_event_spec bmg160_event = {
.sign = 's', \
.realbits = 16, \
.storagebits = 16, \
+ .endianness = IIO_LE, \
}, \
.event_spec = &bmg160_event, \
.num_event_specs = 1 \
@@ -780,7 +781,7 @@ static irqreturn_t bmg160_trigger_handler(int irq, void *p)
mutex_unlock(&data->mutex);
goto err;
}
- data->buffer[i++] = ret;
+ data->buffer[i++] = val;
}
mutex_unlock(&data->mutex);
diff --git a/drivers/iio/imu/adis_buffer.c b/drivers/iio/imu/adis_buffer.c
index cb32b593f1c5..36607d52fee0 100644
--- a/drivers/iio/imu/adis_buffer.c
+++ b/drivers/iio/imu/adis_buffer.c
@@ -43,7 +43,7 @@ int adis_update_scan_mode(struct iio_dev *indio_dev,
return -ENOMEM;
rx = adis->buffer;
- tx = rx + indio_dev->scan_bytes;
+ tx = rx + scan_count;
spi_message_init(&adis->msg);
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index c8bad3cf891d..217e9306aa0f 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -351,6 +351,8 @@ EXPORT_SYMBOL_GPL(iio_channel_get);
void iio_channel_release(struct iio_channel *channel)
{
+ if (!channel)
+ return;
iio_device_put(channel->indio_dev);
kfree(channel);
}
diff --git a/drivers/iio/light/acpi-als.c b/drivers/iio/light/acpi-als.c
index 60537ec0c923..53201d99a16c 100644
--- a/drivers/iio/light/acpi-als.c
+++ b/drivers/iio/light/acpi-als.c
@@ -54,7 +54,9 @@ static const struct iio_chan_spec acpi_als_channels[] = {
.realbits = 32,
.storagebits = 32,
},
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ /* _RAW is here for backward ABI compatibility */
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_PROCESSED),
},
};
@@ -152,7 +154,7 @@ static int acpi_als_read_raw(struct iio_dev *indio_dev,
s32 temp_val;
int ret;
- if (mask != IIO_CHAN_INFO_RAW)
+ if ((mask != IIO_CHAN_INFO_PROCESSED) && (mask != IIO_CHAN_INFO_RAW))
return -EINVAL;
/* we support only illumination (_ALI) so far. */
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index 809a961b9a7f..6bf89d8f3741 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -180,7 +180,7 @@ static const struct ltr501_samp_table ltr501_ps_samp_table[] = {
{500000, 2000000}
};
-static unsigned int ltr501_match_samp_freq(const struct ltr501_samp_table *tab,
+static int ltr501_match_samp_freq(const struct ltr501_samp_table *tab,
int len, int val, int val2)
{
int i, freq;
diff --git a/drivers/iio/magnetometer/st_magn.h b/drivers/iio/magnetometer/st_magn.h
index 06a4d9c35581..9daca4681922 100644
--- a/drivers/iio/magnetometer/st_magn.h
+++ b/drivers/iio/magnetometer/st_magn.h
@@ -44,6 +44,7 @@ static inline int st_magn_allocate_ring(struct iio_dev *indio_dev)
static inline void st_magn_deallocate_ring(struct iio_dev *indio_dev)
{
}
+#define ST_MAGN_TRIGGER_SET_STATE NULL
#endif /* CONFIG_IIO_BUFFER */
#endif /* ST_MAGN_H */
diff --git a/drivers/iio/pressure/mpl115.c b/drivers/iio/pressure/mpl115.c
index f5ecd6e19f5d..a0d7deeac62f 100644
--- a/drivers/iio/pressure/mpl115.c
+++ b/drivers/iio/pressure/mpl115.c
@@ -117,7 +117,7 @@ static int mpl115_read_raw(struct iio_dev *indio_dev,
*val = ret >> 6;
return IIO_VAL_INT;
case IIO_CHAN_INFO_OFFSET:
- *val = 605;
+ *val = -605;
*val2 = 750000;
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_SCALE:
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 0a26dd6d9b19..d6d2b3582910 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -782,11 +782,11 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);
/* Check if the device started its remove_one */
- spin_lock_irq(&cm.lock);
+ spin_lock_irqsave(&cm.lock, flags);
if (!cm_dev->going_down)
queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
msecs_to_jiffies(wait_time));
- spin_unlock_irq(&cm.lock);
+ spin_unlock_irqrestore(&cm.lock, flags);
cm_id_priv->timewait_info = NULL;
}
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 2d762a2ecd81..17a15c56028c 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -453,7 +453,7 @@ static inline int cma_validate_port(struct ib_device *device, u8 port,
if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
return ret;
- if (dev_type == ARPHRD_ETHER)
+ if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port))
ndev = dev_get_by_index(&init_net, bound_if_index);
ret = ib_find_cached_gid_by_port(device, gid, port, ndev, NULL);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index cb78b1e9bcd9..f504ba73e5dc 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -149,7 +149,7 @@ static int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_en
error = l2t_send(tdev, skb, l2e);
if (error < 0)
kfree_skb(skb);
- return error;
+ return error < 0 ? error : 0;
}
int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
@@ -165,7 +165,7 @@ int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
error = cxgb3_ofld_send(tdev, skb);
if (error < 0)
kfree_skb(skb);
- return error;
+ return error < 0 ? error : 0;
}
static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 3dfd287256d6..92ddae101ecc 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -756,7 +756,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
int uninitialized_var(index);
int uninitialized_var(inlen);
int cqe_size;
- int irqn;
+ unsigned int irqn;
int eqn;
int err;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 7e97cb55a6bf..c4e091528390 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -275,7 +275,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->max_sge = min(max_rq_sg, max_sq_sg);
props->max_sge_rd = props->max_sge;
props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
- props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_eq_sz)) - 1;
+ props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
props->max_pd = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
props->max_qp_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 40f85bb3e0d3..3eff35c2d453 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -100,9 +100,10 @@ static u32 credit_table[31] = {
32768 /* 1E */
};
-static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
+static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map,
+ gfp_t gfp)
{
- unsigned long page = get_zeroed_page(GFP_KERNEL);
+ unsigned long page = get_zeroed_page(gfp);
/*
* Free the page if someone raced with us installing it.
@@ -121,7 +122,7 @@ static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
* zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
*/
static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
- enum ib_qp_type type, u8 port)
+ enum ib_qp_type type, u8 port, gfp_t gfp)
{
u32 i, offset, max_scan, qpn;
struct qpn_map *map;
@@ -151,7 +152,7 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
max_scan = qpt->nmaps - !offset;
for (i = 0;;) {
if (unlikely(!map->page)) {
- get_map_page(qpt, map);
+ get_map_page(qpt, map, gfp);
if (unlikely(!map->page))
break;
}
@@ -983,13 +984,21 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
size_t sz;
size_t sg_list_sz;
struct ib_qp *ret;
+ gfp_t gfp;
+
if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
init_attr->cap.max_send_wr > ib_qib_max_qp_wrs ||
- init_attr->create_flags) {
- ret = ERR_PTR(-EINVAL);
- goto bail;
- }
+ init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
+ return ERR_PTR(-EINVAL);
+
+ /* GFP_NOIO is applicable in RC QPs only */
+ if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
+ init_attr->qp_type != IB_QPT_RC)
+ return ERR_PTR(-EINVAL);
+
+ gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
+ GFP_NOIO : GFP_KERNEL;
/* Check receive queue parameters if no SRQ is specified. */
if (!init_attr->srq) {
@@ -1021,7 +1030,8 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
sz = sizeof(struct qib_sge) *
init_attr->cap.max_send_sge +
sizeof(struct qib_swqe);
- swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
+ swq = __vmalloc((init_attr->cap.max_send_wr + 1) * sz,
+ gfp, PAGE_KERNEL);
if (swq == NULL) {
ret = ERR_PTR(-ENOMEM);
goto bail;
@@ -1037,13 +1047,13 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
} else if (init_attr->cap.max_recv_sge > 1)
sg_list_sz = sizeof(*qp->r_sg_list) *
(init_attr->cap.max_recv_sge - 1);
- qp = kzalloc(sz + sg_list_sz, GFP_KERNEL);
+ qp = kzalloc(sz + sg_list_sz, gfp);
if (!qp) {
ret = ERR_PTR(-ENOMEM);
goto bail_swq;
}
RCU_INIT_POINTER(qp->next, NULL);
- qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), GFP_KERNEL);
+ qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), gfp);
if (!qp->s_hdr) {
ret = ERR_PTR(-ENOMEM);
goto bail_qp;
@@ -1058,8 +1068,16 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
sizeof(struct qib_rwqe);
- qp->r_rq.wq = vmalloc_user(sizeof(struct qib_rwq) +
- qp->r_rq.size * sz);
+ if (gfp != GFP_NOIO)
+ qp->r_rq.wq = vmalloc_user(
+ sizeof(struct qib_rwq) +
+ qp->r_rq.size * sz);
+ else
+ qp->r_rq.wq = __vmalloc(
+ sizeof(struct qib_rwq) +
+ qp->r_rq.size * sz,
+ gfp, PAGE_KERNEL);
+
if (!qp->r_rq.wq) {
ret = ERR_PTR(-ENOMEM);
goto bail_qp;
@@ -1090,7 +1108,7 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
dev = to_idev(ibpd->device);
dd = dd_from_dev(dev);
err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type,
- init_attr->port_num);
+ init_attr->port_num, gfp);
if (err < 0) {
ret = ERR_PTR(err);
vfree(qp->r_rq.wq);
diff --git a/drivers/infiniband/hw/qib/qib_verbs_mcast.c b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
index f8ea069a3eaf..b2fb5286dbd9 100644
--- a/drivers/infiniband/hw/qib/qib_verbs_mcast.c
+++ b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
@@ -286,15 +286,13 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
struct qib_ibdev *dev = to_idev(ibqp->device);
struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num);
struct qib_mcast *mcast = NULL;
- struct qib_mcast_qp *p, *tmp;
+ struct qib_mcast_qp *p, *tmp, *delp = NULL;
struct rb_node *n;
int last = 0;
int ret;
- if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
- ret = -EINVAL;
- goto bail;
- }
+ if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
+ return -EINVAL;
spin_lock_irq(&ibp->lock);
@@ -303,8 +301,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
while (1) {
if (n == NULL) {
spin_unlock_irq(&ibp->lock);
- ret = -EINVAL;
- goto bail;
+ return -EINVAL;
}
mcast = rb_entry(n, struct qib_mcast, rb_node);
@@ -328,6 +325,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
*/
list_del_rcu(&p->list);
mcast->n_attached--;
+ delp = p;
/* If this was the last attached QP, remove the GID too. */
if (list_empty(&mcast->qp_list)) {
@@ -338,15 +336,16 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
}
spin_unlock_irq(&ibp->lock);
+ /* QP not attached */
+ if (!delp)
+ return -EINVAL;
+ /*
+ * Wait for any list walkers to finish before freeing the
+ * list element.
+ */
+ wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
+ qib_mcast_qp_free(delp);
- if (p) {
- /*
- * Wait for any list walkers to finish before freeing the
- * list element.
- */
- wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
- qib_mcast_qp_free(p);
- }
if (last) {
atomic_dec(&mcast->refcount);
wait_event(mcast->wait, !atomic_read(&mcast->refcount));
@@ -355,11 +354,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
dev->n_mcast_grps_allocated--;
spin_unlock_irq(&dev->n_mcast_grps_lock);
}
-
- ret = 0;
-
-bail:
- return ret;
+ return 0;
}
int qib_mcast_tree_empty(struct qib_ibport *ibp)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index f357ca67a41c..87799de90a1d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -456,7 +456,10 @@ out_locked:
return status;
}
-static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
+/*
+ * Caller must hold 'priv->lock'
+ */
+static int ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ib_sa_multicast *multicast;
@@ -466,6 +469,10 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
ib_sa_comp_mask comp_mask;
int ret = 0;
+ if (!priv->broadcast ||
+ !test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
+ return -EINVAL;
+
ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw);
rec.mgid = mcast->mcmember.mgid;
@@ -525,20 +532,23 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
rec.join_state = 4;
#endif
}
+ spin_unlock_irq(&priv->lock);
multicast = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
&rec, comp_mask, GFP_KERNEL,
ipoib_mcast_join_complete, mcast);
+ spin_lock_irq(&priv->lock);
if (IS_ERR(multicast)) {
ret = PTR_ERR(multicast);
ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret);
- spin_lock_irq(&priv->lock);
/* Requeue this join task with a backoff delay */
__ipoib_mcast_schedule_join_thread(priv, mcast, 1);
clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
spin_unlock_irq(&priv->lock);
complete(&mcast->done);
+ spin_lock_irq(&priv->lock);
}
+ return 0;
}
void ipoib_mcast_join_task(struct work_struct *work)
@@ -620,9 +630,10 @@ void ipoib_mcast_join_task(struct work_struct *work)
/* Found the next unjoined group */
init_completion(&mcast->done);
set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
- spin_unlock_irq(&priv->lock);
- ipoib_mcast_join(dev, mcast);
- spin_lock_irq(&priv->lock);
+ if (ipoib_mcast_join(dev, mcast)) {
+ spin_unlock_irq(&priv->lock);
+ return;
+ }
} else if (!delay_until ||
time_before(mcast->delay_until, delay_until))
delay_until = mcast->delay_until;
@@ -641,10 +652,9 @@ out:
if (mcast) {
init_completion(&mcast->done);
set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+ ipoib_mcast_join(dev, mcast);
}
spin_unlock_irq(&priv->lock);
- if (mcast)
- ipoib_mcast_join(dev, mcast);
}
int ipoib_mcast_start_thread(struct net_device *dev)
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 8a51c3b5d657..b0edb66a291b 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -66,6 +66,7 @@ isert_rdma_accept(struct isert_conn *isert_conn);
struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
static void isert_release_work(struct work_struct *work);
+static void isert_wait4flush(struct isert_conn *isert_conn);
static inline bool
isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
@@ -815,12 +816,31 @@ isert_put_conn(struct isert_conn *isert_conn)
kref_put(&isert_conn->kref, isert_release_kref);
}
+static void
+isert_handle_unbound_conn(struct isert_conn *isert_conn)
+{
+ struct isert_np *isert_np = isert_conn->cm_id->context;
+
+ mutex_lock(&isert_np->mutex);
+ if (!list_empty(&isert_conn->node)) {
+ /*
+ * This means iscsi doesn't know this connection
+ * so schedule a cleanup ourselves
+ */
+ list_del_init(&isert_conn->node);
+ isert_put_conn(isert_conn);
+ complete(&isert_conn->wait);
+ queue_work(isert_release_wq, &isert_conn->release_work);
+ }
+ mutex_unlock(&isert_np->mutex);
+}
+
/**
* isert_conn_terminate() - Initiate connection termination
* @isert_conn: isert connection struct
*
* Notes:
- * In case the connection state is FULL_FEATURE, move state
+ * In case the connection state is BOUND, move state
* to TERMINATING and start teardown sequence (rdma_disconnect).
* In case the connection state is UP, complete flush as well.
*
@@ -832,23 +852,19 @@ isert_conn_terminate(struct isert_conn *isert_conn)
{
int err;
- switch (isert_conn->state) {
- case ISER_CONN_TERMINATING:
- break;
- case ISER_CONN_UP:
- case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
- isert_info("Terminating conn %p state %d\n",
- isert_conn, isert_conn->state);
- isert_conn->state = ISER_CONN_TERMINATING;
- err = rdma_disconnect(isert_conn->cm_id);
- if (err)
- isert_warn("Failed rdma_disconnect isert_conn %p\n",
- isert_conn);
- break;
- default:
- isert_warn("conn %p teminating in state %d\n",
- isert_conn, isert_conn->state);
- }
+ if (isert_conn->state >= ISER_CONN_TERMINATING)
+ return;
+
+ isert_info("Terminating conn %p state %d\n",
+ isert_conn, isert_conn->state);
+ isert_conn->state = ISER_CONN_TERMINATING;
+ err = rdma_disconnect(isert_conn->cm_id);
+ if (err)
+ isert_warn("Failed rdma_disconnect isert_conn %p\n",
+ isert_conn);
+
+ isert_info("conn %p completing wait\n", isert_conn);
+ complete(&isert_conn->wait);
}
static int
@@ -882,35 +898,27 @@ static int
isert_disconnected_handler(struct rdma_cm_id *cma_id,
enum rdma_cm_event_type event)
{
- struct isert_np *isert_np = cma_id->context;
- struct isert_conn *isert_conn;
- bool terminating = false;
-
- if (isert_np->cm_id == cma_id)
- return isert_np_cma_handler(cma_id->context, event);
-
- isert_conn = cma_id->qp->qp_context;
+ struct isert_conn *isert_conn = cma_id->qp->qp_context;
mutex_lock(&isert_conn->mutex);
- terminating = (isert_conn->state == ISER_CONN_TERMINATING);
- isert_conn_terminate(isert_conn);
- mutex_unlock(&isert_conn->mutex);
-
- isert_info("conn %p completing wait\n", isert_conn);
- complete(&isert_conn->wait);
-
- if (terminating)
- goto out;
-
- mutex_lock(&isert_np->mutex);
- if (!list_empty(&isert_conn->node)) {
- list_del_init(&isert_conn->node);
- isert_put_conn(isert_conn);
- queue_work(isert_release_wq, &isert_conn->release_work);
+ switch (isert_conn->state) {
+ case ISER_CONN_TERMINATING:
+ break;
+ case ISER_CONN_UP:
+ isert_conn_terminate(isert_conn);
+ isert_wait4flush(isert_conn);
+ isert_handle_unbound_conn(isert_conn);
+ break;
+ case ISER_CONN_BOUND:
+ case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
+ iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+ break;
+ default:
+ isert_warn("conn %p teminating in state %d\n",
+ isert_conn, isert_conn->state);
}
- mutex_unlock(&isert_np->mutex);
+ mutex_unlock(&isert_conn->mutex);
-out:
return 0;
}
@@ -929,12 +937,16 @@ isert_connect_error(struct rdma_cm_id *cma_id)
static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
+ struct isert_np *isert_np = cma_id->context;
int ret = 0;
isert_info("%s (%d): status %d id %p np %p\n",
rdma_event_msg(event->event), event->event,
event->status, cma_id, cma_id->context);
+ if (isert_np->cm_id == cma_id)
+ return isert_np_cma_handler(cma_id->context, event->event);
+
switch (event->event) {
case RDMA_CM_EVENT_CONNECT_REQUEST:
ret = isert_connect_request(cma_id, event);
@@ -980,13 +992,10 @@ isert_post_recvm(struct isert_conn *isert_conn, u32 count)
rx_wr--;
rx_wr->next = NULL; /* mark end of work requests list */
- isert_conn->post_recv_buf_count += count;
ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr,
&rx_wr_failed);
- if (ret) {
+ if (ret)
isert_err("ib_post_recv() failed with ret: %d\n", ret);
- isert_conn->post_recv_buf_count -= count;
- }
return ret;
}
@@ -1002,12 +1011,9 @@ isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
rx_wr.num_sge = 1;
rx_wr.next = NULL;
- isert_conn->post_recv_buf_count++;
ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_failed);
- if (ret) {
+ if (ret)
isert_err("ib_post_recv() failed with ret: %d\n", ret);
- isert_conn->post_recv_buf_count--;
- }
return ret;
}
@@ -1120,12 +1126,9 @@ isert_rdma_post_recvl(struct isert_conn *isert_conn)
rx_wr.sg_list = &sge;
rx_wr.num_sge = 1;
- isert_conn->post_recv_buf_count++;
ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail);
- if (ret) {
+ if (ret)
isert_err("ib_post_recv() failed: %d\n", ret);
- isert_conn->post_recv_buf_count--;
- }
return ret;
}
@@ -1620,7 +1623,6 @@ isert_rcv_completion(struct iser_rx_desc *desc,
ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
DMA_FROM_DEVICE);
- isert_conn->post_recv_buf_count--;
}
static int
@@ -2035,7 +2037,8 @@ is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id)
void *start = isert_conn->rx_descs;
int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->rx_descs);
- if (wr_id >= start && wr_id < start + len)
+ if ((wr_id >= start && wr_id < start + len) ||
+ (wr_id == isert_conn->login_req_buf))
return false;
return true;
@@ -2059,10 +2062,6 @@ isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc)
isert_unmap_tx_desc(desc, ib_dev);
else
isert_completion_put(desc, isert_cmd, ib_dev, true);
- } else {
- isert_conn->post_recv_buf_count--;
- if (!isert_conn->post_recv_buf_count)
- iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
}
}
@@ -3193,6 +3192,7 @@ accept_wait:
conn->context = isert_conn;
isert_conn->conn = conn;
+ isert_conn->state = ISER_CONN_BOUND;
isert_set_conn_info(np, conn, isert_conn);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 3d7fbc47c343..1874d21daee0 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -50,6 +50,7 @@ enum iser_ib_op_code {
enum iser_conn_state {
ISER_CONN_INIT,
ISER_CONN_UP,
+ ISER_CONN_BOUND,
ISER_CONN_FULL_FEATURE,
ISER_CONN_TERMINATING,
ISER_CONN_DOWN,
@@ -144,7 +145,6 @@ struct isert_device;
struct isert_conn {
enum iser_conn_state state;
- int post_recv_buf_count;
u32 responder_resources;
u32 initiator_depth;
bool pi_support;
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 2e2fe818ca9f..eaabf3125846 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1737,47 +1737,6 @@ send_sense:
return -1;
}
-/**
- * srpt_rx_mgmt_fn_tag() - Process a task management function by tag.
- * @ch: RDMA channel of the task management request.
- * @fn: Task management function to perform.
- * @req_tag: Tag of the SRP task management request.
- * @mgmt_ioctx: I/O context of the task management request.
- *
- * Returns zero if the target core will process the task management
- * request asynchronously.
- *
- * Note: It is assumed that the initiator serializes tag-based task management
- * requests.
- */
-static int srpt_rx_mgmt_fn_tag(struct srpt_send_ioctx *ioctx, u64 tag)
-{
- struct srpt_device *sdev;
- struct srpt_rdma_ch *ch;
- struct srpt_send_ioctx *target;
- int ret, i;
-
- ret = -EINVAL;
- ch = ioctx->ch;
- BUG_ON(!ch);
- BUG_ON(!ch->sport);
- sdev = ch->sport->sdev;
- BUG_ON(!sdev);
- spin_lock_irq(&sdev->spinlock);
- for (i = 0; i < ch->rq_size; ++i) {
- target = ch->ioctx_ring[i];
- if (target->cmd.se_lun == ioctx->cmd.se_lun &&
- target->cmd.tag == tag &&
- srpt_get_cmd_state(target) != SRPT_STATE_DONE) {
- ret = 0;
- /* now let the target core abort &target->cmd; */
- break;
- }
- }
- spin_unlock_irq(&sdev->spinlock);
- return ret;
-}
-
static int srp_tmr_to_tcm(int fn)
{
switch (fn) {
@@ -1812,7 +1771,6 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
struct se_cmd *cmd;
struct se_session *sess = ch->sess;
uint64_t unpacked_lun;
- uint32_t tag = 0;
int tcm_tmr;
int rc;
@@ -1828,25 +1786,10 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
send_ioctx->cmd.tag = srp_tsk->tag;
tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
- if (tcm_tmr < 0) {
- send_ioctx->cmd.se_tmr_req->response =
- TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
- goto fail;
- }
unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_tsk->lun,
sizeof(srp_tsk->lun));
-
- if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK) {
- rc = srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
- if (rc < 0) {
- send_ioctx->cmd.se_tmr_req->response =
- TMR_TASK_DOES_NOT_EXIST;
- goto fail;
- }
- tag = srp_tsk->task_tag;
- }
rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL, unpacked_lun,
- srp_tsk, tcm_tmr, GFP_KERNEL, tag,
+ srp_tsk, tcm_tmr, GFP_KERNEL, srp_tsk->task_tag,
TARGET_SCF_ACK_KREF);
if (rc != 0) {
send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 25ac47b9a180..32dc3bff73e6 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -812,4 +812,12 @@ config INPUT_DRV2667_HAPTICS
To compile this driver as a module, choose M here: the
module will be called drv2667-haptics.
+config HISI_POWERKEY
+ tristate "Hisilicon PMIC ONKEY support"
+ help
+ Say Y here to enable support for the power key (ONKEY) on Hisilicon PMICs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hisi_powerkey.
+
endif
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 66c3cc9f181c..001b16abb22e 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -77,3 +77,4 @@ obj-$(CONFIG_INPUT_WM831X_ON) += wm831x-on.o
obj-$(CONFIG_INPUT_XEN_KBDDEV_FRONTEND) += xen-kbdfront.o
obj-$(CONFIG_INPUT_YEALINK) += yealink.o
obj-$(CONFIG_INPUT_IDEAPAD_SLIDEBAR) += ideapad_slidebar.o
+obj-$(CONFIG_HISI_POWERKEY) += hisi_powerkey.o
diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c
index cfd58e87da26..1c5914cae853 100644
--- a/drivers/input/misc/ati_remote2.c
+++ b/drivers/input/misc/ati_remote2.c
@@ -817,26 +817,49 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
ar2->udev = udev;
+ /* Sanity check, first interface must have an endpoint */
+ if (alt->desc.bNumEndpoints < 1 || !alt->endpoint) {
+ dev_err(&interface->dev,
+ "%s(): interface 0 must have an endpoint\n", __func__);
+ r = -ENODEV;
+ goto fail1;
+ }
ar2->intf[0] = interface;
ar2->ep[0] = &alt->endpoint[0].desc;
+ /* Sanity check, the device must have two interfaces */
ar2->intf[1] = usb_ifnum_to_if(udev, 1);
+ if ((udev->actconfig->desc.bNumInterfaces < 2) || !ar2->intf[1]) {
+ dev_err(&interface->dev, "%s(): need 2 interfaces, found %d\n",
+ __func__, udev->actconfig->desc.bNumInterfaces);
+ r = -ENODEV;
+ goto fail1;
+ }
+
r = usb_driver_claim_interface(&ati_remote2_driver, ar2->intf[1], ar2);
if (r)
goto fail1;
+
+ /* Sanity check, second interface must have an endpoint */
alt = ar2->intf[1]->cur_altsetting;
+ if (alt->desc.bNumEndpoints < 1 || !alt->endpoint) {
+ dev_err(&interface->dev,
+ "%s(): interface 1 must have an endpoint\n", __func__);
+ r = -ENODEV;
+ goto fail2;
+ }
ar2->ep[1] = &alt->endpoint[0].desc;
r = ati_remote2_urb_init(ar2);
if (r)
- goto fail2;
+ goto fail3;
ar2->channel_mask = channel_mask;
ar2->mode_mask = mode_mask;
r = ati_remote2_setup(ar2, ar2->channel_mask);
if (r)
- goto fail2;
+ goto fail3;
usb_make_path(udev, ar2->phys, sizeof(ar2->phys));
strlcat(ar2->phys, "/input0", sizeof(ar2->phys));
@@ -845,11 +868,11 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
r = sysfs_create_group(&udev->dev.kobj, &ati_remote2_attr_group);
if (r)
- goto fail2;
+ goto fail3;
r = ati_remote2_input_init(ar2);
if (r)
- goto fail3;
+ goto fail4;
usb_set_intfdata(interface, ar2);
@@ -857,10 +880,11 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
return 0;
- fail3:
+ fail4:
sysfs_remove_group(&udev->dev.kobj, &ati_remote2_attr_group);
- fail2:
+ fail3:
ati_remote2_urb_cleanup(ar2);
+ fail2:
usb_driver_release_interface(&ati_remote2_driver, ar2->intf[1]);
fail1:
kfree(ar2);
diff --git a/drivers/input/misc/hisi_powerkey.c b/drivers/input/misc/hisi_powerkey.c
new file mode 100644
index 000000000000..da4cda63b799
--- /dev/null
+++ b/drivers/input/misc/hisi_powerkey.c
@@ -0,0 +1,231 @@
+/*
+ * hisi_powerkey.c - Hisilicon PMIC powerkey driver
+ *
+ * Copyright (C) 2013 Hisilicon Ltd.
+ * Copyright (C) 2015 Linaro Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/wakelock.h>
+#include <linux/reboot.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/input.h>
+#include <linux/slab.h>
+
+/* the "held" interrupt triggers after the key has been held for 4 seconds */
+#define MAX_HELD_TIME (4 * HZ)
+
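+/* helper macros that declare one threaded IRQ handler per power-key event */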
+#define irq_handler_id(x) button_irq_##x
+#define irq_handler_declaration(x) \
+static irqreturn_t irq_handler_id(x)(int irq, void *q)
+
+irq_handler_declaration(released);
+irq_handler_declaration(pressed);
+irq_handler_declaration(held);
+
+typedef irqreturn_t (*hi65xx_irq_handler) (int irq, void *data);
+enum { id_pressed, id_released, id_held, id_last };
+
+/*
+ * power key irq information
+ */
+static struct hi65xx_pkey_irq_info {
+ hi65xx_irq_handler handler;
+ const char *const name;
+ int irq;
} irq_info[id_last] = {
+ [id_pressed] = {
+ .handler = irq_handler_id(pressed),
+ .name = "down",
+ .irq = -1,
+ },
+ [id_released] = {
+ .handler = irq_handler_id(released),
+ .name = "up",
+ .irq = -1,
+ },
+ [id_held] = {
+ .handler = irq_handler_id(held),
+ .name = "hold 4s",
+ .irq = -1,
+ },
+};
+
+/*
+ * power key events
+ */
+static struct key_report_pairs {
+ int code;
+ int value;
+} pkey_report[id_last] = {
+ [id_released] = {
+ .code = KEY_POWER,
+ .value = 0
+ },
+ [id_pressed] = {
+ .code = KEY_POWER,
+ .value = 1
+ },
+ [id_held] = {
+ .code = KEY_RESTART,
+ .value = 0
+ },
+};
+
+struct hi65xx_priv {
+ struct input_dev *input;
+ struct wake_lock wlock;
+};
+
+static inline void report_key(struct input_dev *dev, int id_action)
+{
+ /*
+ * track the state of the "held" event since only ON/OFF values are
+ * allowed on EV_KEY types: KEY_RESTART always toggles its value to
+ * guarantee that the event is passed to handlers (disposition update).
+ */
+ if (id_action == id_held)
+ pkey_report[id_held].value ^= 1;
+
+ dev_dbg(dev->dev.parent, "received - code %d, value %d\n",
+ pkey_report[id_action].code,
+ pkey_report[id_action].value);
+
+ input_report_key(dev, pkey_report[id_action].code,
+ pkey_report[id_action].value);
+}
+
+#define irq_handler_definition(action) \
+irq_handler_declaration(action) \
+{ \
+ struct hi65xx_priv *p = q; \
+ \
+ if (irq != irq_info[id_##action].irq) \
+ return IRQ_NONE; \
+ \
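+ /* hold a timed wake lock so the key event is not lost to suspend */ \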
+ wake_lock_timeout(&p->wlock, MAX_HELD_TIME); \
+ \
+ report_key(p->input, id_##action); \
+ input_sync(p->input); \
+ \
+ return IRQ_HANDLED; \
+}
+
+/*
+ * this device gives us three interrupts: power button pressed, released and
+ * held for four seconds.
+ */
+irq_handler_definition(released);
+irq_handler_definition(pressed);
+irq_handler_definition(held);
+
+static int hi65xx_powerkey_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct hi65xx_priv *priv;
+ int irq, i, ret;
+
+ priv = devm_kzalloc(dev, sizeof(struct hi65xx_priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->input = input_allocate_device();
+ if (!priv->input) {
+ dev_err(&pdev->dev, "failed to allocate input device\n");
+ return -ENOMEM;
+ }
+
+ priv->input->evbit[0] = BIT_MASK(EV_KEY);
+ priv->input->dev.parent = &pdev->dev;
+ priv->input->phys = "hisi_on/input0";
+ priv->input->name = "HISI 65xx PowerOn Key";
+
+ for (i = 0; i < ARRAY_SIZE(pkey_report); i++)
+ input_set_capability(priv->input, EV_KEY, pkey_report[i].code);
+
+ for (i = 0; i < ARRAY_SIZE(irq_info); i++) {
+
+ irq = platform_get_irq_byname(pdev, irq_info[i].name);
+ if (irq < 0) {
+ dev_err(dev, "couldn't get irq %s\n", irq_info[i].name);
+ ret = irq;
+ goto err_irq;
+ }
+
+ ret = devm_request_threaded_irq(dev, irq, NULL, irq_info[i].handler,
+ IRQF_ONESHOT, irq_info[i].name, priv);
+ if (ret < 0) {
+ dev_err(dev, "couldn't get irq %s\n", irq_info[i].name);
+ goto err_irq;
+ }
+
+ irq_info[i].irq = irq;
+ }
+
+ wake_lock_init(&priv->wlock, WAKE_LOCK_SUSPEND, "hisi-powerkey");
+
+ ret = input_register_device(priv->input);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register input device: %d\n",
+ ret);
+ goto err_register;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+
+err_register:
+ wake_lock_destroy(&priv->wlock);
+err_irq:
+ input_free_device(priv->input);
+
+ return ret;
+}
+
+static int hi65xx_powerkey_remove(struct platform_device *pdev)
+{
+ struct hi65xx_priv *priv = platform_get_drvdata(pdev);
+
+ wake_lock_destroy(&priv->wlock);
+ input_unregister_device(priv->input);
+
+ return 0;
+}
+
+static const struct of_device_id hi65xx_powerkey_of_match[] = {
+ { .compatible = "hisilicon,hi6552-powerkey", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, hi65xx_powerkey_of_match);
+
+static struct platform_driver hi65xx_powerkey_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "hi65xx-powerkey",
+ .of_match_table = hi65xx_powerkey_of_match,
+ },
+ .probe = hi65xx_powerkey_probe,
+ .remove = hi65xx_powerkey_remove,
+};
+
+module_platform_driver(hi65xx_powerkey_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Zhiliang Xue <xuezhiliang@huawei.com");
+MODULE_DESCRIPTION("Hisi PMIC Power key driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index ac1fa5f44580..9c0ea36913b4 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -1663,6 +1663,8 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc
pcu->ctrl_intf = usb_ifnum_to_if(pcu->udev,
union_desc->bMasterInterface0);
+ if (!pcu->ctrl_intf)
+ return -EINVAL;
alt = pcu->ctrl_intf->cur_altsetting;
pcu->ep_ctrl = &alt->endpoint[0].desc;
@@ -1670,6 +1672,8 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc
pcu->data_intf = usb_ifnum_to_if(pcu->udev,
union_desc->bSlaveInterface0);
+ if (!pcu->data_intf)
+ return -EINVAL;
alt = pcu->data_intf->cur_altsetting;
if (alt->desc.bNumEndpoints != 2) {
diff --git a/drivers/input/misc/powermate.c b/drivers/input/misc/powermate.c
index 63b539d3daba..84909a12ff36 100644
--- a/drivers/input/misc/powermate.c
+++ b/drivers/input/misc/powermate.c
@@ -307,6 +307,9 @@ static int powermate_probe(struct usb_interface *intf, const struct usb_device_i
int error = -ENOMEM;
interface = intf->cur_altsetting;
+ if (interface->desc.bNumEndpoints < 1)
+ return -EINVAL;
+
endpoint = &interface->endpoint[0].desc;
if (!usb_endpoint_is_int_in(endpoint))
return -EIO;
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 537ebb0e193a..78f93cf68840 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1222,7 +1222,7 @@ static int elantech_set_input_params(struct psmouse *psmouse)
input_set_abs_params(dev, ABS_TOOL_WIDTH, ETP_WMIN_V2,
ETP_WMAX_V2, 0, 0);
}
- input_mt_init_slots(dev, 2, 0);
+ input_mt_init_slots(dev, 2, INPUT_MT_SEMI_MT);
input_set_abs_params(dev, ABS_MT_POSITION_X, x_min, x_max, 0, 0);
input_set_abs_params(dev, ABS_MT_POSITION_Y, y_min, y_max, 0, 0);
break;
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 6025eb430c0a..a41d8328c064 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -862,8 +862,9 @@ static void synaptics_report_ext_buttons(struct psmouse *psmouse,
if (!SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap))
return;
- /* Bug in FW 8.1, buttons are reported only when ExtBit is 1 */
- if (SYN_ID_FULL(priv->identity) == 0x801 &&
+ /* Bug in FW 8.1 & 8.2, buttons are reported only when ExtBit is 1 */
+ if ((SYN_ID_FULL(priv->identity) == 0x801 ||
+ SYN_ID_FULL(priv->identity) == 0x802) &&
!((psmouse->packet[0] ^ psmouse->packet[3]) & 0x02))
return;
diff --git a/drivers/input/mouse/vmmouse.c b/drivers/input/mouse/vmmouse.c
index e272f06258ce..a3f0f5a47490 100644
--- a/drivers/input/mouse/vmmouse.c
+++ b/drivers/input/mouse/vmmouse.c
@@ -458,8 +458,6 @@ int vmmouse_init(struct psmouse *psmouse)
priv->abs_dev = abs_dev;
psmouse->private = priv;
- input_set_capability(rel_dev, EV_REL, REL_WHEEL);
-
/* Set up and register absolute device */
snprintf(priv->phys, sizeof(priv->phys), "%s/input1",
psmouse->ps2dev.serio->phys);
@@ -475,10 +473,6 @@ int vmmouse_init(struct psmouse *psmouse)
abs_dev->id.version = psmouse->model;
abs_dev->dev.parent = &psmouse->ps2dev.serio->dev;
- error = input_register_device(priv->abs_dev);
- if (error)
- goto init_fail;
-
/* Set absolute device capabilities */
input_set_capability(abs_dev, EV_KEY, BTN_LEFT);
input_set_capability(abs_dev, EV_KEY, BTN_RIGHT);
@@ -488,6 +482,13 @@ int vmmouse_init(struct psmouse *psmouse)
input_set_abs_params(abs_dev, ABS_X, 0, VMMOUSE_MAX_X, 0, 0);
input_set_abs_params(abs_dev, ABS_Y, 0, VMMOUSE_MAX_Y, 0, 0);
+ error = input_register_device(priv->abs_dev);
+ if (error)
+ goto init_fail;
+
+ /* Add wheel capability to the relative device */
+ input_set_capability(rel_dev, EV_REL, REL_WHEEL);
+
psmouse->protocol_handler = vmmouse_process_byte;
psmouse->disconnect = vmmouse_disconnect;
psmouse->reconnect = vmmouse_reconnect;
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index c11556563ef0..68f5f4a0f1e7 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -258,6 +258,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
},
},
{
+ /* Fujitsu Lifebook U745 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U745"),
+ },
+ },
+ {
/* Fujitsu T70H */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 8b2be1e7714f..fc836f523afa 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1905,7 +1905,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
/* Update device table */
set_dte_entry(dev_data->devid, domain, ats);
if (alias != dev_data->devid)
- set_dte_entry(dev_data->devid, domain, ats);
+ set_dte_entry(alias, domain, ats);
device_flush_dte(dev_data);
}
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 013bdfff2d4d..bf4959f4225b 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -228,6 +228,10 @@ static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
static void init_device_table_dma(void);
+static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
+ u8 bank, u8 cntr, u8 fxn,
+ u64 *value, bool is_write);
+
static inline void update_last_devid(u16 devid)
{
if (devid > amd_iommu_last_bdf)
@@ -1016,6 +1020,34 @@ static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
}
/*
+ * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
+ * Workaround:
+ * BIOS should enable ATS write permission check by setting
+ * L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
+ */
+static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
+{
+ u32 value;
+
+ if ((boot_cpu_data.x86 != 0x15) ||
+ (boot_cpu_data.x86_model < 0x30) ||
+ (boot_cpu_data.x86_model > 0x3f))
+ return;
+
+ /* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
+ value = iommu_read_l2(iommu, 0x47);
+
+ if (value & BIT(0))
+ return;
+
+ /* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
+ iommu_write_l2(iommu, 0x47, value | BIT(0));
+
+ pr_info("AMD-Vi: Applying ATS write check workaround for IOMMU at %s\n",
+ dev_name(&iommu->dev->dev));
+}
+
+/*
* This function clues the initialization function for one IOMMU
* together and also allocates the command buffer and programs the
* hardware. It does NOT enable the IOMMU. This is done afterwards.
@@ -1142,8 +1174,8 @@ static void init_iommu_perf_ctr(struct amd_iommu *iommu)
amd_iommu_pc_present = true;
/* Check if the performance counters can be written to */
- if ((0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val, true)) ||
- (0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val2, false)) ||
+ if ((0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val, true)) ||
+ (0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val2, false)) ||
(val != val2)) {
pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n");
amd_iommu_pc_present = false;
@@ -1284,6 +1316,7 @@ static int iommu_init_pci(struct amd_iommu *iommu)
}
amd_iommu_erratum_746_workaround(iommu);
+ amd_iommu_ats_write_check_workaround(iommu);
iommu->iommu_dev = iommu_device_create(&iommu->dev->dev, iommu,
amd_iommu_groups, "ivhd%d",
@@ -2283,22 +2316,15 @@ u8 amd_iommu_pc_get_max_counters(u16 devid)
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
-int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
+static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
+ u8 bank, u8 cntr, u8 fxn,
u64 *value, bool is_write)
{
- struct amd_iommu *iommu;
u32 offset;
u32 max_offset_lim;
- /* Make sure the IOMMU PC resource is available */
- if (!amd_iommu_pc_present)
- return -ENODEV;
-
- /* Locate the iommu associated with the device ID */
- iommu = amd_iommu_rlookup_table[devid];
-
/* Check for valid iommu and pc register indexing */
- if (WARN_ON((iommu == NULL) || (fxn > 0x28) || (fxn & 7)))
+ if (WARN_ON((fxn > 0x28) || (fxn & 7)))
return -ENODEV;
offset = (u32)(((0x40|bank) << 12) | (cntr << 8) | fxn);
@@ -2322,3 +2348,16 @@ int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
return 0;
}
EXPORT_SYMBOL(amd_iommu_pc_get_set_reg_val);
+
+int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
+ u64 *value, bool is_write)
+{
+ struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+
+ /* Make sure the IOMMU PC resource is available */
+ if (!amd_iommu_pc_present || iommu == NULL)
+ return -ENODEV;
+
+ return iommu_pc_get_set_reg_val(iommu, bank, cntr, fxn,
+ value, is_write);
+}
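With this split, the exported wrapper owns the devid-to-IOMMU lookup and the
availability check, while the static helper only validates the register index.
A hedged caller sketch (demo_read_ctr is illustrative, not from this patch):

	/* Read perf-counter bank 0, counter 0, function 0 for a device.
	 * The wrapper returns -ENODEV when the counter block or the
	 * device's IOMMU is absent, so callers need no extra checks. */
	static int demo_read_ctr(u16 devid, u64 *val)
	{
		return amd_iommu_pc_get_set_reg_val(devid, 0, 0, 0,
						    val, false);
	}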
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 80e3c176008e..3821c4786662 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -329,7 +329,8 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb,
/* Only care about add/remove events for physical functions */
if (pdev->is_virtfn)
return NOTIFY_DONE;
- if (action != BUS_NOTIFY_ADD_DEVICE && action != BUS_NOTIFY_DEL_DEVICE)
+ if (action != BUS_NOTIFY_ADD_DEVICE &&
+ action != BUS_NOTIFY_REMOVED_DEVICE)
return NOTIFY_DONE;
info = dmar_alloc_pci_notify_info(pdev, action);
@@ -339,7 +340,7 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb,
down_write(&dmar_global_lock);
if (action == BUS_NOTIFY_ADD_DEVICE)
dmar_pci_bus_add_dev(info);
- else if (action == BUS_NOTIFY_DEL_DEVICE)
+ else if (action == BUS_NOTIFY_REMOVED_DEVICE)
dmar_pci_bus_del_dev(info);
up_write(&dmar_global_lock);
@@ -1347,7 +1348,7 @@ void dmar_disable_qi(struct intel_iommu *iommu)
raw_spin_lock_irqsave(&iommu->register_lock, flags);
- sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
+ sts = readl(iommu->reg + DMAR_GSTS_REG);
if (!(sts & DMA_GSTS_QIES))
goto end;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index ac7387686ddc..a2e1b7f14df2 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1489,7 +1489,7 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
struct pci_dev *pdev;
- if (dev_is_pci(info->dev))
+ if (!dev_is_pci(info->dev))
return;
pdev = to_pci_dev(info->dev);
@@ -4367,7 +4367,7 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
rmrru->devices_cnt);
if(ret < 0)
return ret;
- } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
+ } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
dmar_remove_dev_scope(info, rmrr->segment,
rmrru->devices, rmrru->devices_cnt);
}
@@ -4387,7 +4387,7 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
break;
else if(ret < 0)
return ret;
- } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
+ } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
if (dmar_remove_dev_scope(info, atsr->segment,
atsru->devices, atsru->devices_cnt))
break;
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index 50464833d0b8..d9939fa9b588 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -249,12 +249,30 @@ static void intel_flush_pasid_dev(struct intel_svm *svm, struct intel_svm_dev *s
static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
+ struct intel_svm_dev *sdev;
+ /* This might end up being called from exit_mmap(), *before* the page
+ * tables are cleared. And __mmu_notifier_release() will delete us from
+ * the list of notifiers so that our invalidate_range() callback doesn't
+ * get called when the page tables are cleared. So we need to protect
+ * against hardware accessing those page tables.
+ *
+ * We do it by clearing the entry in the PASID table and then flushing
+ * the IOTLB and the PASID table caches. This might upset hardware;
+ * perhaps we'll want to point the PASID to a dummy PGD (like the zero
+ * page) so that we end up taking a fault that the hardware really
+ * *has* to handle gracefully without affecting other processes.
+ */
svm->iommu->pasid_table[svm->pasid].val = 0;
+ wmb();
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(sdev, &svm->devs, list) {
+ intel_flush_pasid_dev(svm, sdev, svm->pasid);
+ intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
+ }
+ rcu_read_unlock();
- /* There's no need to do any flush because we can't get here if there
- * are any devices left anyway. */
- WARN_ON(!list_empty(&svm->devs));
}
static const struct mmu_notifier_ops intel_mmuops = {
@@ -379,7 +397,6 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
goto out;
}
iommu->pasid_table[svm->pasid].val = (u64)__pa(mm->pgd) | 1;
- mm = NULL;
} else
iommu->pasid_table[svm->pasid].val = (u64)__pa(init_mm.pgd) | 1 | (1ULL << 11);
wmb();
@@ -442,11 +459,11 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
kfree_rcu(sdev, rcu);
if (list_empty(&svm->devs)) {
- mmu_notifier_unregister(&svm->notifier, svm->mm);
idr_remove(&svm->iommu->pasid_idr, svm->pasid);
if (svm->mm)
- mmput(svm->mm);
+ mmu_notifier_unregister(&svm->notifier, svm->mm);
+
/* We mandate that no page faults may be outstanding
* for the PASID when intel_svm_unbind_mm() is called.
* If that is not obeyed, subtle errors will happen.
@@ -507,6 +524,10 @@ static irqreturn_t prq_event_thread(int irq, void *d)
struct intel_svm *svm = NULL;
int head, tail, handled = 0;
+ /* Clear PPR bit before reading head/tail registers, to
+ * ensure that we get a new interrupt if needed. */
+ writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);
+
tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
while (head != tail) {
@@ -551,6 +572,9 @@ static irqreturn_t prq_event_thread(int irq, void *d)
* any faults on kernel addresses. */
if (!svm->mm)
goto bad_req;
+ /* If the mm is already defunct, don't handle faults. */
+ if (!atomic_inc_not_zero(&svm->mm->mm_users))
+ goto bad_req;
down_read(&svm->mm->mmap_sem);
vma = find_extend_vma(svm->mm, address);
if (!vma || address < vma->vm_start)
@@ -567,6 +591,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
result = QI_RESP_SUCCESS;
invalid:
up_read(&svm->mm->mmap_sem);
+ mmput(svm->mm);
bad_req:
/* Accounting for major/minor faults? */
rcu_read_lock();
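The two hunks above follow the standard mm-liveness idiom: pin the mm with
atomic_inc_not_zero(&mm->mm_users) so an exiting task cannot free it under the
fault handler, and drop the pin with mmput() on every exit path. A
self-contained sketch of the idiom (demo_handle_fault is hypothetical):

	#include <linux/mm.h>
	#include <linux/sched.h>

	static int demo_handle_fault(struct mm_struct *mm, unsigned long addr)
	{
		struct vm_area_struct *vma;
		int ret = -EINVAL;

		/* Refuse to touch an mm that is already exiting */
		if (!atomic_inc_not_zero(&mm->mm_users))
			return -ESRCH;

		down_read(&mm->mmap_sem);
		vma = find_extend_vma(mm, addr);
		if (vma && addr >= vma->vm_start)
			ret = 0;	/* real fault handling goes here */
		up_read(&mm->mmap_sem);

		mmput(mm);		/* release the temporary pin */
		return ret;
	}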
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 1fae1881648c..e9b241b1c9dd 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -629,7 +629,7 @@ static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
raw_spin_lock_irqsave(&iommu->register_lock, flags);
- sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
+ sts = readl(iommu->reg + DMAR_GSTS_REG);
if (!(sts & DMA_GSTS_IRES))
goto end;
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 7df97777662d..dad768caa9c5 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -405,17 +405,18 @@ static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
arm_lpae_iopte *start, *end;
unsigned long table_size;
- /* Only leaf entries at the last level */
- if (lvl == ARM_LPAE_MAX_LEVELS - 1)
- return;
-
if (lvl == ARM_LPAE_START_LVL(data))
table_size = data->pgd_size;
else
table_size = 1UL << data->pg_shift;
start = ptep;
- end = (void *)ptep + table_size;
+
+ /* Only leaf entries at the last level */
+ if (lvl == ARM_LPAE_MAX_LEVELS - 1)
+ end = ptep;
+ else
+ end = (void *)ptep + table_size;
while (ptep != end) {
arm_lpae_iopte pte = *ptep++;
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 0e3b0092ec92..515bb8b80952 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -848,7 +848,8 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
if (!group->default_domain) {
group->default_domain = __iommu_domain_alloc(dev->bus,
IOMMU_DOMAIN_DMA);
- group->domain = group->default_domain;
+ if (!group->domain)
+ group->domain = group->default_domain;
}
ret = iommu_group_add_device(group, dev);
diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
index b12a5d58546f..37199b9b2cfa 100644
--- a/drivers/irqchip/irq-atmel-aic-common.c
+++ b/drivers/irqchip/irq-atmel-aic-common.c
@@ -86,7 +86,7 @@ int aic_common_set_priority(int priority, unsigned *val)
priority > AT91_AIC_IRQ_MAX_PRIORITY)
return -EINVAL;
- *val &= AT91_AIC_PRIOR;
+ *val &= ~AT91_AIC_PRIOR;
*val |= priority;
return 0;
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index e23d1d18f9d6..a159529f9d53 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -597,11 +597,6 @@ static void its_unmask_irq(struct irq_data *d)
lpi_set_config(d, true);
}
-static void its_eoi_irq(struct irq_data *d)
-{
- gic_write_eoir(d->hwirq);
-}
-
static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
bool force)
{
@@ -638,7 +633,7 @@ static struct irq_chip its_irq_chip = {
.name = "ITS",
.irq_mask = its_mask_irq,
.irq_unmask = its_unmask_irq,
- .irq_eoi = its_eoi_irq,
+ .irq_eoi = irq_chip_eoi_parent,
.irq_set_affinity = its_set_affinity,
.irq_compose_msi_msg = its_irq_compose_msi_msg,
};
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
index c22e2d40cb30..efe50845939d 100644
--- a/drivers/irqchip/irq-mxs.c
+++ b/drivers/irqchip/irq-mxs.c
@@ -241,6 +241,7 @@ static int __init asm9260_of_init(struct device_node *np,
writel(0, icoll_priv.intr + i);
icoll_add_domain(np, ASM9260_NUM_IRQS);
+ set_handle_irq(icoll_handle_irq);
return 0;
}
diff --git a/drivers/irqchip/irq-omap-intc.c b/drivers/irqchip/irq-omap-intc.c
index 8587d0f8d8c0..f6cb1b8bb981 100644
--- a/drivers/irqchip/irq-omap-intc.c
+++ b/drivers/irqchip/irq-omap-intc.c
@@ -47,6 +47,7 @@
#define INTC_ILR0 0x0100
#define ACTIVEIRQ_MASK 0x7f /* omap2/3 active interrupt bits */
+#define SPURIOUSIRQ_MASK (0x1ffffff << 7)
#define INTCPS_NR_ILR_REGS 128
#define INTCPS_NR_MIR_REGS 4
@@ -330,11 +331,35 @@ static int __init omap_init_irq(u32 base, struct device_node *node)
static asmlinkage void __exception_irq_entry
omap_intc_handle_irq(struct pt_regs *regs)
{
+ extern unsigned long irq_err_count;
u32 irqnr;
irqnr = intc_readl(INTC_SIR);
+
+ /*
+ * A spurious IRQ can result if the interrupt that triggered
+ * the sorting is no longer active during the sorting (10 INTC
+ * functional clock cycles after interrupt assertion), or if a
+ * change in the interrupt mask affected the result during
+ * sorting. No special handling is required beyond ignoring
+ * the SIR register value just read and retrying.
+ * See section 6.2.5 of AM335x TRM Literature Number: SPRUH73K
+ *
+ * Many times, a spurious interrupt situation has been fixed
+ * by adding a flush for the posted write acking the IRQ in
+ * the device driver. Typically, this is going to be the device
+ * driver whose interrupt was handled just before the spurious
+ * IRQ occurred. Pay attention to those device drivers if you
+ * run into the spurious IRQ condition below.
+ */
+ if (unlikely((irqnr & SPURIOUSIRQ_MASK) == SPURIOUSIRQ_MASK)) {
+ pr_err_once("%s: spurious irq!\n", __func__);
+ irq_err_count++;
+ omap_ack_irq(NULL);
+ return;
+ }
+
irqnr &= ACTIVEIRQ_MASK;
- WARN_ONCE(!irqnr, "Spurious IRQ ?\n");
handle_domain_irq(domain, irqnr, regs);
}
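The flush the comment above asks device drivers for is a read-back of the
register that was just written, which forces the posted IRQ-ack write out to
the device before the handler returns. A sketch, assuming a memory-mapped
device with hypothetical DEMO_* register names:

	static void demo_ack_irq(void __iomem *base)
	{
		writel_relaxed(DEMO_IRQ_ACK, base + DEMO_IRQSTATUS);
		/* read back to flush the posted write */
		readl_relaxed(base + DEMO_IRQSTATUS);
	}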
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 546d05f4358a..d55d656c8d5a 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -78,6 +78,14 @@ config STI_MBOX
Mailbox implementation for STMicroelectronics family chips with
hardware for interprocessor communication.
+config HI6220_MBOX
+ tristate "Hi6220 Mailbox"
+ depends on ARCH_HISI
+ help
+ An implementation of the hi6220 mailbox. It is used to send messages
+ between the application processors and the MCU. Say Y here if you want
+ to build the Hi6220 mailbox controller driver.
+
config MAILBOX_TEST
tristate "Mailbox Test Client"
depends on OF
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 92435ef11f26..565adda57e52 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -17,3 +17,5 @@ obj-$(CONFIG_ALTERA_MBOX) += mailbox-altera.o
obj-$(CONFIG_BCM2835_MBOX) += bcm2835-mailbox.o
obj-$(CONFIG_STI_MBOX) += mailbox-sti.o
+
+obj-$(CONFIG_HI6220_MBOX) += hi6220-mailbox.o
diff --git a/drivers/mailbox/hi6220-mailbox.c b/drivers/mailbox/hi6220-mailbox.c
new file mode 100644
index 000000000000..c0e19d574a57
--- /dev/null
+++ b/drivers/mailbox/hi6220-mailbox.c
@@ -0,0 +1,369 @@
+/*
+ * Hisilicon's Hi6220 mailbox driver
+ *
+ * Copyright (c) 2015 Hisilicon Limited.
+ * Copyright (c) 2015 Linaro Limited.
+ *
+ * Author: Leo Yan <leo.yan@linaro.org>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kfifo.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define MBOX_CHAN_MAX 32
+#define MBOX_CHAN_NUM 2
+
+#define MBOX_RX 0x0
+#define MBOX_TX 0x1
+
+/* Mailbox message length: 8 words */
+#define MBOX_MSG_LEN 8
+
+/* Mailbox Registers */
+#define MBOX_OFF(m) (0x40 * (m))
+#define MBOX_MODE_REG(m) (MBOX_OFF(m) + 0x0)
+#define MBOX_DATA_REG(m) (MBOX_OFF(m) + 0x4)
+
+#define MBOX_STATE_MASK (0xF << 4)
+#define MBOX_STATE_IDLE (0x1 << 4)
+#define MBOX_STATE_TX (0x2 << 4)
+#define MBOX_STATE_RX (0x4 << 4)
+#define MBOX_STATE_ACK (0x8 << 4)
+#define MBOX_ACK_CONFIG_MASK (0x1 << 0)
+#define MBOX_ACK_AUTOMATIC (0x1 << 0)
+#define MBOX_ACK_IRQ (0x0 << 0)
+
+/* IPC registers */
+#define ACK_INT_RAW_REG(i) ((i) + 0x400)
+#define ACK_INT_MSK_REG(i) ((i) + 0x404)
+#define ACK_INT_STAT_REG(i) ((i) + 0x408)
+#define ACK_INT_CLR_REG(i) ((i) + 0x40c)
+#define ACK_INT_ENA_REG(i) ((i) + 0x500)
+#define ACK_INT_DIS_REG(i) ((i) + 0x504)
+#define DST_INT_RAW_REG(i) ((i) + 0x420)
+
+struct hi6220_mbox_chan {
+ /*
+ * Description for channel's hardware info:
+ * - direction: tx or rx
+ * - dst irq: peer core's irq number
+ * - ack irq: local irq number
+ * - slot number
+ */
+ unsigned int dir, dst_irq, ack_irq;
+ unsigned int slot;
+
+ struct hi6220_mbox *parent;
+};
+
+struct hi6220_mbox {
+ struct device *dev;
+
+ int irq;
+
+ /* flag of enabling tx's irq mode */
+ bool tx_irq_mode;
+
+ /* region for ipc event */
+ void __iomem *ipc;
+
+ /* region for mailbox */
+ void __iomem *base;
+
+ unsigned int chan_num;
+ struct hi6220_mbox_chan *mchan;
+
+ void *irq_map_chan[MBOX_CHAN_MAX];
+ struct mbox_chan *chan;
+ struct mbox_controller controller;
+};
+
+static void mbox_set_state(struct hi6220_mbox *mbox,
+ unsigned int slot, u32 val)
+{
+ u32 status;
+
+ status = readl(mbox->base + MBOX_MODE_REG(slot));
+ status = (status & ~MBOX_STATE_MASK) | val;
+ writel(status, mbox->base + MBOX_MODE_REG(slot));
+}
+
+static void mbox_set_mode(struct hi6220_mbox *mbox,
+ unsigned int slot, u32 val)
+{
+ u32 mode;
+
+ mode = readl(mbox->base + MBOX_MODE_REG(slot));
+ mode = (mode & ~MBOX_ACK_CONFIG_MASK) | val;
+ writel(mode, mbox->base + MBOX_MODE_REG(slot));
+}
+
+static bool hi6220_mbox_last_tx_done(struct mbox_chan *chan)
+{
+ struct hi6220_mbox_chan *mchan = chan->con_priv;
+ struct hi6220_mbox *mbox = mchan->parent;
+ u32 state;
+
+ /* last_tx_done is used only in polling mode */
+ BUG_ON(mbox->tx_irq_mode);
+
+ state = readl(mbox->base + MBOX_MODE_REG(mchan->slot));
+ return ((state & MBOX_STATE_MASK) == MBOX_STATE_IDLE);
+}
+
+static int hi6220_mbox_send_data(struct mbox_chan *chan, void *msg)
+{
+ struct hi6220_mbox_chan *mchan = chan->con_priv;
+ struct hi6220_mbox *mbox = mchan->parent;
+ unsigned int slot = mchan->slot;
+ u32 *buf = msg;
+ int i;
+
+ mbox_set_state(mbox, slot, MBOX_STATE_TX);
+
+ if (mbox->tx_irq_mode)
+ mbox_set_mode(mbox, slot, MBOX_ACK_IRQ);
+ else
+ mbox_set_mode(mbox, slot, MBOX_ACK_AUTOMATIC);
+
+ for (i = 0; i < MBOX_MSG_LEN; i++)
+ writel(buf[i], mbox->base + MBOX_DATA_REG(slot) + i * 4);
+
+ /* trigger remote request */
+ writel(BIT(mchan->dst_irq), DST_INT_RAW_REG(mbox->ipc));
+ return 0;
+}
+
+static irqreturn_t hi6220_mbox_interrupt(int irq, void *p)
+{
+ struct hi6220_mbox *mbox = p;
+ struct hi6220_mbox_chan *mchan;
+ struct mbox_chan *chan;
+ unsigned int state, intr_bit, i;
+ u32 msg[MBOX_MSG_LEN];
+
+ state = readl(ACK_INT_STAT_REG(mbox->ipc));
+ if (!state) {
+ dev_warn(mbox->dev, "%s: spurious interrupt\n",
+ __func__);
+ return IRQ_HANDLED;
+ }
+
+ while (state) {
+ intr_bit = __ffs(state);
+ state &= (state - 1);
+
+ chan = mbox->irq_map_chan[intr_bit];
+ if (!chan) {
+ dev_warn(mbox->dev, "%s: unexpected irq vector %d\n",
+ __func__, intr_bit);
+ continue;
+ }
+
+ mchan = chan->con_priv;
+ if (mchan->dir == MBOX_TX) {
+ mbox_chan_txdone(chan, 0);
+ } else {
+ for (i = 0; i < MBOX_MSG_LEN; i++)
+ msg[i] = readl(mbox->base +
+ MBOX_DATA_REG(mchan->slot) + i * 4);
+
+ mbox_chan_received_data(chan, (void *)msg);
+ }
+
+ /* clear IRQ source */
+ writel(BIT(mchan->ack_irq), ACK_INT_CLR_REG(mbox->ipc));
+ mbox_set_state(mbox, mchan->slot, MBOX_STATE_IDLE);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int hi6220_mbox_startup(struct mbox_chan *chan)
+{
+ struct hi6220_mbox_chan *mchan = chan->con_priv;
+ struct hi6220_mbox *mbox = mchan->parent;
+
+ mbox->irq_map_chan[mchan->ack_irq] = (void *)chan;
+
+ /* enable interrupt */
+ writel(BIT(mchan->ack_irq), ACK_INT_ENA_REG(mbox->ipc));
+ return 0;
+}
+
+static void hi6220_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct hi6220_mbox_chan *mchan = chan->con_priv;
+ struct hi6220_mbox *mbox = mchan->parent;
+
+ /* disable interrupt */
+ writel(BIT(mchan->ack_irq), ACK_INT_DIS_REG(mbox->ipc));
+ mbox->irq_map_chan[mchan->ack_irq] = NULL;
+}
+
+static const struct mbox_chan_ops hi6220_mbox_chan_ops = {
+ .send_data = hi6220_mbox_send_data,
+ .startup = hi6220_mbox_startup,
+ .shutdown = hi6220_mbox_shutdown,
+ .last_tx_done = hi6220_mbox_last_tx_done,
+};
+
+static void hi6220_mbox_init_hw(struct hi6220_mbox *mbox)
+{
+ struct hi6220_mbox_chan init_data[MBOX_CHAN_NUM] = {
+ { MBOX_RX, 1, 10 },
+ { MBOX_TX, 0, 11 },
+ };
+ struct hi6220_mbox_chan *mchan = mbox->mchan;
+ int i;
+
+ for (i = 0; i < MBOX_CHAN_NUM; i++) {
+ memcpy(&mchan[i], &init_data[i], sizeof(*mchan));
+ mchan[i].slot = i;
+ mchan[i].parent = mbox;
+ }
+
+ /* mask and clear all interrupt vectors */
+ writel(0x0, ACK_INT_MSK_REG(mbox->ipc));
+ writel(~0x0, ACK_INT_CLR_REG(mbox->ipc));
+
+ /* use interrupt for tx's ack */
+ mbox->tx_irq_mode = true;
+}
+
+static const struct of_device_id hi6220_mbox_of_match[] = {
+ { .compatible = "hisilicon,hi6220-mbox", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, hi6220_mbox_of_match);
+
+static int hi6220_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct hi6220_mbox *mbox;
+ struct resource *res;
+ int i, err;
+
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ mbox->dev = dev;
+ mbox->chan_num = MBOX_CHAN_NUM;
+ mbox->mchan = devm_kzalloc(dev,
+ mbox->chan_num * sizeof(*mbox->mchan), GFP_KERNEL);
+ if (!mbox->mchan)
+ return -ENOMEM;
+
+ mbox->chan = devm_kzalloc(dev,
+ mbox->chan_num * sizeof(*mbox->chan), GFP_KERNEL);
+ if (!mbox->chan)
+ return -ENOMEM;
+
+ mbox->irq = platform_get_irq(pdev, 0);
+ if (mbox->irq < 0)
+ return mbox->irq;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mbox->ipc = devm_ioremap_resource(dev, res);
+ if (IS_ERR(mbox->ipc)) {
+ dev_err(dev, "ioremap ipc failed\n");
+ return PTR_ERR(mbox->ipc);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ mbox->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(mbox->base)) {
+ dev_err(dev, "ioremap buffer failed\n");
+ return PTR_ERR(mbox->base);
+ }
+
+ err = devm_request_irq(dev, mbox->irq, hi6220_mbox_interrupt, 0,
+ dev_name(dev), mbox);
+ if (err) {
+ dev_err(dev, "Failed to register a mailbox IRQ handler: %d\n",
+ err);
+ return err;
+ }
+
+ /* init hardware parameters */
+ hi6220_mbox_init_hw(mbox);
+
+ for (i = 0; i < mbox->chan_num; i++) {
+ mbox->chan[i].con_priv = &mbox->mchan[i];
+ mbox->irq_map_chan[i] = NULL;
+ }
+
+ mbox->controller.dev = dev;
+ mbox->controller.chans = &mbox->chan[0];
+ mbox->controller.num_chans = mbox->chan_num;
+ mbox->controller.ops = &hi6220_mbox_chan_ops;
+
+ if (mbox->tx_irq_mode) {
+ mbox->controller.txdone_irq = true;
+ } else {
+ mbox->controller.txdone_poll = true;
+ mbox->controller.txpoll_period = 5;
+ }
+
+ err = mbox_controller_register(&mbox->controller);
+ if (err) {
+ dev_err(dev, "Failed to register mailbox %d\n", err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, mbox);
+ dev_info(dev, "Mailbox enabled\n");
+ return 0;
+}
+
+static int hi6220_mbox_remove(struct platform_device *pdev)
+{
+ struct hi6220_mbox *mbox = platform_get_drvdata(pdev);
+
+ mbox_controller_unregister(&mbox->controller);
+ return 0;
+}
+
+static struct platform_driver hi6220_mbox_driver = {
+ .driver = {
+ .name = "hi6220-mbox",
+ .owner = THIS_MODULE,
+ .of_match_table = hi6220_mbox_of_match,
+ },
+ .probe = hi6220_mbox_probe,
+ .remove = hi6220_mbox_remove,
+};
+
+static int __init hi6220_mbox_init(void)
+{
+ return platform_driver_register(&hi6220_mbox_driver);
+}
+core_initcall(hi6220_mbox_init);
+
+static void __exit hi6220_mbox_exit(void)
+{
+ platform_driver_unregister(&hi6220_mbox_driver);
+}
+module_exit(hi6220_mbox_exit);
+
+MODULE_AUTHOR("Leo Yan <leo.yan@linaro.org>");
+MODULE_DESCRIPTION("Hi6220 mailbox driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 83392f856dfd..22b9e34ceb75 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1741,6 +1741,7 @@ static void bch_btree_gc(struct cache_set *c)
do {
ret = btree_root(gc_root, c, &op, &writes, &stats);
closure_sync(&writes);
+ cond_resched();
if (ret && ret != -EAGAIN)
pr_warn("gc failed!");
@@ -2162,8 +2163,10 @@ int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
rw_lock(true, b, b->level);
if (b->key.ptr[0] != btree_ptr ||
- b->seq != seq + 1)
+ b->seq != seq + 1) {
+ op->lock = b->level;
goto out;
+ }
}
SET_KEY_PTRS(check_key, 1);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 679a093a3bf6..a296425a7270 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -685,6 +685,8 @@ static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") ||
sysfs_create_link(&c->kobj, &d->kobj, d->name),
"Couldn't create device <-> cache set symlinks");
+
+ clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags);
}
static void bcache_device_detach(struct bcache_device *d)
@@ -847,8 +849,11 @@ void bch_cached_dev_run(struct cached_dev *dc)
buf[SB_LABEL_SIZE] = '\0';
env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf);
- if (atomic_xchg(&dc->running, 1))
+ if (atomic_xchg(&dc->running, 1)) {
+ kfree(env[1]);
+ kfree(env[2]);
return;
+ }
if (!d->c &&
BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
@@ -1010,8 +1015,12 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
*/
atomic_set(&dc->count, 1);
- if (bch_cached_dev_writeback_start(dc))
+ /* Block writeback thread, but spawn it */
+ down_write(&dc->writeback_lock);
+ if (bch_cached_dev_writeback_start(dc)) {
+ up_write(&dc->writeback_lock);
return -ENOMEM;
+ }
if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
bch_sectors_dirty_init(dc);
@@ -1023,6 +1032,9 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
bch_cached_dev_run(dc);
bcache_device_link(&dc->disk, c, "bdev");
+ /* Allow the writeback thread to proceed */
+ up_write(&dc->writeback_lock);
+
pr_info("Caching %s as %s on set %pU",
bdevname(dc->bdev, buf), dc->disk.disk->disk_name,
dc->disk.c->sb.set_uuid);
@@ -1361,6 +1373,9 @@ static void cache_set_flush(struct closure *cl)
struct btree *b;
unsigned i;
+ if (!c)
+ closure_return(cl);
+
bch_cache_accounting_destroy(&c->accounting);
kobject_put(&c->internal);
@@ -1823,11 +1838,12 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
return 0;
}
-static void register_cache(struct cache_sb *sb, struct page *sb_page,
+static int register_cache(struct cache_sb *sb, struct page *sb_page,
struct block_device *bdev, struct cache *ca)
{
char name[BDEVNAME_SIZE];
- const char *err = "cannot allocate memory";
+ const char *err = NULL;
+ int ret = 0;
memcpy(&ca->sb, sb, sizeof(struct cache_sb));
ca->bdev = bdev;
@@ -1842,27 +1858,35 @@ static void register_cache(struct cache_sb *sb, struct page *sb_page,
if (blk_queue_discard(bdev_get_queue(ca->bdev)))
ca->discard = CACHE_DISCARD(&ca->sb);
- if (cache_alloc(sb, ca) != 0)
+ ret = cache_alloc(sb, ca);
+ if (ret != 0)
goto err;
- err = "error creating kobject";
- if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache"))
- goto err;
+ if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) {
+ err = "error calling kobject_add";
+ ret = -ENOMEM;
+ goto out;
+ }
mutex_lock(&bch_register_lock);
err = register_cache_set(ca);
mutex_unlock(&bch_register_lock);
- if (err)
- goto err;
+ if (err) {
+ ret = -ENODEV;
+ goto out;
+ }
pr_info("registered cache device %s", bdevname(bdev, name));
+
out:
kobject_put(&ca->kobj);
- return;
+
err:
- pr_notice("error opening %s: %s", bdevname(bdev, name), err);
- goto out;
+ if (err)
+ pr_notice("error opening %s: %s", bdevname(bdev, name), err);
+
+ return ret;
}
/* Global interfaces/init */
@@ -1933,6 +1957,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
else
err = "device busy";
mutex_unlock(&bch_register_lock);
+ if (attr == &ksysfs_register_quiet)
+ goto out;
}
goto err;
}
@@ -1958,7 +1984,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
if (!ca)
goto err_close;
- register_cache(sb, sb_page, bdev, ca);
+ if (register_cache(sb, sb_page, bdev, ca) != 0)
+ goto err_close;
}
out:
if (sb_page)
@@ -1971,8 +1998,7 @@ out:
err_close:
blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
err:
- if (attr != &ksysfs_register_quiet)
- pr_info("error opening %s: %s", path, err);
+ pr_info("error opening %s: %s", path, err);
ret = -EINVAL;
goto out;
}
@@ -2066,8 +2092,10 @@ static int __init bcache_init(void)
closure_debug_init();
bcache_major = register_blkdev(0, "bcache");
- if (bcache_major < 0)
+ if (bcache_major < 0) {
+ unregister_reboot_notifier(&reboot);
return bcache_major;
+ }
if (!(bcache_wq = create_workqueue("bcache")) ||
!(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index b23f88d9f18c..b9346cd9cda1 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -323,6 +323,10 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
+ struct cached_dev *dc = container_of(buf, struct cached_dev, writeback_keys);
+
+ BUG_ON(KEY_INODE(k) != dc->disk.id);
+
return KEY_DIRTY(k);
}
@@ -372,11 +376,24 @@ next:
}
}
+/*
+ * Returns true if we scanned the entire disk
+ */
static bool refill_dirty(struct cached_dev *dc)
{
struct keybuf *buf = &dc->writeback_keys;
+ struct bkey start = KEY(dc->disk.id, 0, 0);
struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
- bool searched_from_start = false;
+ struct bkey start_pos;
+
+ /*
+ * make sure keybuf pos is inside the range for this disk - at bringup
+ * we might not be attached yet, so this disk's inode nr isn't
+ * initialized yet
+ */
+ if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
+ bkey_cmp(&buf->last_scanned, &end) > 0)
+ buf->last_scanned = start;
if (dc->partial_stripes_expensive) {
refill_full_stripes(dc);
@@ -384,14 +401,20 @@ static bool refill_dirty(struct cached_dev *dc)
return false;
}
- if (bkey_cmp(&buf->last_scanned, &end) >= 0) {
- buf->last_scanned = KEY(dc->disk.id, 0, 0);
- searched_from_start = true;
- }
-
+ start_pos = buf->last_scanned;
bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
- return bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start;
+ if (bkey_cmp(&buf->last_scanned, &end) < 0)
+ return false;
+
+ /*
+ * If we get to the end start scanning again from the beginning, and
+ * only scan up to where we initially started scanning from:
+ */
+ buf->last_scanned = start;
+ bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);
+
+ return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
}
static int bch_writeback_thread(void *arg)
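Refill now makes at most two passes around the key space: from the saved
cursor to the end, then from the start back up to where it began, stopping
early whenever the keybuf fills. A toy, self-contained sketch of that scan
(plain C; keys, budget and visit() stand in for the btree, the keybuf capacity
and bch_refill_keybuf()):

	#include <stdbool.h>

	static bool demo_refill(const int *keys, int n, int *cursor,
				int budget, void (*visit)(int key))
	{
		int start_pos = *cursor;

		while (budget-- > 0 && *cursor < n)	/* first leg */
			visit(keys[(*cursor)++]);
		if (*cursor < n)
			return false;			/* keybuf filled early */

		*cursor = 0;				/* wrap around */
		while (budget-- > 0 && *cursor < start_pos)
			visit(keys[(*cursor)++]);	/* second leg */

		return *cursor >= start_pos;		/* full circle scanned */
	}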
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index 0a9dab187b79..073a042aed24 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -63,7 +63,8 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
static inline void bch_writeback_queue(struct cached_dev *dc)
{
- wake_up_process(dc->writeback_thread);
+ if (!IS_ERR_OR_NULL(dc->writeback_thread))
+ wake_up_process(dc->writeback_thread);
}
static inline void bch_writeback_add(struct cached_dev *dc)
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index f6543f3a970f..27f2ef300f8b 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -867,19 +867,40 @@ static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
return 0;
}
-#define WRITE_LOCK(cmd) \
- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) \
+#define WRITE_LOCK(cmd) \
+ down_write(&cmd->root_lock); \
+ if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
+ up_write(&cmd->root_lock); \
return -EINVAL; \
- down_write(&cmd->root_lock)
+ }
#define WRITE_LOCK_VOID(cmd) \
- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) \
+ down_write(&cmd->root_lock); \
+ if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
+ up_write(&cmd->root_lock); \
return; \
- down_write(&cmd->root_lock)
+ }
#define WRITE_UNLOCK(cmd) \
up_write(&cmd->root_lock)
+#define READ_LOCK(cmd) \
+ down_read(&cmd->root_lock); \
+ if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
+ up_read(&cmd->root_lock); \
+ return -EINVAL; \
+ }
+
+#define READ_LOCK_VOID(cmd) \
+ down_read(&cmd->root_lock); \
+ if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
+ up_read(&cmd->root_lock); \
+ return; \
+ }
+
+#define READ_UNLOCK(cmd) \
+ up_read(&cmd->root_lock)
+
int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
{
int r;
@@ -1015,22 +1036,20 @@ int dm_cache_load_discards(struct dm_cache_metadata *cmd,
{
int r;
- down_read(&cmd->root_lock);
+ READ_LOCK(cmd);
r = __load_discards(cmd, fn, context);
- up_read(&cmd->root_lock);
+ READ_UNLOCK(cmd);
return r;
}
-dm_cblock_t dm_cache_size(struct dm_cache_metadata *cmd)
+int dm_cache_size(struct dm_cache_metadata *cmd, dm_cblock_t *result)
{
- dm_cblock_t r;
+ READ_LOCK(cmd);
+ *result = cmd->cache_blocks;
+ READ_UNLOCK(cmd);
- down_read(&cmd->root_lock);
- r = cmd->cache_blocks;
- up_read(&cmd->root_lock);
-
- return r;
+ return 0;
}
static int __remove(struct dm_cache_metadata *cmd, dm_cblock_t cblock)
@@ -1188,9 +1207,9 @@ int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
{
int r;
- down_read(&cmd->root_lock);
+ READ_LOCK(cmd);
r = __load_mappings(cmd, policy, fn, context);
- up_read(&cmd->root_lock);
+ READ_UNLOCK(cmd);
return r;
}
@@ -1215,18 +1234,18 @@ static int __dump_mappings(struct dm_cache_metadata *cmd)
void dm_cache_dump(struct dm_cache_metadata *cmd)
{
- down_read(&cmd->root_lock);
+ READ_LOCK_VOID(cmd);
__dump_mappings(cmd);
- up_read(&cmd->root_lock);
+ READ_UNLOCK(cmd);
}
int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd)
{
int r;
- down_read(&cmd->root_lock);
+ READ_LOCK(cmd);
r = cmd->changed;
- up_read(&cmd->root_lock);
+ READ_UNLOCK(cmd);
return r;
}
@@ -1276,9 +1295,9 @@ int dm_cache_set_dirty(struct dm_cache_metadata *cmd,
void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
struct dm_cache_statistics *stats)
{
- down_read(&cmd->root_lock);
+ READ_LOCK_VOID(cmd);
*stats = cmd->stats;
- up_read(&cmd->root_lock);
+ READ_UNLOCK(cmd);
}
void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
@@ -1312,9 +1331,9 @@ int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
{
int r = -EINVAL;
- down_read(&cmd->root_lock);
+ READ_LOCK(cmd);
r = dm_sm_get_nr_free(cmd->metadata_sm, result);
- up_read(&cmd->root_lock);
+ READ_UNLOCK(cmd);
return r;
}
@@ -1324,9 +1343,9 @@ int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
{
int r = -EINVAL;
- down_read(&cmd->root_lock);
+ READ_LOCK(cmd);
r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
- up_read(&cmd->root_lock);
+ READ_UNLOCK(cmd);
return r;
}
@@ -1417,7 +1436,13 @@ int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *
int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result)
{
- return blocks_are_unmapped_or_clean(cmd, 0, cmd->cache_blocks, result);
+ int r;
+
+ READ_LOCK(cmd);
+ r = blocks_are_unmapped_or_clean(cmd, 0, cmd->cache_blocks, result);
+ READ_UNLOCK(cmd);
+
+ return r;
}
void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd)
@@ -1440,10 +1465,7 @@ int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd)
struct dm_block *sblock;
struct cache_disk_superblock *disk_super;
- /*
- * We ignore fail_io for this function.
- */
- down_write(&cmd->root_lock);
+ WRITE_LOCK(cmd);
set_bit(NEEDS_CHECK, &cmd->flags);
r = superblock_lock(cmd, &sblock);
@@ -1458,19 +1480,17 @@ int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd)
dm_bm_unlock(sblock);
out:
- up_write(&cmd->root_lock);
+ WRITE_UNLOCK(cmd);
return r;
}
-bool dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd)
+int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result)
{
- bool needs_check;
+ READ_LOCK(cmd);
+ *result = !!test_bit(NEEDS_CHECK, &cmd->flags);
+ READ_UNLOCK(cmd);
- down_read(&cmd->root_lock);
- needs_check = !!test_bit(NEEDS_CHECK, &cmd->flags);
- up_read(&cmd->root_lock);
-
- return needs_check;
+ return 0;
}
int dm_cache_metadata_abort(struct dm_cache_metadata *cmd)
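Note that READ_LOCK()/WRITE_LOCK() expand to an early return, so the macros
themselves release root_lock on the failure path and a caller can never leak
the semaphore. A hypothetical accessor shows the resulting shape:

	static int demo_get_changed(struct dm_cache_metadata *cmd, bool *result)
	{
		READ_LOCK(cmd);		/* may return -EINVAL, already unlocked */
		*result = cmd->changed;
		READ_UNLOCK(cmd);
		return 0;
	}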
diff --git a/drivers/md/dm-cache-metadata.h b/drivers/md/dm-cache-metadata.h
index 2ffee21f318d..8528744195e5 100644
--- a/drivers/md/dm-cache-metadata.h
+++ b/drivers/md/dm-cache-metadata.h
@@ -66,7 +66,7 @@ void dm_cache_metadata_close(struct dm_cache_metadata *cmd);
* origin blocks to map to.
*/
int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size);
-dm_cblock_t dm_cache_size(struct dm_cache_metadata *cmd);
+int dm_cache_size(struct dm_cache_metadata *cmd, dm_cblock_t *result);
int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
sector_t discard_block_size,
@@ -137,7 +137,7 @@ int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *
*/
int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result);
-bool dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd);
+int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result);
int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd);
void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd);
void dm_cache_metadata_set_read_write(struct dm_cache_metadata *cmd);
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 5780accffa30..bb9b92ebbf8e 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -984,9 +984,14 @@ static void notify_mode_switch(struct cache *cache, enum cache_metadata_mode mod
static void set_cache_mode(struct cache *cache, enum cache_metadata_mode new_mode)
{
- bool needs_check = dm_cache_metadata_needs_check(cache->cmd);
+ bool needs_check;
enum cache_metadata_mode old_mode = get_cache_mode(cache);
+ if (dm_cache_metadata_needs_check(cache->cmd, &needs_check)) {
+ DMERR("unable to read needs_check flag, setting failure mode");
+ new_mode = CM_FAIL;
+ }
+
if (new_mode == CM_WRITE && needs_check) {
DMERR("%s: unable to switch cache to write mode until repaired.",
cache_device_name(cache));
@@ -3510,6 +3515,7 @@ static void cache_status(struct dm_target *ti, status_type_t type,
char buf[BDEVNAME_SIZE];
struct cache *cache = ti->private;
dm_cblock_t residency;
+ bool needs_check;
switch (type) {
case STATUSTYPE_INFO:
@@ -3583,7 +3589,9 @@ static void cache_status(struct dm_target *ti, status_type_t type,
else
DMEMIT("rw ");
- if (dm_cache_metadata_needs_check(cache->cmd))
+ r = dm_cache_metadata_needs_check(cache->cmd, &needs_check);
+
+ if (r || needs_check)
DMEMIT("needs_check ");
else
DMEMIT("- ");
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
index fae34e7a0b1e..12b5216c2cfe 100644
--- a/drivers/md/dm-exception-store.h
+++ b/drivers/md/dm-exception-store.h
@@ -69,7 +69,7 @@ struct dm_exception_store_type {
* Update the metadata with this exception.
*/
void (*commit_exception) (struct dm_exception_store *store,
- struct dm_exception *e,
+ struct dm_exception *e, int valid,
void (*callback) (void *, int success),
void *callback_context);
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 3164b8bce294..4d3909393f2c 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -695,7 +695,7 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
}
static void persistent_commit_exception(struct dm_exception_store *store,
- struct dm_exception *e,
+ struct dm_exception *e, int valid,
void (*callback) (void *, int success),
void *callback_context)
{
@@ -704,6 +704,9 @@ static void persistent_commit_exception(struct dm_exception_store *store,
struct core_exception ce;
struct commit_callback *cb;
+ if (!valid)
+ ps->valid = 0;
+
ce.old_chunk = e->old_chunk;
ce.new_chunk = e->new_chunk;
write_exception(ps, ps->current_committed++, &ce);
diff --git a/drivers/md/dm-snap-transient.c b/drivers/md/dm-snap-transient.c
index 9b7c8c8049d6..4d50a12cf00c 100644
--- a/drivers/md/dm-snap-transient.c
+++ b/drivers/md/dm-snap-transient.c
@@ -52,12 +52,12 @@ static int transient_prepare_exception(struct dm_exception_store *store,
}
static void transient_commit_exception(struct dm_exception_store *store,
- struct dm_exception *e,
+ struct dm_exception *e, int valid,
void (*callback) (void *, int success),
void *callback_context)
{
/* Just succeed */
- callback(callback_context, 1);
+ callback(callback_context, valid);
}
static void transient_usage(struct dm_exception_store *store,
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index f68d0ae5b198..e4d1bafe78c1 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1105,6 +1105,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
int i;
int r = -EINVAL;
char *origin_path, *cow_path;
+ dev_t origin_dev, cow_dev;
unsigned args_used, num_flush_bios = 1;
fmode_t origin_mode = FMODE_READ;
@@ -1135,11 +1136,19 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->error = "Cannot get origin device";
goto bad_origin;
}
+ origin_dev = s->origin->bdev->bd_dev;
cow_path = argv[0];
argv++;
argc--;
+ cow_dev = dm_get_dev_t(cow_path);
+ if (cow_dev && cow_dev == origin_dev) {
+ ti->error = "COW device cannot be the same as origin device";
+ r = -EINVAL;
+ goto bad_cow;
+ }
+
r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
if (r) {
ti->error = "Cannot get COW device";
@@ -1437,8 +1446,9 @@ static void __invalidate_snapshot(struct dm_snapshot *s, int err)
dm_table_event(s->ti->table);
}
-static void pending_complete(struct dm_snap_pending_exception *pe, int success)
+static void pending_complete(void *context, int success)
{
+ struct dm_snap_pending_exception *pe = context;
struct dm_exception *e;
struct dm_snapshot *s = pe->snap;
struct bio *origin_bios = NULL;
@@ -1506,24 +1516,13 @@ out:
free_pending_exception(pe);
}
-static void commit_callback(void *context, int success)
-{
- struct dm_snap_pending_exception *pe = context;
-
- pending_complete(pe, success);
-}
-
static void complete_exception(struct dm_snap_pending_exception *pe)
{
struct dm_snapshot *s = pe->snap;
- if (unlikely(pe->copy_error))
- pending_complete(pe, 0);
-
- else
- /* Update the metadata if we are persistent */
- s->store->type->commit_exception(s->store, &pe->e,
- commit_callback, pe);
+ /* Update the metadata if we are persistent */
+ s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error,
+ pending_complete, pe);
}
/*
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 061152a43730..cb5d0daf53bb 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -365,6 +365,26 @@ static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
}
/*
+ * Convert the path to a device
+ */
+dev_t dm_get_dev_t(const char *path)
+{
+ dev_t uninitialized_var(dev);
+ struct block_device *bdev;
+
+ bdev = lookup_bdev(path);
+ if (IS_ERR(bdev))
+ dev = name_to_dev_t(path);
+ else {
+ dev = bdev->bd_dev;
+ bdput(bdev);
+ }
+
+ return dev;
+}
+EXPORT_SYMBOL_GPL(dm_get_dev_t);
+
+/*
* Add a device to the list, or just increment the usage count if
* it's already present.
*/
@@ -372,23 +392,15 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
struct dm_dev **result)
{
int r;
- dev_t uninitialized_var(dev);
+ dev_t dev;
struct dm_dev_internal *dd;
struct dm_table *t = ti->table;
- struct block_device *bdev;
BUG_ON(!t);
- /* convert the path to a device */
- bdev = lookup_bdev(path);
- if (IS_ERR(bdev)) {
- dev = name_to_dev_t(path);
- if (!dev)
- return -ENODEV;
- } else {
- dev = bdev->bd_dev;
- bdput(bdev);
- }
+ dev = dm_get_dev_t(path);
+ if (!dev)
+ return -ENODEV;
dd = find_device(&t->devices, dev);
if (!dd) {
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index c219a053c7f6..911ada643364 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1943,5 +1943,8 @@ bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd)
void dm_pool_issue_prefetches(struct dm_pool_metadata *pmd)
{
- dm_tm_issue_prefetches(pmd->tm);
+ down_read(&pmd->root_lock);
+ if (!pmd->fail_io)
+ dm_tm_issue_prefetches(pmd->tm);
+ up_read(&pmd->root_lock);
}
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 63903a5a5d9e..a1cc797fe88f 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -3453,8 +3453,8 @@ static void pool_postsuspend(struct dm_target *ti)
struct pool_c *pt = ti->private;
struct pool *pool = pt->pool;
- cancel_delayed_work(&pool->waker);
- cancel_delayed_work(&pool->no_space_timeout);
+ cancel_delayed_work_sync(&pool->waker);
+ cancel_delayed_work_sync(&pool->no_space_timeout);
flush_workqueue(pool->wq);
(void) commit(pool);
}
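cancel_delayed_work() only removes a pending timer; an instance that is
already running can re-arm itself behind the flush_workqueue() that follows.
The _sync variants also wait for the running instance. A minimal sketch of the
race (the demo_* names are hypothetical):

	#include <linux/workqueue.h>

	struct demo_pool {
		struct workqueue_struct *wq;
		struct delayed_work waker;
	};

	/* Periodic worker that re-arms itself */
	static void demo_waker(struct work_struct *ws)
	{
		struct demo_pool *pool = container_of(to_delayed_work(ws),
						      struct demo_pool, waker);

		/* ... periodic work ... */
		queue_delayed_work(pool->wq, &pool->waker, HZ);
	}

	static void demo_postsuspend(struct demo_pool *pool)
	{
		/* waits for a running demo_waker(), so it cannot re-arm */
		cancel_delayed_work_sync(&pool->waker);
		flush_workqueue(pool->wq);
	}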
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 5df40480228b..c338aebb4ccd 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1109,12 +1109,8 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
* back into ->request_fn() could deadlock attempting to grab the
* queue lock again.
*/
- if (run_queue) {
- if (md->queue->mq_ops)
- blk_mq_run_hw_queues(md->queue, true);
- else
- blk_run_queue_async(md->queue);
- }
+ if (!md->queue->mq_ops && run_queue)
+ blk_run_queue_async(md->queue);
/*
* dm_put() must be at the end of this function. See the comment above
@@ -1191,6 +1187,8 @@ static void dm_unprep_request(struct request *rq)
if (clone)
free_rq_clone(clone);
+ else if (!tio->md->queue->mq_ops)
+ free_rq_tio(tio);
}
/*
@@ -1212,9 +1210,9 @@ static void dm_requeue_original_request(struct mapped_device *md,
{
int rw = rq_data_dir(rq);
+ rq_end_stats(md, rq);
dm_unprep_request(rq);
- rq_end_stats(md, rq);
if (!rq->q->mq_ops)
old_requeue_request(rq);
else {
@@ -1334,7 +1332,10 @@ static void dm_complete_request(struct request *rq, int error)
struct dm_rq_target_io *tio = tio_from_request(rq);
tio->error = error;
- blk_complete_request(rq);
+ if (!rq->q->mq_ops)
+ blk_complete_request(rq);
+ else
+ blk_mq_complete_request(rq, error);
}
/*
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 61aacab424cf..b1e1f6b95782 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2017,28 +2017,32 @@ int md_integrity_register(struct mddev *mddev)
}
EXPORT_SYMBOL(md_integrity_register);
-/* Disable data integrity if non-capable/non-matching disk is being added */
-void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
+/*
+ * Attempt to add an rdev, but only if it is consistent with the current
+ * integrity profile
+ */
+int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
struct blk_integrity *bi_rdev;
struct blk_integrity *bi_mddev;
+ char name[BDEVNAME_SIZE];
if (!mddev->gendisk)
- return;
+ return 0;
bi_rdev = bdev_get_integrity(rdev->bdev);
bi_mddev = blk_get_integrity(mddev->gendisk);
if (!bi_mddev) /* nothing to do */
- return;
- if (rdev->raid_disk < 0) /* skip spares */
- return;
- if (bi_rdev && blk_integrity_compare(mddev->gendisk,
- rdev->bdev->bd_disk) >= 0)
- return;
- WARN_ON_ONCE(!mddev->suspended);
- printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev));
- blk_integrity_unregister(mddev->gendisk);
+ return 0;
+
+ if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
+ printk(KERN_NOTICE "%s: incompatible integrity profile for %s\n",
+ mdname(mddev), bdevname(rdev->bdev, name));
+ return -ENXIO;
+ }
+
+ return 0;
}
EXPORT_SYMBOL(md_integrity_add_rdev);
diff --git a/drivers/md/md.h b/drivers/md/md.h
index ca0b643fe3c1..dfa57b41541b 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -657,7 +657,7 @@ extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors);
extern int md_check_no_bitmap(struct mddev *mddev);
extern int md_integrity_register(struct mddev *mddev);
-extern void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev);
+extern int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
extern void mddev_init(struct mddev *mddev);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 7331a80d89f1..dd483bb2e111 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -129,7 +129,9 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
}
multipath = conf->multipaths + mp_bh->path;
- mp_bh->bio = *bio;
+ bio_init(&mp_bh->bio);
+ __bio_clone_fast(&mp_bh->bio, bio);
+
mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
mp_bh->bio.bi_bdev = multipath->rdev->bdev;
mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
@@ -257,6 +259,9 @@ static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev)
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
+ err = md_integrity_add_rdev(rdev, mddev);
+ if (err)
+ break;
spin_lock_irq(&conf->device_lock);
mddev->degraded--;
rdev->raid_disk = path;
@@ -264,9 +269,6 @@ static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev)
spin_unlock_irq(&conf->device_lock);
rcu_assign_pointer(p->rdev, rdev);
err = 0;
- mddev_suspend(mddev);
- md_integrity_add_rdev(rdev, mddev);
- mddev_resume(mddev);
break;
}
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index fca6dbcf9a47..7e44005595c1 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -152,12 +152,9 @@ static int brb_peek(struct bop_ring_buffer *brb, struct block_op *result)
static int brb_pop(struct bop_ring_buffer *brb)
{
- struct block_op *bop;
-
if (brb_empty(brb))
return -ENODATA;
- bop = brb->bops + brb->begin;
brb->begin = brb_next(brb, brb->begin);
return 0;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index e2169ff6e0f0..515554c7365b 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1589,6 +1589,9 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
if (mddev->recovery_disabled == conf->recovery_disabled)
return -EBUSY;
+ if (md_integrity_add_rdev(rdev, mddev))
+ return -ENXIO;
+
if (rdev->raid_disk >= 0)
first = last = rdev->raid_disk;
@@ -1632,9 +1635,6 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
break;
}
}
- mddev_suspend(mddev);
- md_integrity_add_rdev(rdev, mddev);
- mddev_resume(mddev);
if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
print_conf(conf);
@@ -2274,6 +2274,7 @@ static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
if (fail) {
spin_lock_irq(&conf->device_lock);
list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
+ conf->nr_queued++;
spin_unlock_irq(&conf->device_lock);
md_wakeup_thread(conf->mddev->thread);
} else {
@@ -2391,8 +2392,10 @@ static void raid1d(struct md_thread *thread)
LIST_HEAD(tmp);
spin_lock_irqsave(&conf->device_lock, flags);
if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
- list_add(&tmp, &conf->bio_end_io_list);
- list_del_init(&conf->bio_end_io_list);
+ while (!list_empty(&conf->bio_end_io_list)) {
+ list_move(conf->bio_end_io_list.prev, &tmp);
+ conf->nr_queued--;
+ }
}
spin_unlock_irqrestore(&conf->device_lock, flags);
while (!list_empty(&tmp)) {
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 84e597e1c489..ebb0dd612ebd 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1698,6 +1698,9 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1))
return -EINVAL;
+ if (md_integrity_add_rdev(rdev, mddev))
+ return -ENXIO;
+
if (rdev->raid_disk >= 0)
first = last = rdev->raid_disk;
@@ -1739,9 +1742,6 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
rcu_assign_pointer(p->rdev, rdev);
break;
}
- mddev_suspend(mddev);
- md_integrity_add_rdev(rdev, mddev);
- mddev_resume(mddev);
if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
@@ -2664,6 +2664,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
if (fail) {
spin_lock_irq(&conf->device_lock);
list_add(&r10_bio->retry_list, &conf->bio_end_io_list);
+ conf->nr_queued++;
spin_unlock_irq(&conf->device_lock);
md_wakeup_thread(conf->mddev->thread);
} else {
@@ -2691,8 +2692,10 @@ static void raid10d(struct md_thread *thread)
LIST_HEAD(tmp);
spin_lock_irqsave(&conf->device_lock, flags);
if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
- list_add(&tmp, &conf->bio_end_io_list);
- list_del_init(&conf->bio_end_io_list);
+ while (!list_empty(&conf->bio_end_io_list)) {
+ list_move(conf->bio_end_io_list.prev, &tmp);
+ conf->nr_queued--;
+ }
}
spin_unlock_irqrestore(&conf->device_lock, flags);
while (!list_empty(&tmp)) {
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 704ef7fcfbf8..10ce885445f6 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -340,8 +340,7 @@ static void release_inactive_stripe_list(struct r5conf *conf,
int hash)
{
int size;
- unsigned long do_wakeup = 0;
- int i = 0;
+ bool do_wakeup = false;
unsigned long flags;
if (hash == NR_STRIPE_HASH_LOCKS) {
@@ -362,19 +361,15 @@ static void release_inactive_stripe_list(struct r5conf *conf,
!list_empty(list))
atomic_dec(&conf->empty_inactive_list_nr);
list_splice_tail_init(list, conf->inactive_list + hash);
- do_wakeup |= 1 << hash;
+ do_wakeup = true;
spin_unlock_irqrestore(conf->hash_locks + hash, flags);
}
size--;
hash--;
}
- for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) {
- if (do_wakeup & (1 << i))
- wake_up(&conf->wait_for_stripe[i]);
- }
-
if (do_wakeup) {
+ wake_up(&conf->wait_for_stripe);
if (atomic_read(&conf->active_stripes) == 0)
wake_up(&conf->wait_for_quiescent);
if (conf->retry_read_aligned)
@@ -687,15 +682,14 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
if (!sh) {
set_bit(R5_INACTIVE_BLOCKED,
&conf->cache_state);
- wait_event_exclusive_cmd(
- conf->wait_for_stripe[hash],
+ wait_event_lock_irq(
+ conf->wait_for_stripe,
!list_empty(conf->inactive_list + hash) &&
(atomic_read(&conf->active_stripes)
< (conf->max_nr_stripes * 3 / 4)
|| !test_bit(R5_INACTIVE_BLOCKED,
&conf->cache_state)),
- spin_unlock_irq(conf->hash_locks + hash),
- spin_lock_irq(conf->hash_locks + hash));
+ *(conf->hash_locks + hash));
clear_bit(R5_INACTIVE_BLOCKED,
&conf->cache_state);
} else {
@@ -720,9 +714,6 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
}
} while (sh == NULL);
- if (!list_empty(conf->inactive_list + hash))
- wake_up(&conf->wait_for_stripe[hash]);
-
spin_unlock_irq(conf->hash_locks + hash);
return sh;
}
@@ -2091,6 +2082,14 @@ static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
unsigned long cpu;
int err = 0;
+ /*
+ * Never shrink. And mddev_suspend() could deadlock if this is called
+ * from raid5d. In that case, scribble_disks and scribble_sectors
+ * should be equal to new_disks and new_sectors.
+ */
+ if (conf->scribble_disks >= new_disks &&
+ conf->scribble_sectors >= new_sectors)
+ return 0;
mddev_suspend(conf->mddev);
get_online_cpus();
for_each_present_cpu(cpu) {
@@ -2112,6 +2111,10 @@ static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
}
put_online_cpus();
mddev_resume(conf->mddev);
+ if (!err) {
+ conf->scribble_disks = new_disks;
+ conf->scribble_sectors = new_sectors;
+ }
return err;
}
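The two resize_chunks() hunks above cache the scribble sizes so a redundant resize can bail out before reaching mddev_suspend(). A hedged restatement of the invariant they maintain (the helper name is illustrative):

/* Scribble buffers only ever grow; if the cached sizes already cover the
 * request, resize_chunks() returns early and never suspends the array,
 * avoiding the raid5d deadlock described in the comment above.
 */
static bool scribble_big_enough(struct r5conf *conf, int disks, int sectors)
{
        return conf->scribble_disks >= disks &&
               conf->scribble_sectors >= sectors;
}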
@@ -2192,7 +2195,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
cnt = 0;
list_for_each_entry(nsh, &newstripes, lru) {
lock_device_hash_lock(conf, hash);
- wait_event_exclusive_cmd(conf->wait_for_stripe[hash],
+ wait_event_cmd(conf->wait_for_stripe,
!list_empty(conf->inactive_list + hash),
unlock_device_hash_lock(conf, hash),
lock_device_hash_lock(conf, hash));
@@ -4238,7 +4241,6 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
WARN_ON_ONCE(sh->state & ((1 << STRIPE_ACTIVE) |
(1 << STRIPE_SYNCING) |
(1 << STRIPE_REPLACED) |
- (1 << STRIPE_PREREAD_ACTIVE) |
(1 << STRIPE_DELAYED) |
(1 << STRIPE_BIT_DELAY) |
(1 << STRIPE_FULL_WRITE) |
@@ -4253,6 +4255,7 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
(1 << STRIPE_REPLACED)));
set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
+ (1 << STRIPE_PREREAD_ACTIVE) |
(1 << STRIPE_DEGRADED)),
head_sh->state & (1 << STRIPE_INSYNC));
@@ -6414,6 +6417,12 @@ static int raid5_alloc_percpu(struct r5conf *conf)
}
put_online_cpus();
+ if (!err) {
+ conf->scribble_disks = max(conf->raid_disks,
+ conf->previous_raid_disks);
+ conf->scribble_sectors = max(conf->chunk_sectors,
+ conf->prev_chunk_sectors);
+ }
return err;
}
@@ -6504,9 +6513,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
seqcount_init(&conf->gen_lock);
mutex_init(&conf->cache_size_mutex);
init_waitqueue_head(&conf->wait_for_quiescent);
- for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) {
- init_waitqueue_head(&conf->wait_for_stripe[i]);
- }
+ init_waitqueue_head(&conf->wait_for_stripe);
init_waitqueue_head(&conf->wait_for_overlap);
INIT_LIST_HEAD(&conf->handle_list);
INIT_LIST_HEAD(&conf->hold_list);
@@ -7015,8 +7022,8 @@ static int run(struct mddev *mddev)
}
if (discard_supported &&
- mddev->queue->limits.max_discard_sectors >= stripe &&
- mddev->queue->limits.discard_granularity >= stripe)
+ mddev->queue->limits.max_discard_sectors >= (stripe >> 9) &&
+ mddev->queue->limits.discard_granularity >= stripe)
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
mddev->queue);
else
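The discard hunk above fixes a unit mismatch: limits.max_discard_sectors is counted in 512-byte sectors while stripe is in bytes (discard_granularity is in bytes, which is why that operand is unchanged). A small sketch of the conversion, with a worked value:

/* Convert a byte count to 512-byte sectors, as max_discard_sectors
 * expects; a 512 KiB stripe is 524288 bytes == 1024 sectors.
 */
static inline unsigned int bytes_to_sectors(unsigned int bytes)
{
        return bytes >> 9;
}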
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index a415e1cd39b8..517d4b68a1be 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -510,6 +510,8 @@ struct r5conf {
* conversions
*/
} __percpu *percpu;
+ int scribble_disks;
+ int scribble_sectors;
#ifdef CONFIG_HOTPLUG_CPU
struct notifier_block cpu_notify;
#endif
@@ -522,7 +524,7 @@ struct r5conf {
atomic_t empty_inactive_list_nr;
struct llist_head released_stripes;
wait_queue_head_t wait_for_quiescent;
- wait_queue_head_t wait_for_stripe[NR_STRIPE_HASH_LOCKS];
+ wait_queue_head_t wait_for_stripe;
wait_queue_head_t wait_for_overlap;
unsigned long cache_state;
#define R5_INACTIVE_BLOCKED 1 /* release of inactive stripes blocked,
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index c38ef1a72b4a..e2a3833170e3 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -2313,9 +2313,9 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
dev_dbg(fe->dvb->device, "%s: current delivery system on cache: %d, V3 type: %d\n",
__func__, c->delivery_system, fe->ops.info.type);
- /* Force the CAN_INVERSION_AUTO bit on. If the frontend doesn't
- * do it, it is done for it. */
- info->caps |= FE_CAN_INVERSION_AUTO;
+ /* Set the CAN_INVERSION_AUTO bit on, unless the tune mode is one-shot */
+ if (!(fepriv->tune_mode_flags & FE_TUNE_MODE_ONESHOT))
+ info->caps |= FE_CAN_INVERSION_AUTO;
err = 0;
break;
}
diff --git a/drivers/media/dvb-frontends/tda1004x.c b/drivers/media/dvb-frontends/tda1004x.c
index 0e209b56c76c..c6abeb4fba9d 100644
--- a/drivers/media/dvb-frontends/tda1004x.c
+++ b/drivers/media/dvb-frontends/tda1004x.c
@@ -903,9 +903,18 @@ static int tda1004x_get_fe(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *fe_params = &fe->dtv_property_cache;
struct tda1004x_state* state = fe->demodulator_priv;
+ int status;
dprintk("%s\n", __func__);
+ status = tda1004x_read_byte(state, TDA1004X_STATUS_CD);
+ if (status == -1)
+ return -EIO;
+
+ /* Only update the properties cache if device is locked */
+ if (!(status & 8))
+ return 0;
+
// inversion status
fe_params->inversion = INVERSION_OFF;
if (tda1004x_read_byte(state, TDA1004X_CONFC1) & 0x20)
diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
index e4900df1140b..c24839cfcc35 100644
--- a/drivers/media/i2c/adv7511.c
+++ b/drivers/media/i2c/adv7511.c
@@ -1161,12 +1161,23 @@ static void adv7511_dbg_dump_edid(int lvl, int debug, struct v4l2_subdev *sd, in
}
}
+static void adv7511_notify_no_edid(struct v4l2_subdev *sd)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+ struct adv7511_edid_detect ed;
+
+ /* We failed to read the EDID, so send an event for this. */
+ ed.present = false;
+ ed.segment = adv7511_rd(sd, 0xc4);
+ v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
+ v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, 0x0);
+}
+
static void adv7511_edid_handler(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct adv7511_state *state = container_of(dwork, struct adv7511_state, edid_handler);
struct v4l2_subdev *sd = &state->sd;
- struct adv7511_edid_detect ed;
v4l2_dbg(1, debug, sd, "%s:\n", __func__);
@@ -1191,9 +1202,7 @@ static void adv7511_edid_handler(struct work_struct *work)
}
/* We failed to read the EDID, so send an event for this. */
- ed.present = false;
- ed.segment = adv7511_rd(sd, 0xc4);
- v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
+ adv7511_notify_no_edid(sd);
v4l2_dbg(1, debug, sd, "%s: no edid found\n", __func__);
}
@@ -1264,7 +1273,6 @@ static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd)
/* update read only ctrls */
v4l2_ctrl_s_ctrl(state->hotplug_ctrl, adv7511_have_hotplug(sd) ? 0x1 : 0x0);
v4l2_ctrl_s_ctrl(state->rx_sense_ctrl, adv7511_have_rx_sense(sd) ? 0x1 : 0x0);
- v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, state->edid.segments ? 0x1 : 0x0);
if ((status & MASK_ADV7511_HPD_DETECT) && ((status & MASK_ADV7511_MSEN_DETECT) || state->edid.segments)) {
v4l2_dbg(1, debug, sd, "%s: hotplug and (rx-sense or edid)\n", __func__);
@@ -1294,6 +1302,7 @@ static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd)
}
adv7511_s_power(sd, false);
memset(&state->edid, 0, sizeof(struct adv7511_state_edid));
+ adv7511_notify_no_edid(sd);
}
}
@@ -1370,6 +1379,7 @@ static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
}
/* one more segment read ok */
state->edid.segments = segment + 1;
+ v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, 0x1);
if (((state->edid.data[0x7e] >> 1) + 1) > state->edid.segments) {
/* Request next EDID segment */
v4l2_dbg(1, debug, sd, "%s: request segment %d\n", __func__, state->edid.segments);
@@ -1389,7 +1399,6 @@ static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
ed.present = true;
ed.segment = 0;
state->edid_detect_counter++;
- v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, state->edid.segments ? 0x1 : 0x0);
v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
return ed.present;
}
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index 5631ec004eed..01adcdc52346 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -1960,10 +1960,9 @@ static int adv76xx_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
}
/* tx 5v detect */
- tx_5v = io_read(sd, 0x70) & info->cable_det_mask;
+ tx_5v = irq_reg_0x70 & info->cable_det_mask;
if (tx_5v) {
v4l2_dbg(1, debug, sd, "%s: tx_5v: 0x%x\n", __func__, tx_5v);
- io_write(sd, 0x71, tx_5v);
adv76xx_s_detect_tx_5v_ctrl(sd);
if (handled)
*handled = true;
diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c
index 728d2cc8a3e7..175a76114953 100644
--- a/drivers/media/i2c/ir-kbd-i2c.c
+++ b/drivers/media/i2c/ir-kbd-i2c.c
@@ -478,7 +478,6 @@ static const struct i2c_device_id ir_kbd_id[] = {
{ "ir_rx_z8f0811_hdpvr", 0 },
{ }
};
-MODULE_DEVICE_TABLE(i2c, ir_kbd_id);
static struct i2c_driver ir_kbd_driver = {
.driver = {
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index 15a4ebc2844d..51dbef2f9a48 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -2334,6 +2334,19 @@ static int bttv_g_fmt_vid_overlay(struct file *file, void *priv,
return 0;
}
+static void bttv_get_width_mask_vid_cap(const struct bttv_format *fmt,
+ unsigned int *width_mask,
+ unsigned int *width_bias)
+{
+ if (fmt->flags & FORMAT_FLAGS_PLANAR) {
+ *width_mask = ~15; /* width must be a multiple of 16 pixels */
+ *width_bias = 8; /* nearest */
+ } else {
+ *width_mask = ~3; /* width must be a multiple of 4 pixels */
+ *width_bias = 2; /* nearest */
+ }
+}
+
static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
@@ -2343,6 +2356,7 @@ static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
enum v4l2_field field;
__s32 width, height;
__s32 height2;
+ unsigned int width_mask, width_bias;
int rc;
fmt = format_by_fourcc(f->fmt.pix.pixelformat);
@@ -2375,9 +2389,9 @@ static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
width = f->fmt.pix.width;
height = f->fmt.pix.height;
+ bttv_get_width_mask_vid_cap(fmt, &width_mask, &width_bias);
rc = limit_scaled_size_lock(fh, &width, &height, field,
- /* width_mask: 4 pixels */ ~3,
- /* width_bias: nearest */ 2,
+ width_mask, width_bias,
/* adjust_size */ 1,
/* adjust_crop */ 0);
if (0 != rc)
@@ -2410,6 +2424,7 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
struct bttv_fh *fh = priv;
struct bttv *btv = fh->btv;
__s32 width, height;
+ unsigned int width_mask, width_bias;
enum v4l2_field field;
retval = bttv_switch_type(fh, f->type);
@@ -2424,9 +2439,10 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
height = f->fmt.pix.height;
field = f->fmt.pix.field;
+ fmt = format_by_fourcc(f->fmt.pix.pixelformat);
+ bttv_get_width_mask_vid_cap(fmt, &width_mask, &width_bias);
retval = limit_scaled_size_lock(fh, &width, &height, f->fmt.pix.field,
- /* width_mask: 4 pixels */ ~3,
- /* width_bias: nearest */ 2,
+ width_mask, width_bias,
/* adjust_size */ 1,
/* adjust_crop */ 1);
if (0 != retval)
@@ -2434,8 +2450,6 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
f->fmt.pix.field = field;
- fmt = format_by_fourcc(f->fmt.pix.pixelformat);
-
/* update our state information */
fh->fmt = fmt;
fh->cap.field = f->fmt.pix.field;
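The width_mask/width_bias pair introduced above implements round-to-nearest alignment: the bias is half the alignment step, and the mask clears the low bits. A hedged sketch of the arithmetic (helper name is illustrative, assuming limit_scaled_size_lock() applies the bias before masking):

/* Round width to the nearest supported multiple: with mask = ~15 and
 * bias = 8 (planar formats), 1913 -> (1913 + 8) & ~15 = 1920.
 */
static unsigned int bttv_align_width(unsigned int width,
                                     unsigned int width_mask,
                                     unsigned int width_bias)
{
        return (width + width_bias) & width_mask;
}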
diff --git a/drivers/media/pci/saa7134/saa7134-alsa.c b/drivers/media/pci/saa7134/saa7134-alsa.c
index 1d2c310ce838..94f816244407 100644
--- a/drivers/media/pci/saa7134/saa7134-alsa.c
+++ b/drivers/media/pci/saa7134/saa7134-alsa.c
@@ -1211,6 +1211,8 @@ static int alsa_device_init(struct saa7134_dev *dev)
static int alsa_device_exit(struct saa7134_dev *dev)
{
+ if (!snd_saa7134_cards[dev->nr])
+ return 1;
snd_card_free(snd_saa7134_cards[dev->nr]);
snd_saa7134_cards[dev->nr] = NULL;
@@ -1260,7 +1262,8 @@ static void saa7134_alsa_exit(void)
int idx;
for (idx = 0; idx < SNDRV_CARDS; idx++) {
- snd_card_free(snd_saa7134_cards[idx]);
+ if (snd_saa7134_cards[idx])
+ snd_card_free(snd_saa7134_cards[idx]);
}
saa7134_dmasound_init = NULL;
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index 518086c7aed5..15e56c07b217 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -1219,10 +1219,13 @@ static int saa7134_g_fmt_vid_cap(struct file *file, void *priv,
f->fmt.pix.height = dev->height;
f->fmt.pix.field = dev->field;
f->fmt.pix.pixelformat = dev->fmt->fourcc;
- f->fmt.pix.bytesperline =
- (f->fmt.pix.width * dev->fmt->depth) >> 3;
+ if (dev->fmt->planar)
+ f->fmt.pix.bytesperline = f->fmt.pix.width;
+ else
+ f->fmt.pix.bytesperline =
+ (f->fmt.pix.width * dev->fmt->depth) / 8;
f->fmt.pix.sizeimage =
- f->fmt.pix.height * f->fmt.pix.bytesperline;
+ (f->fmt.pix.height * f->fmt.pix.width * dev->fmt->depth) / 8;
f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
return 0;
}
@@ -1298,10 +1301,13 @@ static int saa7134_try_fmt_vid_cap(struct file *file, void *priv,
if (f->fmt.pix.height > maxh)
f->fmt.pix.height = maxh;
f->fmt.pix.width &= ~0x03;
- f->fmt.pix.bytesperline =
- (f->fmt.pix.width * fmt->depth) >> 3;
+ if (fmt->planar)
+ f->fmt.pix.bytesperline = f->fmt.pix.width;
+ else
+ f->fmt.pix.bytesperline =
+ (f->fmt.pix.width * fmt->depth) / 8;
f->fmt.pix.sizeimage =
- f->fmt.pix.height * f->fmt.pix.bytesperline;
+ (f->fmt.pix.height * f->fmt.pix.width * fmt->depth) / 8;
f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
return 0;
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
index 654e964f84a2..d76511c1c1e3 100644
--- a/drivers/media/platform/coda/coda-bit.c
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -1342,7 +1342,7 @@ static void coda_finish_encode(struct coda_ctx *ctx)
/* Calculate bytesused field */
if (dst_buf->sequence == 0) {
- vb2_set_plane_payload(&dst_buf->vb2_buf, 0,
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0, wr_ptr - start_ptr +
ctx->vpu_header_size[0] +
ctx->vpu_header_size[1] +
ctx->vpu_header_size[2]);
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index 15516a6e3a39..323aad3c89de 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -2119,14 +2119,12 @@ static int coda_probe(struct platform_device *pdev)
pdev_id = of_id ? of_id->data : platform_get_device_id(pdev);
- if (of_id) {
+ if (of_id)
dev->devtype = of_id->data;
- } else if (pdev_id) {
+ else if (pdev_id)
dev->devtype = &coda_devdata[pdev_id->driver_data];
- } else {
- ret = -EINVAL;
- goto err_v4l2_register;
- }
+ else
+ return -EINVAL;
spin_lock_init(&dev->irqlock);
INIT_LIST_HEAD(&dev->instances);
diff --git a/drivers/media/platform/vsp1/vsp1_sru.c b/drivers/media/platform/vsp1/vsp1_sru.c
index 6310acab60e7..d41ae950d1a1 100644
--- a/drivers/media/platform/vsp1/vsp1_sru.c
+++ b/drivers/media/platform/vsp1/vsp1_sru.c
@@ -154,6 +154,7 @@ static int sru_s_stream(struct v4l2_subdev *subdev, int enable)
mutex_lock(sru->ctrls.lock);
ctrl0 |= vsp1_sru_read(sru, VI6_SRU_CTRL0)
& (VI6_SRU_CTRL0_PARAM0_MASK | VI6_SRU_CTRL0_PARAM1_MASK);
+ vsp1_sru_write(sru, VI6_SRU_CTRL0, ctrl0);
mutex_unlock(sru->ctrls.lock);
vsp1_sru_write(sru, VI6_SRU_CTRL1, VI6_SRU_CTRL1_PARAM5);
diff --git a/drivers/media/rc/sunxi-cir.c b/drivers/media/rc/sunxi-cir.c
index 7830aef3db45..40f77685cc4a 100644
--- a/drivers/media/rc/sunxi-cir.c
+++ b/drivers/media/rc/sunxi-cir.c
@@ -153,6 +153,8 @@ static int sunxi_ir_probe(struct platform_device *pdev)
if (!ir)
return -ENOMEM;
+ spin_lock_init(&ir->ir_lock);
+
if (of_device_is_compatible(dn, "allwinner,sun5i-a13-ir"))
ir->fifo_size = 64;
else
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
index ce157edd45fa..0e1ca2b00e61 100644
--- a/drivers/media/tuners/si2157.c
+++ b/drivers/media/tuners/si2157.c
@@ -168,6 +168,7 @@ static int si2157_init(struct dvb_frontend *fe)
len = fw->data[fw->size - remaining];
if (len > SI2157_ARGLEN) {
dev_err(&client->dev, "Bad firmware length\n");
+ ret = -EINVAL;
goto err_release_firmware;
}
memcpy(cmd.args, &fw->data[(fw->size - remaining) + 1], len);
diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
index 0934024fb89d..d91ded795c93 100644
--- a/drivers/media/usb/au0828/au0828-core.c
+++ b/drivers/media/usb/au0828/au0828-core.c
@@ -159,7 +159,7 @@ static void au0828_usb_disconnect(struct usb_interface *interface)
Set the status so poll routines can check and avoid
access after disconnect.
*/
- dev->dev_state = DEV_DISCONNECTED;
+ set_bit(DEV_DISCONNECTED, &dev->dev_state);
au0828_rc_unregister(dev);
/* Digital TV */
diff --git a/drivers/media/usb/au0828/au0828-input.c b/drivers/media/usb/au0828/au0828-input.c
index b0f067971979..3d6687f0407d 100644
--- a/drivers/media/usb/au0828/au0828-input.c
+++ b/drivers/media/usb/au0828/au0828-input.c
@@ -130,7 +130,7 @@ static int au0828_get_key_au8522(struct au0828_rc *ir)
bool first = true;
/* do nothing if device is disconnected */
- if (ir->dev->dev_state == DEV_DISCONNECTED)
+ if (test_bit(DEV_DISCONNECTED, &ir->dev->dev_state))
return 0;
/* Check IR int */
@@ -260,7 +260,7 @@ static void au0828_rc_stop(struct rc_dev *rc)
cancel_delayed_work_sync(&ir->work);
/* do nothing if device is disconnected */
- if (ir->dev->dev_state != DEV_DISCONNECTED) {
+ if (!test_bit(DEV_DISCONNECTED, &ir->dev->dev_state)) {
/* Disable IR */
au8522_rc_clear(ir, 0xe0, 1 << 4);
}
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 45c622e234f7..7b2fe1b56039 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -104,14 +104,13 @@ static inline void print_err_status(struct au0828_dev *dev,
static int check_dev(struct au0828_dev *dev)
{
- if (dev->dev_state & DEV_DISCONNECTED) {
+ if (test_bit(DEV_DISCONNECTED, &dev->dev_state)) {
pr_info("v4l2 ioctl: device not present\n");
return -ENODEV;
}
- if (dev->dev_state & DEV_MISCONFIGURED) {
- pr_info("v4l2 ioctl: device is misconfigured; "
- "close and open it again\n");
+ if (test_bit(DEV_MISCONFIGURED, &dev->dev_state)) {
+ pr_info("v4l2 ioctl: device is misconfigured; close and open it again\n");
return -EIO;
}
return 0;
@@ -519,8 +518,8 @@ static inline int au0828_isoc_copy(struct au0828_dev *dev, struct urb *urb)
if (!dev)
return 0;
- if ((dev->dev_state & DEV_DISCONNECTED) ||
- (dev->dev_state & DEV_MISCONFIGURED))
+ if (test_bit(DEV_DISCONNECTED, &dev->dev_state) ||
+ test_bit(DEV_MISCONFIGURED, &dev->dev_state))
return 0;
if (urb->status < 0) {
@@ -766,10 +765,10 @@ static int au0828_stream_interrupt(struct au0828_dev *dev)
int ret = 0;
dev->stream_state = STREAM_INTERRUPT;
- if (dev->dev_state == DEV_DISCONNECTED)
+ if (test_bit(DEV_DISCONNECTED, &dev->dev_state))
return -ENODEV;
else if (ret) {
- dev->dev_state = DEV_MISCONFIGURED;
+ set_bit(DEV_MISCONFIGURED, &dev->dev_state);
dprintk(1, "%s device is misconfigured!\n", __func__);
return ret;
}
@@ -958,7 +957,7 @@ static int au0828_v4l2_open(struct file *filp)
int ret;
dprintk(1,
- "%s called std_set %d dev_state %d stream users %d users %d\n",
+ "%s called std_set %d dev_state %ld stream users %d users %d\n",
__func__, dev->std_set_in_tuner_core, dev->dev_state,
dev->streaming_users, dev->users);
@@ -977,7 +976,7 @@ static int au0828_v4l2_open(struct file *filp)
au0828_analog_stream_enable(dev);
au0828_analog_stream_reset(dev);
dev->stream_state = STREAM_OFF;
- dev->dev_state |= DEV_INITIALIZED;
+ set_bit(DEV_INITIALIZED, &dev->dev_state);
}
dev->users++;
mutex_unlock(&dev->lock);
@@ -991,7 +990,7 @@ static int au0828_v4l2_close(struct file *filp)
struct video_device *vdev = video_devdata(filp);
dprintk(1,
- "%s called std_set %d dev_state %d stream users %d users %d\n",
+ "%s called std_set %d dev_state %ld stream users %d users %d\n",
__func__, dev->std_set_in_tuner_core, dev->dev_state,
dev->streaming_users, dev->users);
@@ -1007,7 +1006,7 @@ static int au0828_v4l2_close(struct file *filp)
del_timer_sync(&dev->vbi_timeout);
}
- if (dev->dev_state == DEV_DISCONNECTED)
+ if (test_bit(DEV_DISCONNECTED, &dev->dev_state))
goto end;
if (dev->users == 1) {
@@ -1036,7 +1035,7 @@ static void au0828_init_tuner(struct au0828_dev *dev)
.type = V4L2_TUNER_ANALOG_TV,
};
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
if (dev->std_set_in_tuner_core)
@@ -1108,7 +1107,7 @@ static int vidioc_querycap(struct file *file, void *priv,
struct video_device *vdev = video_devdata(file);
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
strlcpy(cap->driver, "au0828", sizeof(cap->driver));
@@ -1151,7 +1150,7 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
f->fmt.pix.width = dev->width;
@@ -1170,7 +1169,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
return au0828_set_format(dev, VIDIOC_TRY_FMT, f);
@@ -1182,7 +1181,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct au0828_dev *dev = video_drvdata(file);
int rc;
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
rc = check_dev(dev);
@@ -1204,7 +1203,7 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id norm)
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
if (norm == dev->std)
@@ -1236,7 +1235,7 @@ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *norm)
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
*norm = dev->std;
@@ -1259,7 +1258,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
[AU0828_VMUX_DEBUG] = "tv debug"
};
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
tmp = input->index;
@@ -1289,7 +1288,7 @@ static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
*i = dev->ctrl_input;
@@ -1300,7 +1299,7 @@ static void au0828_s_input(struct au0828_dev *dev, int index)
{
int i;
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
switch (AUVI_INPUT(index).type) {
@@ -1385,7 +1384,7 @@ static int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *a)
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
a->index = dev->ctrl_ainput;
@@ -1405,7 +1404,7 @@ static int vidioc_s_audio(struct file *file, void *priv, const struct v4l2_audio
if (a->index != dev->ctrl_ainput)
return -EINVAL;
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
return 0;
}
@@ -1417,7 +1416,7 @@ static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
if (t->index != 0)
return -EINVAL;
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
strcpy(t->name, "Auvitek tuner");
@@ -1437,7 +1436,7 @@ static int vidioc_s_tuner(struct file *file, void *priv,
if (t->index != 0)
return -EINVAL;
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
au0828_init_tuner(dev);
@@ -1459,7 +1458,7 @@ static int vidioc_g_frequency(struct file *file, void *priv,
if (freq->tuner != 0)
return -EINVAL;
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
freq->frequency = dev->ctrl_freq;
return 0;
@@ -1474,7 +1473,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
if (freq->tuner != 0)
return -EINVAL;
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
au0828_init_tuner(dev);
@@ -1500,7 +1499,7 @@ static int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
format->fmt.vbi.samples_per_line = dev->vbi_width;
@@ -1526,7 +1525,7 @@ static int vidioc_cropcap(struct file *file, void *priv,
if (cc->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
cc->bounds.left = 0;
@@ -1548,7 +1547,7 @@ static int vidioc_g_register(struct file *file, void *priv,
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
reg->val = au0828_read(dev, reg->reg);
@@ -1561,7 +1560,7 @@ static int vidioc_s_register(struct file *file, void *priv,
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
return au0828_writereg(dev, reg->reg, reg->val);
diff --git a/drivers/media/usb/au0828/au0828.h b/drivers/media/usb/au0828/au0828.h
index 60b59391ea2a..d1b6405a05a4 100644
--- a/drivers/media/usb/au0828/au0828.h
+++ b/drivers/media/usb/au0828/au0828.h
@@ -21,6 +21,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/bitops.h>
#include <linux/usb.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
@@ -122,9 +123,9 @@ enum au0828_stream_state {
/* device state */
enum au0828_dev_state {
- DEV_INITIALIZED = 0x01,
- DEV_DISCONNECTED = 0x02,
- DEV_MISCONFIGURED = 0x04
+ DEV_INITIALIZED = 0,
+ DEV_DISCONNECTED = 1,
+ DEV_MISCONFIGURED = 2
};
struct au0828_dev;
@@ -248,7 +249,7 @@ struct au0828_dev {
int input_type;
int std_set_in_tuner_core;
unsigned int ctrl_input;
- enum au0828_dev_state dev_state;
+ unsigned long dev_state; /* defined at enum au0828_dev_state */
enum au0828_stream_state stream_state;
wait_queue_head_t open;
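With dev_state now an unsigned long bitmap and the enum values plain bit numbers, state updates go through the atomic bitops instead of non-atomic flag assignments. A minimal sketch of the before/after (helper names are illustrative):

#include <linux/bitops.h>

/* Old style (racy read-modify-write on an enum-typed field):
 *      dev->dev_state |= DEV_DISCONNECTED;
 * New style (atomic on the unsigned long bitmap):
 */
static void au0828_mark_disconnected(unsigned long *dev_state)
{
        set_bit(DEV_DISCONNECTED, dev_state);
}

static bool au0828_is_disconnected(unsigned long *dev_state)
{
        return test_bit(DEV_DISCONNECTED, dev_state);
}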
diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c
index 146071b8e116..bfff1d1c70ab 100644
--- a/drivers/media/usb/gspca/ov534.c
+++ b/drivers/media/usb/gspca/ov534.c
@@ -1491,8 +1491,13 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev,
struct v4l2_fract *tpf = &cp->timeperframe;
struct sd *sd = (struct sd *) gspca_dev;
- /* Set requested framerate */
- sd->frame_rate = tpf->denominator / tpf->numerator;
+ if (tpf->numerator == 0 || tpf->denominator == 0)
+ /* Set default framerate */
+ sd->frame_rate = 30;
+ else
+ /* Set requested framerate */
+ sd->frame_rate = tpf->denominator / tpf->numerator;
+
if (gspca_dev->streaming)
set_frame_rate(gspca_dev);
diff --git a/drivers/media/usb/gspca/topro.c b/drivers/media/usb/gspca/topro.c
index c70ff406b07a..c028a5c2438e 100644
--- a/drivers/media/usb/gspca/topro.c
+++ b/drivers/media/usb/gspca/topro.c
@@ -4802,7 +4802,11 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev,
struct v4l2_fract *tpf = &cp->timeperframe;
int fr, i;
- sd->framerate = tpf->denominator / tpf->numerator;
+ if (tpf->numerator == 0 || tpf->denominator == 0)
+ sd->framerate = 30;
+ else
+ sd->framerate = tpf->denominator / tpf->numerator;
+
if (gspca_dev->streaming)
setframerate(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure));
diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
index b79c36fd8cd2..58f23bcfe94e 100644
--- a/drivers/media/usb/pwc/pwc-if.c
+++ b/drivers/media/usb/pwc/pwc-if.c
@@ -91,6 +91,7 @@ static const struct usb_device_id pwc_device_table [] = {
{ USB_DEVICE(0x0471, 0x0312) },
{ USB_DEVICE(0x0471, 0x0313) }, /* the 'new' 720K */
{ USB_DEVICE(0x0471, 0x0329) }, /* Philips SPC 900NC PC Camera */
+ { USB_DEVICE(0x0471, 0x032C) }, /* Philips SPC 880NC PC Camera */
{ USB_DEVICE(0x069A, 0x0001) }, /* Askey */
{ USB_DEVICE(0x046D, 0x08B0) }, /* Logitech QuickCam Pro 3000 */
{ USB_DEVICE(0x046D, 0x08B1) }, /* Logitech QuickCam Notebook Pro */
@@ -811,6 +812,11 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
name = "Philips SPC 900NC webcam";
type_id = 740;
break;
+ case 0x032C:
+ PWC_INFO("Philips SPC 880NC USB webcam detected.\n");
+ name = "Philips SPC 880NC webcam";
+ type_id = 740;
+ break;
default:
return -ENODEV;
break;
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index b693206f66dd..d1dc1a198e3e 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -1463,9 +1463,23 @@ static int usbvision_probe(struct usb_interface *intf,
if (usbvision_device_data[model].interface >= 0)
interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0];
- else
+ else if (ifnum < dev->actconfig->desc.bNumInterfaces)
interface = &dev->actconfig->interface[ifnum]->altsetting[0];
+ else {
+ dev_err(&intf->dev, "interface %d is invalid, max is %d\n",
+ ifnum, dev->actconfig->desc.bNumInterfaces - 1);
+ ret = -ENODEV;
+ goto err_usb;
+ }
+
+ if (interface->desc.bNumEndpoints < 2) {
+ dev_err(&intf->dev, "interface %d has %d endpoints, but must"
+ " have minimum 2\n", ifnum, interface->desc.bNumEndpoints);
+ ret = -ENODEV;
+ goto err_usb;
+ }
endpoint = &interface->endpoint[1].desc;
+
if (!usb_endpoint_xfer_isoc(endpoint)) {
dev_err(&intf->dev, "%s: interface %d. has non-ISO endpoint!\n",
__func__, ifnum);
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 327e83ac2469..f38c076752ce 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -415,7 +415,8 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
get_user(kp->index, &up->index) ||
get_user(kp->type, &up->type) ||
get_user(kp->flags, &up->flags) ||
- get_user(kp->memory, &up->memory))
+ get_user(kp->memory, &up->memory) ||
+ get_user(kp->length, &up->length))
return -EFAULT;
if (V4L2_TYPE_IS_OUTPUT(kp->type))
@@ -427,9 +428,6 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
return -EFAULT;
if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
- if (get_user(kp->length, &up->length))
- return -EFAULT;
-
num_planes = kp->length;
if (num_planes == 0) {
kp->m.planes = NULL;
@@ -462,16 +460,14 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
} else {
switch (kp->memory) {
case V4L2_MEMORY_MMAP:
- if (get_user(kp->length, &up->length) ||
- get_user(kp->m.offset, &up->m.offset))
+ if (get_user(kp->m.offset, &up->m.offset))
return -EFAULT;
break;
case V4L2_MEMORY_USERPTR:
{
compat_long_t tmp;
- if (get_user(kp->length, &up->length) ||
- get_user(tmp, &up->m.userptr))
+ if (get_user(tmp, &up->m.userptr))
return -EFAULT;
kp->m.userptr = (unsigned long)compat_ptr(tmp);
@@ -513,7 +509,8 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
copy_to_user(&up->timecode, &kp->timecode, sizeof(struct v4l2_timecode)) ||
put_user(kp->sequence, &up->sequence) ||
put_user(kp->reserved2, &up->reserved2) ||
- put_user(kp->reserved, &up->reserved))
+ put_user(kp->reserved, &up->reserved) ||
+ put_user(kp->length, &up->length))
return -EFAULT;
if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
@@ -536,13 +533,11 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
} else {
switch (kp->memory) {
case V4L2_MEMORY_MMAP:
- if (put_user(kp->length, &up->length) ||
- put_user(kp->m.offset, &up->m.offset))
+ if (put_user(kp->m.offset, &up->m.offset))
return -EFAULT;
break;
case V4L2_MEMORY_USERPTR:
- if (put_user(kp->length, &up->length) ||
- put_user(kp->m.userptr, &up->m.userptr))
+ if (put_user(kp->m.userptr, &up->m.userptr))
return -EFAULT;
break;
case V4L2_MEMORY_OVERLAY:
diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
index 27b4b9e7c0c2..502984c724ff 100644
--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
+++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
@@ -822,10 +822,10 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
return res | POLLERR;
/*
- * For output streams you can write as long as there are fewer buffers
- * queued than there are buffers available.
+ * For output streams you can call write() as long as there are fewer
+ * buffers queued than there are buffers available.
*/
- if (q->is_output && q->queued_count < q->num_buffers)
+ if (q->is_output && q->fileio && q->queued_count < q->num_buffers)
return res | POLLOUT | POLLWRNORM;
if (list_empty(&q->done_list)) {
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 4d92df6ef9fe..0c84186a6370 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -284,6 +284,16 @@ config MFD_HI6421_PMIC
menus in order to enable them.
We communicate with the Hi6421 via memory-mapped I/O.
+config MFD_HI655X_PMIC
+ tristate "HiSilicon Hi655X series PMU/Codec IC"
+ depends on ARCH_HISI || COMPILE_TEST
+ depends on OF
+ select MFD_CORE
+ select REGMAP_MMIO
+ select REGMAP_IRQ
+ help
+ Select this option to enable the Hisilicon hi655x series PMIC driver.
+
config HTC_EGPIO
bool "HTC EGPIO support"
depends on GPIOLIB && ARM
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index a8b76b81b467..6a7b0e1fe6ba 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -186,6 +186,7 @@ obj-$(CONFIG_MFD_STW481X) += stw481x.o
obj-$(CONFIG_MFD_IPAQ_MICRO) += ipaq-micro.o
obj-$(CONFIG_MFD_MENF21BMC) += menf21bmc.o
obj-$(CONFIG_MFD_HI6421_PMIC) += hi6421-pmic-core.o
+obj-$(CONFIG_MFD_HI655X_PMIC) += hi655x-pmic.o
obj-$(CONFIG_MFD_DLN2) += dln2.o
obj-$(CONFIG_MFD_RT5033) += rt5033.o
obj-$(CONFIG_MFD_SKY81452) += sky81452.o
diff --git a/drivers/mfd/hi655x-pmic.c b/drivers/mfd/hi655x-pmic.c
new file mode 100644
index 000000000000..20607af75927
--- /dev/null
+++ b/drivers/mfd/hi655x-pmic.c
@@ -0,0 +1,176 @@
+/*
+ * Device driver for MFD hi655x PMIC
+ *
+ * Copyright (c) 2016 Hisilicon.
+ *
+ * Authors:
+ * Chen Feng <puck.chen@hisilicon.com>
+ * Fei Wang <w.f@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/hi655x-pmic.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+static const struct mfd_cell hi655x_pmic_devs[] = {
+ { .name = "hi655x-regulator", },
+};
+
+static const struct regmap_irq hi655x_irqs[] = {
+ { .reg_offset = 0, .mask = OTMP_D1R_INT },
+ { .reg_offset = 0, .mask = VSYS_2P5_R_INT },
+ { .reg_offset = 0, .mask = VSYS_UV_D3R_INT },
+ { .reg_offset = 0, .mask = VSYS_6P0_D200UR_INT },
+ { .reg_offset = 0, .mask = PWRON_D4SR_INT },
+ { .reg_offset = 0, .mask = PWRON_D20F_INT },
+ { .reg_offset = 0, .mask = PWRON_D20R_INT },
+ { .reg_offset = 0, .mask = RESERVE_INT },
+};
+
+static struct of_device_id of_hi655x_pmic_child_match_tbl[] = {
+ { .compatible = "hisilicon,hi6552-regulator-pmic", },
+ { .compatible = "hisilicon,hi6552-powerkey", },
+ { .compatible = "hisilicon,hi6552-usbvbus", },
+ { .compatible = "hisilicon,hi6552-coul", },
+ { .compatible = "hisilicon,hi6552-pmu-rtc", },
+ { .compatible = "hisilicon,hi6552-pmic-mntn", },
+ { /* end */ }
+};
+
+static const struct regmap_irq_chip hi655x_irq_chip = {
+ .name = "hi655x-pmic",
+ .irqs = hi655x_irqs,
+ .num_regs = 1,
+ .num_irqs = ARRAY_SIZE(hi655x_irqs),
+ .status_base = HI655X_IRQ_STAT_BASE,
+ .ack_base = HI655X_IRQ_STAT_BASE,
+ .mask_base = HI655X_IRQ_MASK_BASE,
+};
+
+static struct regmap_config hi655x_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = HI655X_STRIDE,
+ .val_bits = 8,
+ .max_register = HI655X_BUS_ADDR(0xFFF),
+};
+
+static void hi655x_local_irq_clear(struct regmap *map)
+{
+ int i;
+
+ regmap_write(map, HI655X_ANA_IRQM_BASE, HI655X_IRQ_CLR);
+ for (i = 0; i < HI655X_IRQ_ARRAY; i++) {
+ regmap_write(map, HI655X_IRQ_STAT_BASE + i * HI655X_STRIDE,
+ HI655X_IRQ_CLR);
+ }
+}
+
+static int hi655x_pmic_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct hi655x_pmic *pmic;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ void __iomem *base;
+
+ pmic = devm_kzalloc(dev, sizeof(*pmic), GFP_KERNEL);
+ if (!pmic)
+ return -ENOMEM;
+ pmic->dev = dev;
+
+ pmic->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!pmic->res)
+ return -ENOENT;
+
+ base = devm_ioremap_resource(dev, pmic->res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ pmic->regmap = devm_regmap_init_mmio_clk(dev, NULL, base,
+ &hi655x_regmap_config);
+ if (IS_ERR(pmic->regmap))
+ return PTR_ERR(pmic->regmap);
+
+ regmap_read(pmic->regmap, HI655X_BUS_ADDR(HI655X_VER_REG), &pmic->ver);
+ if ((pmic->ver < PMU_VER_START) || (pmic->ver > PMU_VER_END)) {
+ dev_warn(dev, "PMU version %d unsupported\n", pmic->ver);
+ return -EINVAL;
+ }
+
+ hi655x_local_irq_clear(pmic->regmap);
+
+ pmic->gpio = of_get_named_gpio(np, "pmic-gpios", 0);
+ if (!gpio_is_valid(pmic->gpio)) {
+ dev_err(dev, "Failed to get the pmic-gpios\n");
+ return -ENODEV;
+ }
+
+ ret = devm_gpio_request_one(dev, pmic->gpio, GPIOF_IN,
+ "hi655x_pmic_irq");
+ if (ret < 0) {
+ dev_err(dev, "Failed to request gpio %d ret = %d\n",
+ pmic->gpio, ret);
+ return ret;
+ }
+
+ ret = regmap_add_irq_chip(pmic->regmap, gpio_to_irq(pmic->gpio),
+ IRQF_TRIGGER_LOW | IRQF_NO_SUSPEND, 0,
+ &hi655x_irq_chip, &pmic->irq_data);
+ if (ret) {
+ dev_err(dev, "Failed to obtain 'hi655x_pmic_irq' %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, pmic);
+
+ /* populate sub nodes */
+ of_platform_populate(np, of_hi655x_pmic_child_match_tbl, NULL, dev);
+
+ ret = mfd_add_devices(dev, PLATFORM_DEVID_AUTO, hi655x_pmic_devs,
+ ARRAY_SIZE(hi655x_pmic_devs), NULL, 0, NULL);
+ if (ret) {
+ dev_err(dev, "Failed to register device %d\n", ret);
+ regmap_del_irq_chip(gpio_to_irq(pmic->gpio), pmic->irq_data);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hi655x_pmic_remove(struct platform_device *pdev)
+{
+ struct hi655x_pmic *pmic = platform_get_drvdata(pdev);
+
+ regmap_del_irq_chip(gpio_to_irq(pmic->gpio), pmic->irq_data);
+ mfd_remove_devices(&pdev->dev);
+ return 0;
+}
+
+static const struct of_device_id hi655x_pmic_match[] = {
+ { .compatible = "hisilicon,hi655x-pmic", },
+ {},
+};
+
+static struct platform_driver hi655x_pmic_driver = {
+ .driver = {
+ .name = "hi655x-pmic",
+ .of_match_table = of_match_ptr(hi655x_pmic_match),
+ },
+ .probe = hi655x_pmic_probe,
+ .remove = hi655x_pmic_remove,
+};
+module_platform_driver(hi655x_pmic_driver);
+
+MODULE_AUTHOR("Chen Feng <puck.chen@hisilicon.com>");
+MODULE_DESCRIPTION("Hisilicon hi655x PMIC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 2763d6b3b063..460d75235709 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -466,6 +466,14 @@ config BMP085_SPI
To compile this driver as a module, choose M here: the
module will be called bmp085-spi.
+config HI6220_SYSCFG
+ bool "Hisilicon HI6220 System Configuration driver"
+ depends on ARCH_HISI
+ default y
+ help
+ Hisilicon HI6220 uses some registers to enable or disable certain
+ on-chip hosts, e.g. to deassert the UART host resets so that the
+ UARTs can work.
+
config PCH_PHUB
tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) PHUB"
select GENERIC_NET_UTILS
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index e5142b836aee..77c93b6a7a7c 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o
obj-$(CONFIG_BMP085) += bmp085.o
obj-$(CONFIG_BMP085_I2C) += bmp085-i2c.o
obj-$(CONFIG_BMP085_SPI) += bmp085-spi.o
+obj-$(CONFIG_HI6220_SYSCFG) += hi6220-sysconfig.o
obj-$(CONFIG_DUMMY_IRQ) += dummy-irq.o
obj-$(CONFIG_ICS932S401) += ics932s401.o
obj-$(CONFIG_LKDTM) += lkdtm.o
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 85761d7eb333..be2c8e248e2e 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -414,7 +414,7 @@ static int cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
delta = mftb() - psl_tb;
if (delta < 0)
delta = -delta;
- } while (cputime_to_usecs(delta) > 16);
+ } while (tb_to_ns(delta) > 16000);
return 0;
}
diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
index c241e15cacb1..cbd4331fb45c 100644
--- a/drivers/misc/cxl/vphb.c
+++ b/drivers/misc/cxl/vphb.c
@@ -203,7 +203,7 @@ static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
mask <<= shift;
val <<= shift;
- v = (in_le32(ioaddr) & ~mask) || (val & mask);
+ v = (in_le32(ioaddr) & ~mask) | (val & mask);
out_le32(ioaddr, v);
return PCIBIOS_SUCCESSFUL;
diff --git a/drivers/misc/hi6220-sysconfig.c b/drivers/misc/hi6220-sysconfig.c
new file mode 100644
index 000000000000..9b9d5d8bff19
--- /dev/null
+++ b/drivers/misc/hi6220-sysconfig.c
@@ -0,0 +1,72 @@
+/*
+ * For the Hisilicon Hi6220 SoC, the reset of some hosts (e.g. the UARTs) must
+ * be deasserted before they are used; this driver handles that reset disable.
+ *
+ * Copyright (C) 2015 Hisilicon Ltd.
+ * Author: Bintian Wang <bintian.wang@huawei.com>
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#define reset_offset 0x334
+#define pclk_offset 0x230
+#define PMUSSI_REG_EX(pmu_base, reg_addr) (((reg_addr) << 2) + (char *)pmu_base)
+
+static int __init hi6220_sysconf(void)
+{
+ static void __iomem *base = NULL;
+ struct device_node *node;
+ static void __iomem *base1 = NULL;
+ struct device_node *node1;
+ unsigned char ret;
+
+ node = of_find_compatible_node(NULL, NULL, "hisilicon,hi6220-sysctrl");
+ if (!node)
+ return -ENOENT;
+
+ base = of_iomap(node, 0);
+ if (base == NULL) {
+ printk(KERN_ERR "hi6220: sysctrl reg iomap failed!\n");
+ return -ENOMEM;
+ }
+
+ /* Disable UART1 reset and set pclk */
+ writel(BIT(5), base + reset_offset);
+ writel(BIT(5), base + pclk_offset);
+
+ /* Disable UART2 reset and set pclk */
+ writel(BIT(6), base + reset_offset);
+ writel(BIT(6), base + pclk_offset);
+
+ /* Disable UART3 reset and set pclk */
+ writel(BIT(7), base + reset_offset);
+ writel(BIT(7), base + pclk_offset);
+
+ /* Disable UART4 reset and set pclk */
+ writel(BIT(8), base + reset_offset);
+ writel(BIT(8), base + pclk_offset);
+
+ iounmap(base);
+
+ node1 = of_find_compatible_node(NULL, NULL, "hisilicon,hi655x-pmic");
+ if (!node1)
+ return -ENOENT;
+
+ base1 = of_iomap(node1, 0);
+ if (base1 == NULL) {
+ printk(KERN_ERR "hi6220: pmic reg iomap failed!\n");
+ return -ENOMEM;
+ }
+
+ /* Enable clk for BT/WIFI */
+ ret = *(volatile unsigned char*)PMUSSI_REG_EX(base1, 0x1c);
+ ret |= 0x40;
+ *(volatile unsigned char*)PMUSSI_REG_EX(base1, 0x1c) = ret;
+
+ iounmap(base1);
+ return 0;
+}
+postcore_initcall(hi6220_sysconf);
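The PMUSSI_REG_EX macro above scales the register index by 4 because each byte-wide PMU register sits in its own 4-byte slot. A hedged restatement (the helper and the readb/writeb-based access are an assumption, not what the driver above does):

#include <linux/io.h>

/* reg 0x1c -> byte offset 0x70; each PMUSSI register occupies 4 bytes. */
static inline void __iomem *pmussi_reg(void __iomem *base, unsigned int reg)
{
        return (u8 __iomem *)base + (reg << 2);
}

/* Equivalent of the BT/WIFI clock enable above, using readb/writeb. */
static void pmussi_enable_bt_wifi_clk(void __iomem *pmu_base)
{
        u8 val = readb(pmussi_reg(pmu_base, 0x1c));

        writeb(val | 0x40, pmussi_reg(pmu_base, 0x1c));
}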
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 0b05aa938799..1a173d0af694 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -53,6 +53,11 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
bus = cl->dev;
mutex_lock(&bus->device_lock);
+ if (bus->dev_state != MEI_DEV_ENABLED) {
+ rets = -ENODEV;
+ goto out;
+ }
+
if (!mei_cl_is_connected(cl)) {
rets = -ENODEV;
goto out;
@@ -109,6 +114,10 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
bus = cl->dev;
mutex_lock(&bus->device_lock);
+ if (bus->dev_state != MEI_DEV_ENABLED) {
+ rets = -ENODEV;
+ goto out;
+ }
cb = mei_cl_read_cb(cl, NULL);
if (cb)
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index b2f2486b3d75..80f9afcb1382 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -458,7 +458,11 @@ static int mei_ioctl_client_notify_request(struct file *file, u32 request)
{
struct mei_cl *cl = file->private_data;
- return mei_cl_notify_request(cl, file, request);
+ if (request != MEI_HBM_NOTIFICATION_START &&
+ request != MEI_HBM_NOTIFICATION_STOP)
+ return -EINVAL;
+
+ return mei_cl_notify_request(cl, file, (u8)request);
}
/**
@@ -657,7 +661,9 @@ out:
* @file: pointer to file structure
* @band: band bitmap
*
- * Return: poll mask
+ * Return: negative on error,
+ * 0 if no change was made,
+ * and positive if a process was added or deleted
*/
static int mei_fasync(int fd, struct file *file, int band)
{
@@ -665,7 +671,7 @@ static int mei_fasync(int fd, struct file *file, int band)
struct mei_cl *cl = file->private_data;
if (!mei_cl_is_connected(cl))
- return POLLERR;
+ return -ENODEV;
return fasync_helper(fd, file, band, &cl->ev_async);
}
diff --git a/drivers/misc/ti-st/Kconfig b/drivers/misc/ti-st/Kconfig
index f34dcc514730..f2df2c7352e2 100644
--- a/drivers/misc/ti-st/Kconfig
+++ b/drivers/misc/ti-st/Kconfig
@@ -14,4 +14,12 @@ config TI_ST
are returned to relevant protocol drivers based on their
packet types.
+config ST_HCI
+ tristate "HCI TTY emulation driver for Bluetooth"
+ depends on TI_ST
+ help
+ This enables TTY-device-like emulation of HCI for use by user-space
+ Bluetooth stacks.
+ It provides a character device that a user-space Bluetooth stack can
+ use to send and receive data over the shared transport.
endmenu
diff --git a/drivers/misc/ti-st/Makefile b/drivers/misc/ti-st/Makefile
index 78d7ebb14749..454621909fb7 100644
--- a/drivers/misc/ti-st/Makefile
+++ b/drivers/misc/ti-st/Makefile
@@ -4,3 +4,4 @@
#
obj-$(CONFIG_TI_ST) += st_drv.o
st_drv-objs := st_core.o st_kim.o st_ll.o
+obj-$(CONFIG_ST_HCI) += tty_hci.o
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index 71b64550b591..ed36722da9e9 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -36,6 +36,8 @@
#include <linux/skbuff.h>
#include <linux/ti_wilink_st.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#define MAX_ST_DEVICES 3 /* Imagine 1 on each UART for now */
static struct platform_device *st_kim_devices[MAX_ST_DEVICES];
@@ -43,6 +45,9 @@ static struct platform_device *st_kim_devices[MAX_ST_DEVICES];
/**********************************************************************/
/* internal functions */
+struct ti_st_plat_data *dt_pdata;
+static struct ti_st_plat_data *get_platform_data(struct device *dev);
+
/**
* st_get_plat_device -
* function which returns the reference to the platform device
@@ -464,7 +469,12 @@ long st_kim_start(void *kim_data)
struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data;
pr_info(" %s", __func__);
- pdata = kim_gdata->kim_pdev->dev.platform_data;
+ if (kim_gdata->kim_pdev->dev.of_node) {
+ pr_debug("use device tree data");
+ pdata = dt_pdata;
+ } else {
+ pdata = kim_gdata->kim_pdev->dev.platform_data;
+ }
do {
/* platform specific enabling code here */
@@ -524,12 +534,18 @@ long st_kim_stop(void *kim_data)
{
long err = 0;
struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data;
- struct ti_st_plat_data *pdata =
- kim_gdata->kim_pdev->dev.platform_data;
+ struct ti_st_plat_data *pdata;
struct tty_struct *tty = kim_gdata->core_data->tty;
reinit_completion(&kim_gdata->ldisc_installed);
+ if (kim_gdata->kim_pdev->dev.of_node) {
+ pr_debug("use device tree data");
+ pdata = dt_pdata;
+ } else {
+ pdata = kim_gdata->kim_pdev->dev.platform_data;
+ }
+
if (tty) { /* can be called before ldisc is installed */
/* Flush any pending characters in the driver and discipline. */
tty_ldisc_flush(tty);
@@ -721,13 +737,52 @@ static const struct file_operations list_debugfs_fops = {
* board-*.c file
*/
+static const struct of_device_id kim_of_match[] = {
+{
+ .compatible = "kim",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, kim_of_match);
+
+static struct ti_st_plat_data *get_platform_data(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ const u32 *dt_property;
+ int len;
+
+ dt_pdata = kzalloc(sizeof(*dt_pdata), GFP_KERNEL);
+ if (!dt_pdata)
+ return NULL;
+
+ dt_property = of_get_property(np, "dev_name", &len);
+ if (dt_property)
+ memcpy(&dt_pdata->dev_name, dt_property, len);
+ of_property_read_u32(np, "nshutdown_gpio",
+ &dt_pdata->nshutdown_gpio);
+ of_property_read_u32(np, "flow_cntrl", &dt_pdata->flow_cntrl);
+ of_property_read_u32(np, "baud_rate", &dt_pdata->baud_rate);
+
+ return dt_pdata;
+}
+
static struct dentry *kim_debugfs_dir;
static int kim_probe(struct platform_device *pdev)
{
struct kim_data_s *kim_gdata;
- struct ti_st_plat_data *pdata = pdev->dev.platform_data;
+ struct ti_st_plat_data *pdata;
int err;
+ if (pdev->dev.of_node)
+ pdata = get_platform_data(&pdev->dev);
+ else
+ pdata = pdev->dev.platform_data;
+
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "Platform Data is missing\n");
+ return -ENXIO;
+ }
+
if ((pdev->id != -1) && (pdev->id < MAX_ST_DEVICES)) {
/* multiple devices could exist */
st_kim_devices[pdev->id] = pdev;
@@ -808,9 +863,16 @@ err_core_init:
static int kim_remove(struct platform_device *pdev)
{
/* free the GPIOs requested */
- struct ti_st_plat_data *pdata = pdev->dev.platform_data;
+ struct ti_st_plat_data *pdata;
struct kim_data_s *kim_gdata;
+ if (pdev->dev.of_node) {
+ pr_debug("use device tree data");
+ pdata = dt_pdata;
+ } else {
+ pdata = pdev->dev.platform_data;
+ }
+
kim_gdata = platform_get_drvdata(pdev);
/* Free the Bluetooth/FM/GPIO
@@ -828,12 +890,22 @@ static int kim_remove(struct platform_device *pdev)
kfree(kim_gdata);
kim_gdata = NULL;
+ kfree(dt_pdata);
+ dt_pdata = NULL;
+
return 0;
}
static int kim_suspend(struct platform_device *pdev, pm_message_t state)
{
- struct ti_st_plat_data *pdata = pdev->dev.platform_data;
+ struct ti_st_plat_data *pdata;
+
+ if (pdev->dev.of_node) {
+ pr_debug("use device tree data");
+ pdata = dt_pdata;
+ } else {
+ pdata = pdev->dev.platform_data;
+ }
if (pdata->suspend)
return pdata->suspend(pdev, state);
@@ -843,7 +915,14 @@ static int kim_suspend(struct platform_device *pdev, pm_message_t state)
static int kim_resume(struct platform_device *pdev)
{
- struct ti_st_plat_data *pdata = pdev->dev.platform_data;
+ struct ti_st_plat_data *pdata;
+
+ if (pdev->dev.of_node) {
+ pr_debug("use device tree data");
+ pdata = dt_pdata;
+ } else {
+ pdata = pdev->dev.platform_data;
+ }
if (pdata->resume)
return pdata->resume(pdev);
@@ -860,6 +939,7 @@ static struct platform_driver kim_platform_driver = {
.resume = kim_resume,
.driver = {
.name = "kim",
+ .of_match_table = of_match_ptr(kim_of_match),
},
};
diff --git a/drivers/misc/ti-st/st_ll.c b/drivers/misc/ti-st/st_ll.c
index 93b4d67cc4a3..518e1b7f2f95 100644
--- a/drivers/misc/ti-st/st_ll.c
+++ b/drivers/misc/ti-st/st_ll.c
@@ -26,6 +26,7 @@
#include <linux/ti_wilink_st.h>
/**********************************************************************/
+
/* internal functions */
static void send_ll_cmd(struct st_data_s *st_data,
unsigned char cmd)
@@ -53,7 +54,13 @@ static void ll_device_want_to_sleep(struct st_data_s *st_data)
/* communicate to platform about chip asleep */
kim_data = st_data->kim_data;
- pdata = kim_data->kim_pdev->dev.platform_data;
+ if (kim_data->kim_pdev->dev.of_node) {
+ pr_debug("use device tree data");
+ pdata = dt_pdata;
+ } else {
+ pdata = kim_data->kim_pdev->dev.platform_data;
+ }
+
if (pdata->chip_asleep)
pdata->chip_asleep(NULL);
}
@@ -86,7 +93,13 @@ static void ll_device_want_to_wakeup(struct st_data_s *st_data)
/* communicate to platform about chip wakeup */
kim_data = st_data->kim_data;
- pdata = kim_data->kim_pdev->dev.platform_data;
+ if (kim_data->kim_pdev->dev.of_node) {
+ pr_debug("use device tree data");
+ pdata = dt_pdata;
+ } else {
+ pdata = kim_data->kim_pdev->dev.platform_data;
+ }
+
if (pdata->chip_awake)
pdata->chip_awake(NULL);
}
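The DT-vs-platform-data fallback above now appears in six call sites across st_kim.c and st_ll.c. A hedged consolidation sketch (helper name is illustrative) that each site could call instead:

#include <linux/platform_device.h>
#include <linux/ti_wilink_st.h>

extern struct ti_st_plat_data *dt_pdata;

/* Pick device-tree data when the device has an OF node, else fall back
 * to board-file platform data.
 */
static struct ti_st_plat_data *st_get_pdata(struct platform_device *pdev)
{
        if (pdev->dev.of_node) {
                pr_debug("use device tree data");
                return dt_pdata;
        }
        return pdev->dev.platform_data;
}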
diff --git a/drivers/misc/ti-st/tty_hci.c b/drivers/misc/ti-st/tty_hci.c
new file mode 100644
index 000000000000..5b27b04f2785
--- /dev/null
+++ b/drivers/misc/ti-st/tty_hci.c
@@ -0,0 +1,542 @@
+/*
+ * TTY emulation for user-space Bluetooth stacks over HCI-H4
+ * Copyright (C) 2011-2012 Texas Instruments
+ * Author: Pavan Savoy <pavan_savoy@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* Define one of the following for debugging:
+ * #define DEBUG
+ * #define VERBOSE
+ */
+
+#define pr_fmt(fmt) "(hci_tty): " fmt
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+
+#include <linux/uaccess.h>
+#include <linux/tty.h>
+#include <linux/sched.h>
+
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/skbuff.h>
+#include <linux/interrupt.h>
+
+#include <linux/ti_wilink_st.h>
+
+/* Number of seconds to wait for registration completion
+ * when ST returns PENDING status.
+ */
+#define BT_REGISTER_TIMEOUT 6000 /* 6 sec */
+
+/**
+ * struct ti_st - driver operation structure
+ * @hdev: hci device pointer which binds to bt driver
+ * @reg_status: ST registration callback status
+ * @st_write: write function provided by the ST driver
+ * to be used by the driver during send_frame.
+ * @wait_reg_completion: completion sync between ti_st_open
+ * and st_reg_completion_cb.
+ * @data_q: wait queue woken when data is received from ST
+ * @rx_list: queue of received skbs pending delivery to user space
+ */
+struct ti_st {
+ struct hci_dev *hdev;
+ char reg_status;
+ long (*st_write)(struct sk_buff *);
+ struct completion wait_reg_completion;
+ wait_queue_head_t data_q;
+ struct sk_buff_head rx_list;
+};
+
+#define DEVICE_NAME "hci_tty"
+
+/***********Functions called from ST driver**********************************/
+/* Called by the Shared Transport layer when receive data is available */
+static long st_receive(void *priv_data, struct sk_buff *skb)
+{
+ struct ti_st *hst = (void *)priv_data;
+
+ pr_debug("@ %s", __func__);
+#ifdef VERBOSE
+ print_hex_dump(KERN_INFO, ">rx>", DUMP_PREFIX_NONE,
+ 16, 1, skb->data, skb->len, 0);
+#endif
+ skb_queue_tail(&hst->rx_list, skb);
+ wake_up_interruptible(&hst->data_q);
+ return 0;
+}
+
+/* Called by the ST layer to indicate protocol registration completion
+ * status. ti_st_open() waits for a signal from this callback when
+ * st_register() returns ST_PENDING.
+ */
+static void st_reg_completion_cb(void *priv_data, char data)
+{
+ struct ti_st *lhst = (void *)priv_data;
+
+ pr_info("@ %s\n", __func__);
+ /* Save registration status for use in ti_st_open() */
+ lhst->reg_status = data;
+ /* complete the wait in ti_st_open() */
+ complete(&lhst->wait_reg_completion);
+}
+
+/* protocol structure registered with shared transport */
+#define MAX_BT_CHNL_IDS 3
+static struct st_proto_s ti_st_proto[MAX_BT_CHNL_IDS] = {
+ {
+ .chnl_id = 0x04, /* HCI Events */
+ .hdr_len = 2,
+ .offset_len_in_hdr = 1,
+ .len_size = 1, /* sizeof(plen) in struct hci_event_hdr */
+ .reserve = 8,
+ },
+ {
+ .chnl_id = 0x02, /* ACL */
+ .hdr_len = 4,
+ .offset_len_in_hdr = 2,
+ .len_size = 2, /* sizeof(dlen) in struct hci_acl_hdr */
+ .reserve = 8,
+ },
+ {
+ .chnl_id = 0x03, /* SCO */
+ .hdr_len = 3,
+ .offset_len_in_hdr = 2,
+ .len_size = 1, /* sizeof(dlen) in struct hci_sco_hdr */
+ .reserve = 8,
+ },
+};
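+
+/* A minimal sketch (not part of the ST core; the helper name is
+ * illustrative) of how the fields above drive frame parsing: for each
+ * channel the ST driver collects hdr_len header bytes, then reads
+ * len_size bytes at offset_len_in_hdr to learn the payload length.
+ */
+static inline unsigned int st_hdr_payload_len(const struct st_proto_s *p,
+ const unsigned char *hdr)
+{
+ /* len_size is 1 or 2; two-byte lengths are little-endian on the wire */
+ if (p->len_size == 1)
+ return hdr[p->offset_len_in_hdr];
+ return hdr[p->offset_len_in_hdr] |
+ (hdr[p->offset_len_in_hdr + 1] << 8);
+}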
+/** hci_tty_open Function
+ * This function registers the HCI channels with the ST driver.
+ *
+ * Parameters :
+ * @file : File pointer for BT char driver
+ * @inod :
+ * Returns 0 - on success
+ * else suitable error code
+ */
+int hci_tty_open(struct inode *inod, struct file *file)
+{
+ int i = 0, err = 0;
+ unsigned long timeleft;
+ struct ti_st *hst;
+
+ pr_info("inside %s (%p, %p)\n", __func__, inod, file);
+
+ hst = kzalloc(sizeof(*hst), GFP_KERNEL);
+ if (!hst)
+ return -ENOMEM;
+ file->private_data = hst;
+
+ for (i = 0; i < MAX_BT_CHNL_IDS; i++) {
+ ti_st_proto[i].priv_data = hst;
+ ti_st_proto[i].max_frame_size = 1026;
+ ti_st_proto[i].recv = st_receive;
+ ti_st_proto[i].reg_complete_cb = st_reg_completion_cb;
+
+ /* Prepare wait-for-completion handler */
+ init_completion(&hst->wait_reg_completion);
+ /* Reset ST registration callback status flag;
+ * this value will be updated in
+ * st_reg_completion_cb()
+ * whenever it is called from the ST driver.
+ */
+ hst->reg_status = -EINPROGRESS;
+
+ err = st_register(&ti_st_proto[i]);
+ if (!err)
+ goto done;
+
+ if (err != -EINPROGRESS) {
+ pr_err("st_register failed %d", err);
+ goto error;
+ }
+
+ /* ST is busy with either protocol
+ * registration or firmware download.
+ */
+ pr_debug("waiting for registration completion signal from ST");
+ timeleft = wait_for_completion_timeout
+ (&hst->wait_reg_completion,
+ msecs_to_jiffies(BT_REGISTER_TIMEOUT));
+ if (!timeleft) {
+ pr_err("Timeout(%d sec),didn't get reg completion signal from ST",
+ BT_REGISTER_TIMEOUT / 1000);
+ err = -ETIMEDOUT;
+ goto error;
+ }
+
+ /* Is the ST registration callback
+ * called with ERROR status?
+ */
+ if (hst->reg_status != 0) {
+ pr_err("ST registration completed with invalid status %d",
+ hst->reg_status);
+ err = -EAGAIN;
+ goto error;
+ }
+
+done:
+ hst->st_write = ti_st_proto[i].write;
+ if (!hst->st_write) {
+ pr_err("undefined ST write function");
+ for (i = 0; i < MAX_BT_CHNL_IDS; i++) {
+ /* Undo registration with ST */
+ err = st_unregister(&ti_st_proto[i]);
+ if (err)
+ pr_err("st_unregister() failed with error %d",
+ err);
+ hst->st_write = NULL;
+ }
+ return -EIO;
+ }
+ }
+
+ skb_queue_head_init(&hst->rx_list);
+ init_waitqueue_head(&hst->data_q);
+
+ return 0;
+
+error:
+ kfree(hst);
+ return err;
+}
+
+/** hci_tty_release Function
+ * This function unregisters the HCI channels from the ST driver.
+ *
+ * Parameters :
+ * @file : File pointer for BT char driver
+ * @inod :
+ * Returns 0 - on success
+ * else suitable error code
+ */
+int hci_tty_release(struct inode *inod, struct file *file)
+{
+ int err, i;
+ struct ti_st *hst = file->private_data;
+
+ pr_info("inside %s (%p, %p)\n", __func__, inod, file);
+
+ for (i = 0; i < MAX_BT_CHNL_IDS; i++) {
+ err = st_unregister(&ti_st_proto[i]);
+ if (err)
+ pr_err("st_unregister(%d) failed with error %d",
+ ti_st_proto[i].chnl_id, err);
+ }
+
+ hst->st_write = NULL;
+ skb_queue_purge(&hst->rx_list);
+ kfree(hst);
+ return err;
+}
+
+/** hci_tty_read Function
+ *
+ * Parameters :
+ * @file : File pointer for BT char driver
+ * @data : Data which needs to be passed to APP
+ * @size : Length of the data passed
+ * @offset :
+ * Returns Size of packet received - on success
+ * else suitable error code
+ */
+ssize_t hci_tty_read(struct file *file, char __user *data, size_t size,
+ loff_t *offset)
+{
+ int len = 0, tout;
+ struct sk_buff *skb = NULL, *rskb = NULL;
+ struct ti_st *hst;
+
+ pr_debug("inside %s (%p, %p, %u, %p)\n",
+ __func__, file, data, size, offset);
+
+ /* Validate input parameters */
+ if (!file || !data || !size) {
+ pr_err("Invalid input params passed to %s", __func__);
+ return -EINVAL;
+ }
+
+ hst = file->private_data;
+
+ /* We cannot get here without data if poll() was called before
+ * reading; if poll() was not called, wait on the same wait queue.
+ */
+ tout = wait_event_interruptible_timeout(hst->data_q,
+ !skb_queue_empty(&hst->rx_list),
+ msecs_to_jiffies(1000));
+ /* Check for timed out condition */
+ if (0 == tout) {
+ pr_err("Device Read timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ /* hst->rx_list not empty skb already present */
+ skb = skb_dequeue(&hst->rx_list);
+ if (!skb) {
+ pr_err("dequed skb is null?\n");
+ return -EIO;
+ }
+
+#ifdef VERBOSE
+ print_hex_dump(KERN_INFO, ">in>", DUMP_PREFIX_NONE,
+ 16, 1, skb->data, skb->len, 0);
+#endif
+
+ /* Forward the data to the user */
+ if (skb->len >= size) {
+ pr_err("FIONREAD not done before read\n");
+ return -ENOMEM;
+ } else {
+ /* returning skb */
+ rskb = alloc_skb(size, GFP_KERNEL);
+ if (!rskb) {
+ pr_err("alloc_skb error\n");
+ return -ENOMEM;
+ }
+
+ /* cb[0] has the pkt_type 0x04 or 0x02 or 0x03 */
+ memcpy(skb_put(rskb, 1), &skb->cb[0], 1);
+ memcpy(skb_put(rskb, skb->len), skb->data, skb->len);
+
+ if (copy_to_user(data, rskb->data, rskb->len)) {
+ pr_err("unable to copy to user space\n");
+ /* Queue the skb back to head */
+ skb_queue_head(&hst->rx_list, skb);
+ kfree_skb(rskb);
+ return -EIO;
+ }
+ }
+
+ len = rskb->len; /* len of returning skb */
+ kfree_skb(skb);
+ kfree_skb(rskb);
+ pr_debug("total size read= %d\n", len);
+ return len;
+}
+
+/* hci_tty_write Function
+ *
+ * Parameters :
+ * @file : File pointer for BT char driver
+ * @data : packet data from BT application
+ * @size : Size of the packet data
+ * @offset :
+ * Returns Size of packet on success
+ * else suitable error code
+ */
+ssize_t hci_tty_write(struct file *file, const char __user *data,
+ size_t size, loff_t *offset)
+{
+ struct ti_st *hst = file->private_data;
+ struct sk_buff *skb;
+
+ pr_debug("inside %s (%p, %p, %u, %p)\n",
+ __func__, file, data, size, offset);
+
+ if (!hst->st_write) {
+ pr_err(" Can't write to ST, hhci_tty->st_write null ?");
+ return -EINVAL;
+ }
+
+ skb = alloc_skb(size, GFP_KERNEL);
+ /* Validate created skb */
+ if (!skb) {
+ pr_err("Error allocating skb");
+ return -ENOMEM;
+ }
+
+ /* Forward the data from the user space to ST core */
+ if (copy_from_user(skb_put(skb, size), data, size)) {
+ pr_err(" Unable to copy from user space");
+ kfree_skb(skb);
+ return -EIO;
+ }
+
+#ifdef VERBOSE
+ pr_debug("start data..");
+ print_hex_dump(KERN_INFO, "<out<", DUMP_PREFIX_NONE,
+ 16, 1, skb->data, size, 0);
+ pr_debug("\n..end data");
+#endif
+
+ hst->st_write(skb);
+ return size;
+}
+
+/** hci_tty_ioctl Function
+ * This performs the action directed by the IOCTL command and its
+ * argument.
+ *
+ * Parameters :
+ * @file : File pointer for BT char driver
+ * @cmd : IOCTL Command
+ * @arg : Command argument for IOCTL command
+ * Returns 0 on success
+ * else suitable error code
+ */
+static long hci_tty_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct sk_buff *skb = NULL;
+ int retcode = 0;
+ struct ti_st *hst;
+
+ pr_debug("inside %s (%p, %u, %lx)", __func__, file, cmd, arg);
+
+ /* Validate input parameters */
+ if (!file || !cmd) {
+ pr_err("invalid input parameters passed to %s", __func__);
+ return -EINVAL;
+ }
+
+ hst = file->private_data;
+
+ switch (cmd) {
+ case FIONREAD:
+ /* Dequeue the skb at the head if rx_list is not empty and
+ * report skb->len + 1: the amount of data available in the
+ * skb plus one byte for the PKT_TYPE field, which TI-ST
+ * does not include in the data.
+ */
+ skb = skb_dequeue(&hst->rx_list);
+ if (skb != NULL) {
+ retcode = put_user(skb->len + 1,
+ (unsigned int __user *)arg);
+ /* Re-store the skb for future read operations */
+ skb_queue_head(&hst->rx_list, skb);
+ } else {
+ retcode = put_user(0, (unsigned int __user *)arg);
+ }
+ pr_debug("FIONREAD reports %u\n", skb ? skb->len + 1 : 0);
+ break;
+ default:
+ pr_debug("Un-Identified IOCTL %d", cmd);
+ retcode = 0;
+ break;
+ }
+
+ return retcode;
+}
+
+/** hci_tty_poll Function
+ * This function waits until some data is received by the hci_tty driver from ST.
+ *
+ * Parameters :
+ * @file : File pointer for BT char driver
+ * @wait : POLL wait information
+ * Returns status of POLL on success
+ * else suitable error code
+ */
+static unsigned int hci_tty_poll(struct file *file, poll_table *wait)
+{
+ struct ti_st *hst = file->private_data;
+ unsigned long mask = 0;
+
+ pr_debug("@ %s\n", __func__);
+
+ /* wait to be completed by st_receive */
+ poll_wait(file, &hst->data_q, wait);
+ pr_debug("poll broke\n");
+
+ if (!skb_queue_empty(&hst->rx_list)) {
+ pr_debug("rx list que !empty\n");
+ mask |= POLLIN; /* TODO: check app for mask */
+ }
+
+ return mask;
+}
+
+/* BT Char driver function pointers
+ * These functions are called from USER space by performing File Operations
+ * on /dev/hci_tty node exposed by this driver during init
+ */
+const struct file_operations hci_tty_chrdev_ops = {
+ .owner = THIS_MODULE,
+ .open = hci_tty_open,
+ .read = hci_tty_read,
+ .write = hci_tty_write,
+ .unlocked_ioctl = hci_tty_ioctl,
+ .poll = hci_tty_poll,
+ .release = hci_tty_release,
+};
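+
+/* A minimal user-space usage sketch (illustrative only, not part of the
+ * driver): size the read buffer via FIONREAD (or wait in poll()) first,
+ * since hci_tty_read() rejects reads smaller than the pending packet
+ * plus the packet-type byte the driver prepends:
+ *
+ * int fd = open("/dev/hci_tty", O_RDWR);
+ * unsigned int pending = 0;
+ *
+ * ioctl(fd, FIONREAD, &pending);
+ * if (pending) {
+ * unsigned char buf[1027];
+ * int n = read(fd, buf, sizeof(buf));
+ * buf[0] then holds the HCI packet type (0x02/0x03/0x04)
+ * }
+ */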
+
+/*********Functions called during insmod and rmmod****************************/
+
+static int hci_tty_major; /* major number */
+static struct class *hci_tty_class; /* class during class_create */
+static struct device *hci_tty_dev; /* dev during device_create */
+/** hci_tty_init Function
+ * This function initializes the hci_tty driver parameters and exposes
+ * /dev/hci_tty node to user space
+ *
+ * Parameters : NULL
+ * Returns 0 on success
+ * else suitable error code
+ */
+static int __init hci_tty_init(void)
+{
+ pr_info("inside %s\n", __func__);
+
+ /* Expose the device DEVICE_NAME to user space
+ * and obtain the major number for the device
+ */
+ hci_tty_major = register_chrdev(0, DEVICE_NAME, &hci_tty_chrdev_ops);
+
+ if (0 > hci_tty_major) {
+ pr_err("Error when registering to char dev");
+ return hci_tty_major;
+ }
+
+ /* udev */
+ hci_tty_class = class_create(THIS_MODULE, DEVICE_NAME);
+ if (IS_ERR(hci_tty_class)) {
+ pr_err("Something went wrong in class_create");
+ unregister_chrdev(hci_tty_major, DEVICE_NAME);
+ return -1;
+ }
+
+ hci_tty_dev =
+ device_create(hci_tty_class, NULL, MKDEV(hci_tty_major, 0),
+ NULL, DEVICE_NAME);
+ if (IS_ERR(hci_tty_dev)) {
+ pr_err("Error in device create");
+ unregister_chrdev(hci_tty_major, DEVICE_NAME);
+ class_destroy(hci_tty_class);
+ return -1;
+ }
+ pr_info("allocated %d, %d\n", hci_tty_major, 0);
+ return 0;
+}
+
+/** hci_tty_exit Function
+ * This function destroys the hci_tty driver parameters and the /dev/hci_tty node.
+ *
+ * Parameters : NULL
+ * Returns nothing
+ */
+static void __exit hci_tty_exit(void)
+{
+ pr_info("inside %s\n", __func__);
+ pr_info("bye.. freeing up %d\n", hci_tty_major);
+
+ device_destroy(hci_tty_class, MKDEV(hci_tty_major, 0));
+ class_destroy(hci_tty_class);
+ unregister_chrdev(hci_tty_major, DEVICE_NAME);
+}
+
+module_init(hci_tty_init);
+module_exit(hci_tty_exit);
+
+MODULE_AUTHOR("Pavan Savoy <pavan_savoy@ti.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 3a9a79ec4343..3d5087b03999 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1076,8 +1076,7 @@ static int mmc_select_hs400(struct mmc_card *card)
mmc_set_clock(host, max_dtr);
/* Switch card to HS mode */
- val = EXT_CSD_TIMING_HS |
- card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
+ val = EXT_CSD_TIMING_HS;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HS_TIMING, val,
card->ext_csd.generic_cmd6_time,
@@ -1160,8 +1159,7 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
mmc_set_clock(host, max_dtr);
/* Switch HS400 to HS DDR */
- val = EXT_CSD_TIMING_HS |
- card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
+ val = EXT_CSD_TIMING_HS;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
val, card->ext_csd.generic_cmd6_time,
true, send_status, true);
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 4ac721fea4ef..ee145d4cc541 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -626,9 +626,9 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
* SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
*/
if (!mmc_host_is_spi(card->host) &&
- (card->sd_bus_speed == UHS_SDR50_BUS_SPEED ||
- card->sd_bus_speed == UHS_DDR50_BUS_SPEED ||
- card->sd_bus_speed == UHS_SDR104_BUS_SPEED)) {
+ (card->host->ios.timing == MMC_TIMING_UHS_SDR50 ||
+ card->host->ios.timing == MMC_TIMING_UHS_DDR50 ||
+ card->host->ios.timing == MMC_TIMING_UHS_SDR104)) {
err = mmc_execute_tuning(card);
/*
@@ -638,7 +638,7 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
* difference between v3.00 and 3.01 spec means that CMD19
* tuning is also available for DDR50 mode.
*/
- if (err && card->sd_bus_speed == UHS_DDR50_BUS_SPEED) {
+ if (err && card->host->ios.timing == MMC_TIMING_UHS_DDR50) {
pr_warn("%s: ddr50 tuning failed\n",
mmc_hostname(card->host));
err = 0;
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index b5a1c0220493..b47957122fd7 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -541,8 +541,8 @@ static int mmc_sdio_init_uhs_card(struct mmc_card *card)
* SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
*/
if (!mmc_host_is_spi(card->host) &&
- ((card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR50) ||
- (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)))
+ ((card->host->ios.timing == MMC_TIMING_UHS_SDR50) ||
+ (card->host->ios.timing == MMC_TIMING_UHS_SDR104)))
err = mmc_execute_tuning(card);
out:
return err;
@@ -636,7 +636,7 @@ try_again:
*/
if (!powered_resume && (rocr & ocr & R4_18V_PRESENT)) {
err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180,
- ocr);
+ ocr_card);
if (err == -EAGAIN) {
sdio_reset(host);
mmc_go_idle(host);
diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c
index 63c2e2ed1288..6aa6d0d63f7f 100644
--- a/drivers/mmc/host/dw_mmc-k3.c
+++ b/drivers/mmc/host/dw_mmc-k3.c
@@ -32,6 +32,12 @@ struct k3_priv {
struct regmap *reg;
};
+static unsigned long dw_mci_hi6220_caps[] = {
+ MMC_CAP_CMD23,
+ MMC_CAP_CMD23,
+ 0
+};
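+/* descriptive note: drv_data->caps is indexed by controller instance
+ * (ctrl_id), one entry per dw_mmc host on the SoC; the first two hi6220
+ * hosts advertise CMD23 here, the third adds no extra caps
+ */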
+
static void dw_mci_k3_set_ios(struct dw_mci *host, struct mmc_ios *ios)
{
int ret;
@@ -125,10 +131,17 @@ static void dw_mci_hi6220_set_ios(struct dw_mci *host, struct mmc_ios *ios)
host->bus_hz = clk_get_rate(host->biu_clk);
}
+static void dw_mci_hi6220_prepare_command(struct dw_mci *host, u32 *cmdr)
+{
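+ /* always issue commands through the cmd/data hold register on hi6220 */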
+ *cmdr |= SDMMC_CMD_USE_HOLD_REG;
+}
+
static const struct dw_mci_drv_data hi6220_data = {
+ .caps = dw_mci_hi6220_caps,
.switch_voltage = dw_mci_hi6220_switch_voltage,
.set_ios = dw_mci_hi6220_set_ios,
.parse_dt = dw_mci_hi6220_parse_dt,
+ .prepare_command = dw_mci_hi6220_prepare_command,
};
static const struct of_device_id dw_mci_k3_match[] = {
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 7a6cedbe48a8..86db70ecad62 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -250,7 +250,7 @@ static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
if (cmd->opcode == SD_SWITCH_VOLTAGE) {
- u32 clk_en_a;
+ /*u32 clk_en_a;*/
/* Special bit makes CMD11 not die */
cmdr |= SDMMC_CMD_VOLT_SWITCH;
@@ -270,11 +270,11 @@ static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
* ever called with a non-zero clock. That shouldn't happen
* until the voltage change is all done.
*/
- clk_en_a = mci_readl(host, CLKENA);
- clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
- mci_writel(host, CLKENA, clk_en_a);
- mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
- SDMMC_CMD_PRV_DAT_WAIT, 0);
+ /*clk_en_a = mci_readl(host, CLKENA);*/
+ /*clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);*/
+ /*mci_writel(host, CLKENA, clk_en_a);*/
+ /*mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |*/
+ /*SDMMC_CMD_PRV_DAT_WAIT, 0);*/
}
if (cmd->flags & MMC_RSP_PRESENT) {
@@ -1124,7 +1124,7 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
/* enable clock; only low power if no SDIO */
clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
- if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags))
+ if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags) && (slot->mmc->index != 1))
clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
mci_writel(host, CLKENA, clk_en_a);
@@ -1298,6 +1298,8 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
regs |= ((0x1 << slot->id) << 16);
else
regs &= ~((0x1 << slot->id) << 16);
+ if (mmc->index == 1)
+ regs |= (0x1 << slot->id);
mci_writel(slot->host, UHS_REG, regs);
slot->host->timing = ios->timing;
@@ -1540,6 +1542,8 @@ static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
if (drv_data && drv_data->execute_tuning)
err = drv_data->execute_tuning(slot, opcode);
+ else
+ err = 0;
return err;
}
@@ -2879,6 +2883,13 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
if (!pdata)
return ERR_PTR(-ENOMEM);
+ /* find the reset controller, if one exists */
+ pdata->rstc = devm_reset_control_get_optional(dev, NULL);
+ if (IS_ERR(pdata->rstc))
+ pdata->rstc = NULL;
+ else
+ reset_control_deassert(pdata->rstc);
+
/* find out number of slots supported */
if (of_property_read_u32(dev->of_node, "num-slots",
&pdata->num_slots)) {
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 1c1b45ef3faf..aad3243a48fc 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1436,6 +1436,12 @@ static int mmc_spi_probe(struct spi_device *spi)
host->pdata->cd_debounce);
if (status != 0)
goto fail_add_host;
+
+ /* The platform has a CD GPIO signal that may support
+ * interrupts, so let mmc_gpiod_request_cd_irq() decide
+ * if polling is needed or not.
+ */
+ mmc->caps &= ~MMC_CAP_NEEDS_POLL;
mmc_gpiod_request_cd_irq(mmc);
}
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index fb266745f824..acece3299756 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -1886,7 +1886,7 @@ static struct amba_id mmci_ids[] = {
{
.id = 0x00280180,
.mask = 0x00ffffff,
- .data = &variant_u300,
+ .data = &variant_nomadik,
},
{
.id = 0x00480180,
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index ce08896b9d69..28a057fae0a1 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -804,7 +804,7 @@ static int pxamci_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
goto out;
} else {
- mmc->caps |= host->pdata->gpio_card_ro_invert ?
+ mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
0 : MMC_CAP2_RO_ACTIVE_HIGH;
}
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index f6047fc94062..a5cda926d38e 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -146,6 +146,33 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
.ops = &sdhci_acpi_ops_int,
};
+static int bxt_get_cd(struct mmc_host *mmc)
+{
+ int gpio_cd = mmc_gpio_get_cd(mmc);
+ struct sdhci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+ int ret = 0;
+
+ if (!gpio_cd)
+ return 0;
+
+ pm_runtime_get_sync(mmc->parent);
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (host->flags & SDHCI_DEVICE_DEAD)
+ goto out;
+
+ ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
+out:
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ pm_runtime_mark_last_busy(mmc->parent);
+ pm_runtime_put_autosuspend(mmc->parent);
+
+ return ret;
+}
+
static int sdhci_acpi_emmc_probe_slot(struct platform_device *pdev,
const char *hid, const char *uid)
{
@@ -196,6 +223,9 @@ static int sdhci_acpi_sd_probe_slot(struct platform_device *pdev,
/* Platform specific code during sd probe slot goes here */
+ if (hid && !strcmp(hid, "80865ACA"))
+ host->mmc_host_ops.get_cd = bxt_get_cd;
+
return 0;
}
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index cf7ad458b4f4..610154836d79 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -277,7 +277,7 @@ static int spt_select_drive_strength(struct sdhci_host *host,
if (sdhci_pci_spt_drive_strength > 0)
drive_strength = sdhci_pci_spt_drive_strength & 0xf;
else
- drive_strength = 1; /* 33-ohm */
+ drive_strength = 0; /* Default 50-ohm */
if ((mmc_driver_type_mask(drive_strength) & card_drv) == 0)
drive_strength = 0; /* Default 50-ohm */
@@ -330,6 +330,33 @@ static void spt_read_drive_strength(struct sdhci_host *host)
sdhci_pci_spt_drive_strength = 0x10 | ((val >> 12) & 0xf);
}
+static int bxt_get_cd(struct mmc_host *mmc)
+{
+ int gpio_cd = mmc_gpio_get_cd(mmc);
+ struct sdhci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+ int ret = 0;
+
+ if (!gpio_cd)
+ return 0;
+
+ pm_runtime_get_sync(mmc->parent);
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (host->flags & SDHCI_DEVICE_DEAD)
+ goto out;
+
+ ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
+out:
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ pm_runtime_mark_last_busy(mmc->parent);
+ pm_runtime_put_autosuspend(mmc->parent);
+
+ return ret;
+}
+
static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
{
slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
@@ -362,6 +389,11 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
slot->cd_con_id = NULL;
slot->cd_idx = 0;
slot->cd_override_level = true;
+ if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
+ slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXTM_SD ||
+ slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD)
+ slot->host->mmc_host_ops.get_cd = bxt_get_cd;
+
return 0;
}
@@ -1142,6 +1174,30 @@ static const struct pci_device_id pci_ids[] = {
{
.vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BXTM_EMMC,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BXTM_SDIO,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BXTM_SD,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
.device = PCI_DEVICE_ID_INTEL_APL_EMMC,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index d1a0b4db60db..89e7151684a1 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -28,6 +28,9 @@
#define PCI_DEVICE_ID_INTEL_BXT_SD 0x0aca
#define PCI_DEVICE_ID_INTEL_BXT_EMMC 0x0acc
#define PCI_DEVICE_ID_INTEL_BXT_SDIO 0x0ad0
+#define PCI_DEVICE_ID_INTEL_BXTM_SD 0x1aca
+#define PCI_DEVICE_ID_INTEL_BXTM_EMMC 0x1acc
+#define PCI_DEVICE_ID_INTEL_BXTM_SDIO 0x1ad0
#define PCI_DEVICE_ID_INTEL_APL_SD 0x5aca
#define PCI_DEVICE_ID_INTEL_APL_EMMC 0x5acc
#define PCI_DEVICE_ID_INTEL_APL_SDIO 0x5ad0
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index b48565ed5616..1a802af827ed 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -540,9 +540,12 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
BUG_ON(len > 65536);
- /* tran, valid */
- sdhci_adma_write_desc(host, desc, addr, len, ADMA2_TRAN_VALID);
- desc += host->desc_sz;
+ if (len) {
+ /* tran, valid */
+ sdhci_adma_write_desc(host, desc, addr, len,
+ ADMA2_TRAN_VALID);
+ desc += host->desc_sz;
+ }
/*
* If this triggers then we have a calculation bug
@@ -663,9 +666,20 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
if (!data)
target_timeout = cmd->busy_timeout * 1000;
else {
- target_timeout = data->timeout_ns / 1000;
- if (host->clock)
- target_timeout += data->timeout_clks / host->clock;
+ target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
+ if (host->clock && data->timeout_clks) {
+ unsigned long long val;
+
+ /*
+ * data->timeout_clks is in units of clock cycles.
+ * host->clock is in Hz. target_timeout is in us.
+ * Hence, us = 1000000 * cycles / Hz. Round up.
+ */
+ val = 1000000 * data->timeout_clks;
+ if (do_div(val, host->clock))
+ target_timeout++;
+ target_timeout += val;
+ }
}
/*
@@ -1364,7 +1378,7 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
sdhci_runtime_pm_get(host);
/* Firstly check card presence */
- present = sdhci_do_get_cd(host);
+ present = mmc->ops->get_cd(mmc);
spin_lock_irqsave(&host->lock, flags);
@@ -2760,7 +2774,7 @@ static int sdhci_runtime_pm_put(struct sdhci_host *host)
static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
- if (host->runtime_suspended || host->bus_on)
+ if (host->bus_on)
return;
host->bus_on = true;
pm_runtime_get_noresume(host->mmc->parent);
@@ -2768,7 +2782,7 @@ static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
- if (host->runtime_suspended || !host->bus_on)
+ if (!host->bus_on)
return;
host->bus_on = false;
pm_runtime_put_noidle(host->mmc->parent);
@@ -2861,6 +2875,8 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev,
host = mmc_priv(mmc);
host->mmc = mmc;
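+ /* keep a per-host copy of the default ops so platform glue can
+ * override individual callbacks (e.g. ->get_cd) without touching
+ * the shared sdhci_ops table
+ */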
+ host->mmc_host_ops = sdhci_ops;
+ mmc->ops = &host->mmc_host_ops;
return host;
}
@@ -3057,7 +3073,6 @@ int sdhci_add_host(struct sdhci_host *host)
/*
* Set host parameters.
*/
- mmc->ops = &sdhci_ops;
max_clk = host->max_clk;
if (host->ops->get_min_clock)
@@ -3091,14 +3106,14 @@ int sdhci_add_host(struct sdhci_host *host)
if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
host->timeout_clk *= 1000;
+ if (override_timeout_clk)
+ host->timeout_clk = override_timeout_clk;
+
mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
host->ops->get_max_timeout_count(host) : 1 << 27;
mmc->max_busy_timeout /= host->timeout_clk;
}
- if (override_timeout_clk)
- host->timeout_clk = override_timeout_clk;
-
mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 9d4aa31b683a..9c331ac5ad6b 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -425,6 +425,7 @@ struct sdhci_host {
/* Internal data */
struct mmc_host *mmc; /* MMC structure */
+ struct mmc_host_ops mmc_host_ops; /* MMC host ops */
u64 dma_mask; /* custom DMA mask */
#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index ad9ffea7d659..6234eab38ff3 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -397,38 +397,26 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
}
static struct dma_chan *
-sh_mmcif_request_dma_one(struct sh_mmcif_host *host,
- struct sh_mmcif_plat_data *pdata,
- enum dma_transfer_direction direction)
+sh_mmcif_request_dma_pdata(struct sh_mmcif_host *host, uintptr_t slave_id)
{
- struct dma_slave_config cfg = { 0, };
- struct dma_chan *chan;
- void *slave_data = NULL;
- struct resource *res;
- struct device *dev = sh_mmcif_host_to_dev(host);
dma_cap_mask_t mask;
- int ret;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
+ if (slave_id <= 0)
+ return NULL;
- if (pdata)
- slave_data = direction == DMA_MEM_TO_DEV ?
- (void *)pdata->slave_id_tx :
- (void *)pdata->slave_id_rx;
-
- chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
- slave_data, dev,
- direction == DMA_MEM_TO_DEV ? "tx" : "rx");
-
- dev_dbg(dev, "%s: %s: got channel %p\n", __func__,
- direction == DMA_MEM_TO_DEV ? "TX" : "RX", chan);
+ return dma_request_channel(mask, shdma_chan_filter, (void *)slave_id);
+}
- if (!chan)
- return NULL;
+static int sh_mmcif_dma_slave_config(struct sh_mmcif_host *host,
+ struct dma_chan *chan,
+ enum dma_transfer_direction direction)
+{
+ struct resource *res;
+ struct dma_slave_config cfg = { 0, };
res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
-
cfg.direction = direction;
if (direction == DMA_DEV_TO_MEM) {
@@ -439,38 +427,42 @@ sh_mmcif_request_dma_one(struct sh_mmcif_host *host,
cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
}
- ret = dmaengine_slave_config(chan, &cfg);
- if (ret < 0) {
- dma_release_channel(chan);
- return NULL;
- }
-
- return chan;
+ return dmaengine_slave_config(chan, &cfg);
}
-static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
- struct sh_mmcif_plat_data *pdata)
+static void sh_mmcif_request_dma(struct sh_mmcif_host *host)
{
struct device *dev = sh_mmcif_host_to_dev(host);
host->dma_active = false;
- if (pdata) {
- if (pdata->slave_id_tx <= 0 || pdata->slave_id_rx <= 0)
- return;
- } else if (!dev->of_node) {
- return;
+ /* We can only either use DMA for both Tx and Rx or not use it at all */
+ if (IS_ENABLED(CONFIG_SUPERH) && dev->platform_data) {
+ struct sh_mmcif_plat_data *pdata = dev->platform_data;
+
+ host->chan_tx = sh_mmcif_request_dma_pdata(host,
+ pdata->slave_id_tx);
+ host->chan_rx = sh_mmcif_request_dma_pdata(host,
+ pdata->slave_id_rx);
+ } else {
+ host->chan_tx = dma_request_slave_channel(dev, "tx");
+ host->chan_rx = dma_request_slave_channel(dev, "rx");
}
+ dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx,
+ host->chan_rx);
- /* We can only either use DMA for both Tx and Rx or not use it at all */
- host->chan_tx = sh_mmcif_request_dma_one(host, pdata, DMA_MEM_TO_DEV);
- if (!host->chan_tx)
- return;
+ if (!host->chan_tx || !host->chan_rx ||
+ sh_mmcif_dma_slave_config(host, host->chan_tx, DMA_MEM_TO_DEV) ||
+ sh_mmcif_dma_slave_config(host, host->chan_rx, DMA_DEV_TO_MEM))
+ goto error;
- host->chan_rx = sh_mmcif_request_dma_one(host, pdata, DMA_DEV_TO_MEM);
- if (!host->chan_rx) {
+ return;
+
+error:
+ if (host->chan_tx)
dma_release_channel(host->chan_tx);
- host->chan_tx = NULL;
- }
+ if (host->chan_rx)
+ dma_release_channel(host->chan_rx);
+ host->chan_tx = host->chan_rx = NULL;
}
static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
@@ -1102,7 +1094,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (ios->power_mode == MMC_POWER_UP) {
if (!host->card_present) {
/* See if we also get DMA */
- sh_mmcif_request_dma(host, dev->platform_data);
+ sh_mmcif_request_dma(host);
host->card_present = true;
}
sh_mmcif_set_power(host, ios);
diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
index 4498e92116b8..b47122d3e8d8 100644
--- a/drivers/mmc/host/usdhi6rol0.c
+++ b/drivers/mmc/host/usdhi6rol0.c
@@ -1634,7 +1634,7 @@ static void usdhi6_timeout_work(struct work_struct *work)
struct usdhi6_host *host = container_of(d, struct usdhi6_host, timeout_work);
struct mmc_request *mrq = host->mrq;
struct mmc_data *data = mrq ? mrq->data : NULL;
- struct scatterlist *sg = host->sg ?: data->sg;
+ struct scatterlist *sg;
dev_warn(mmc_dev(host->mmc),
"%s timeout wait %u CMD%d: IRQ 0x%08x:0x%08x, last IRQ 0x%08x\n",
@@ -1666,6 +1666,7 @@ static void usdhi6_timeout_work(struct work_struct *work)
case USDHI6_WAIT_FOR_MWRITE:
case USDHI6_WAIT_FOR_READ:
case USDHI6_WAIT_FOR_WRITE:
+ sg = host->sg ?: data->sg;
dev_dbg(mmc_dev(host->mmc),
"%c: page #%u @ +0x%zx %ux%u in SG%u. Current SG %u bytes @ %u\n",
data->flags & MMC_DATA_READ ? 'R' : 'W', host->page_idx,
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index ece544efccc3..3ff583f165cd 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -3995,6 +3995,9 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
return ret;
}
+ if (!mtd->name && mtd->dev.parent)
+ mtd->name = dev_name(mtd->dev.parent);
+
/* Set the default functions */
nand_set_defaults(chip, chip->options & NAND_BUSWIDTH_16);
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 43b3392ffee7..652d01832873 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -2599,6 +2599,7 @@ static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
*/
static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
+ struct onenand_chip *this = mtd->priv;
int ret;
ret = onenand_block_isbad(mtd, ofs);
@@ -2610,7 +2611,7 @@ static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
}
onenand_get_device(mtd, FL_WRITING);
- ret = mtd_block_markbad(mtd, ofs);
+ ret = this->block_markbad(mtd, ofs);
onenand_release_device(mtd);
return ret;
}
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index 2a1b6e037e1a..0134ba32a057 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -193,7 +193,7 @@ int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
vol->changing_leb = 1;
vol->ch_lnum = req->lnum;
- vol->upd_buf = vmalloc(req->bytes);
+ vol->upd_buf = vmalloc(ALIGN((int)req->bytes, ubi->min_io_size));
if (!vol->upd_buf)
return -ENOMEM;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 9e0f8a7ef8b1..b3d70a7a5262 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -214,6 +214,8 @@ static void bond_uninit(struct net_device *bond_dev);
static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
struct rtnl_link_stats64 *stats);
static void bond_slave_arr_handler(struct work_struct *work);
+static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
+ int mod);
/*---------------------------- General routines -----------------------------*/
@@ -1207,7 +1209,6 @@ static int bond_master_upper_dev_link(struct net_device *bond_dev,
err = netdev_master_upper_dev_link_private(slave_dev, bond_dev, slave);
if (err)
return err;
- slave_dev->flags |= IFF_SLAVE;
rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE, GFP_KERNEL);
return 0;
}
@@ -1465,6 +1466,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
}
}
+ /* set slave flag before open to prevent IPv6 addrconf */
+ slave_dev->flags |= IFF_SLAVE;
+
/* open the slave since the application closed it */
res = dev_open(slave_dev);
if (res) {
@@ -1725,6 +1729,7 @@ err_close:
dev_close(slave_dev);
err_restore_mac:
+ slave_dev->flags &= ~IFF_SLAVE;
if (!bond->params.fail_over_mac ||
BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
/* XXX TODO - fom follow mode needs to change master's
@@ -2415,7 +2420,7 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
struct slave *slave)
{
struct arphdr *arp = (struct arphdr *)skb->data;
- struct slave *curr_active_slave;
+ struct slave *curr_active_slave, *curr_arp_slave;
unsigned char *arp_ptr;
__be32 sip, tip;
int alen, is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
@@ -2462,26 +2467,41 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
&sip, &tip);
curr_active_slave = rcu_dereference(bond->curr_active_slave);
+ curr_arp_slave = rcu_dereference(bond->current_arp_slave);
- /* Backup slaves won't see the ARP reply, but do come through
- * here for each ARP probe (so we swap the sip/tip to validate
- * the probe). In a "redundant switch, common router" type of
- * configuration, the ARP probe will (hopefully) travel from
- * the active, through one switch, the router, then the other
- * switch before reaching the backup.
+ /* We 'trust' the received ARP enough to validate it if:
+ *
+ * (a) the slave receiving the ARP is active (which includes the
+ * current ARP slave, if any), or
+ *
+ * (b) the receiving slave isn't active, but there is a currently
+ * active slave and it received valid arp reply(s) after it became
+ * the currently active slave, or
*
- * We 'trust' the arp requests if there is an active slave and
- * it received valid arp reply(s) after it became active. This
- * is done to avoid endless looping when we can't reach the
+ * (c) there is an ARP slave that sent an ARP during the prior ARP
+ * interval, and we receive an ARP reply on any slave. We accept
+ * these because switch FDB update delays may deliver the ARP
+ * reply to a slave other than the sender of the ARP request.
+ *
+ * Note: for (b), backup slaves are receiving the broadcast ARP
+ * request, not a reply. This request passes from the sending
+ * slave through the L2 switch(es) to the receiving slave. Since
+ * this is checking the request, sip/tip are swapped for
+ * validation.
+ *
+ * This is done to avoid endless looping when we can't reach the
* arp_ip_target and fool ourselves with our own arp requests.
*/
-
if (bond_is_active_slave(slave))
bond_validate_arp(bond, slave, sip, tip);
else if (curr_active_slave &&
time_after(slave_last_rx(bond, curr_active_slave),
curr_active_slave->last_link_up))
bond_validate_arp(bond, slave, tip, sip);
+ else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
+ bond_time_in_interval(bond,
+ dev_trans_start(curr_arp_slave->dev), 1))
+ bond_validate_arp(bond, slave, sip, tip);
out_unlock:
if (arp != (struct arphdr *)skb->data)
@@ -3240,6 +3260,30 @@ static int bond_close(struct net_device *bond_dev)
return 0;
}
+/* fold stats, assuming all rtnl_link_stats64 fields are u64, but
+ * that some drivers can provide 32bit values only.
+ */
+static void bond_fold_stats(struct rtnl_link_stats64 *_res,
+ const struct rtnl_link_stats64 *_new,
+ const struct rtnl_link_stats64 *_old)
+{
+ const u64 *new = (const u64 *)_new;
+ const u64 *old = (const u64 *)_old;
+ u64 *res = (u64 *)_res;
+ int i;
+
+ for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) {
+ u64 nv = new[i];
+ u64 ov = old[i];
+
+ /* detects if this particular field is 32bit only */
+ if (((nv | ov) >> 32) == 0)
+ res[i] += (u32)nv - (u32)ov;
+ else
+ res[i] += nv - ov;
+ }
+}
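+
+/* Worked example (illustrative): a driver whose u32 counter wraps from
+ * 0xffffffff to 9 yields nv = 9, ov = 0xffffffff; ((nv | ov) >> 32) == 0,
+ * so the delta folds as (u32)9 - (u32)0xffffffff = 10, the true number
+ * of new packets, rather than a huge unsigned 64-bit difference.
+ */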
+
static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
struct rtnl_link_stats64 *stats)
{
@@ -3248,43 +3292,23 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
struct list_head *iter;
struct slave *slave;
+ spin_lock(&bond->stats_lock);
memcpy(stats, &bond->bond_stats, sizeof(*stats));
- bond_for_each_slave(bond, slave, iter) {
- const struct rtnl_link_stats64 *sstats =
+ rcu_read_lock();
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ const struct rtnl_link_stats64 *new =
dev_get_stats(slave->dev, &temp);
- struct rtnl_link_stats64 *pstats = &slave->slave_stats;
-
- stats->rx_packets += sstats->rx_packets - pstats->rx_packets;
- stats->rx_bytes += sstats->rx_bytes - pstats->rx_bytes;
- stats->rx_errors += sstats->rx_errors - pstats->rx_errors;
- stats->rx_dropped += sstats->rx_dropped - pstats->rx_dropped;
-
- stats->tx_packets += sstats->tx_packets - pstats->tx_packets;;
- stats->tx_bytes += sstats->tx_bytes - pstats->tx_bytes;
- stats->tx_errors += sstats->tx_errors - pstats->tx_errors;
- stats->tx_dropped += sstats->tx_dropped - pstats->tx_dropped;
-
- stats->multicast += sstats->multicast - pstats->multicast;
- stats->collisions += sstats->collisions - pstats->collisions;
-
- stats->rx_length_errors += sstats->rx_length_errors - pstats->rx_length_errors;
- stats->rx_over_errors += sstats->rx_over_errors - pstats->rx_over_errors;
- stats->rx_crc_errors += sstats->rx_crc_errors - pstats->rx_crc_errors;
- stats->rx_frame_errors += sstats->rx_frame_errors - pstats->rx_frame_errors;
- stats->rx_fifo_errors += sstats->rx_fifo_errors - pstats->rx_fifo_errors;
- stats->rx_missed_errors += sstats->rx_missed_errors - pstats->rx_missed_errors;
-
- stats->tx_aborted_errors += sstats->tx_aborted_errors - pstats->tx_aborted_errors;
- stats->tx_carrier_errors += sstats->tx_carrier_errors - pstats->tx_carrier_errors;
- stats->tx_fifo_errors += sstats->tx_fifo_errors - pstats->tx_fifo_errors;
- stats->tx_heartbeat_errors += sstats->tx_heartbeat_errors - pstats->tx_heartbeat_errors;
- stats->tx_window_errors += sstats->tx_window_errors - pstats->tx_window_errors;
+
+ bond_fold_stats(stats, new, &slave->slave_stats);
/* save off the slave stats for the next run */
- memcpy(pstats, sstats, sizeof(*sstats));
+ memcpy(&slave->slave_stats, new, sizeof(*new));
}
+ rcu_read_unlock();
+
memcpy(&bond->bond_stats, stats, sizeof(*stats));
+ spin_unlock(&bond->stats_lock);
return stats;
}
@@ -4098,6 +4122,7 @@ void bond_setup(struct net_device *bond_dev)
struct bonding *bond = netdev_priv(bond_dev);
spin_lock_init(&bond->mode_lock);
+ spin_lock_init(&bond->stats_lock);
bond->params = bonding_defaults;
/* Initialize pointers */
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index fc5b75675cd8..eb7192fab593 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -117,6 +117,9 @@ MODULE_LICENSE("GPL v2");
*/
#define EMS_USB_ARM7_CLOCK 8000000
+#define CPC_TX_QUEUE_TRIGGER_LOW 25
+#define CPC_TX_QUEUE_TRIGGER_HIGH 35
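+/* descriptive note: these two thresholds give tx flow control some
+ * hysteresis on the device's free buffer count: the queue is stopped
+ * once free_slots drops below the LOW mark and only woken again when
+ * the interrupt endpoint reports more than the HIGH mark free
+ */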
+
/*
* CAN-Message representation in a CPC_MSG. Message object type is
* CPC_MSG_TYPE_CAN_FRAME or CPC_MSG_TYPE_RTR_FRAME or
@@ -278,6 +281,11 @@ static void ems_usb_read_interrupt_callback(struct urb *urb)
switch (urb->status) {
case 0:
dev->free_slots = dev->intr_in_buffer[1];
+ if (dev->free_slots > CPC_TX_QUEUE_TRIGGER_HIGH) {
+ if (netif_queue_stopped(netdev))
+ netif_wake_queue(netdev);
+ }
break;
case -ECONNRESET: /* unlink */
@@ -526,8 +534,6 @@ static void ems_usb_write_bulk_callback(struct urb *urb)
/* Release context */
context->echo_index = MAX_TX_URBS;
- if (netif_queue_stopped(netdev))
- netif_wake_queue(netdev);
}
/*
@@ -587,7 +593,7 @@ static int ems_usb_start(struct ems_usb *dev)
int err, i;
dev->intr_in_buffer[0] = 0;
- dev->free_slots = 15; /* initial size */
+ dev->free_slots = 50; /* initial size */
for (i = 0; i < MAX_RX_URBS; i++) {
struct urb *urb = NULL;
@@ -835,7 +841,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
/* Slow down tx path */
if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS ||
- dev->free_slots < 5) {
+ dev->free_slots < CPC_TX_QUEUE_TRIGGER_LOW) {
netif_stop_queue(netdev);
}
}
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 5eee62badf45..cbc99d5649af 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -826,9 +826,8 @@ static struct gs_can *gs_make_candev(unsigned int channel, struct usb_interface
static void gs_destroy_candev(struct gs_can *dev)
{
unregister_candev(dev->netdev);
- free_candev(dev->netdev);
usb_kill_anchored_urbs(&dev->tx_submitted);
- kfree(dev);
+ free_candev(dev->netdev);
}
static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
@@ -913,12 +912,15 @@ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *
for (i = 0; i < icount; i++) {
dev->canch[i] = gs_make_candev(i, intf);
if (IS_ERR_OR_NULL(dev->canch[i])) {
+ /* save error code to return later */
+ rc = PTR_ERR(dev->canch[i]);
+
/* on failure destroy previously created candevs */
icount = i;
- for (i = 0; i < icount; i++) {
+ for (i = 0; i < icount; i++)
gs_destroy_candev(dev->canch[i]);
- dev->canch[i] = NULL;
- }
+
+ usb_kill_anchored_urbs(&dev->rx_submitted);
kfree(dev);
return rc;
}
@@ -939,16 +941,12 @@ static void gs_usb_disconnect(struct usb_interface *intf)
return;
}
- for (i = 0; i < GS_MAX_INTF; i++) {
- struct gs_can *can = dev->canch[i];
-
- if (!can)
- continue;
-
- gs_destroy_candev(can);
- }
+ for (i = 0; i < GS_MAX_INTF; i++)
+ if (dev->canch[i])
+ gs_destroy_candev(dev->canch[i]);
usb_kill_anchored_urbs(&dev->rx_submitted);
+ kfree(dev);
}
static const struct usb_device_id gs_usb_table[] = {
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index b06dba05594a..2dea39b5cb0b 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -1519,7 +1519,7 @@ int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
/* no PVID with ranges, otherwise it's a bug */
if (pvid)
- err = _mv88e6xxx_port_pvid_set(ds, port, vid);
+ err = _mv88e6xxx_port_pvid_set(ds, port, vlan->vid_end);
unlock:
mutex_unlock(&ps->smi_mutex);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 17f017ab4dac..0fb3f8de88e9 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1197,7 +1197,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
dev->stats.tx_bytes += tx_cb_ptr->skb->len;
dma_unmap_single(&dev->dev,
dma_unmap_addr(tx_cb_ptr, dma_addr),
- tx_cb_ptr->skb->len,
+ dma_unmap_len(tx_cb_ptr, dma_len),
DMA_TO_DEVICE);
bcmgenet_free_cb(tx_cb_ptr);
} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
@@ -1308,7 +1308,7 @@ static int bcmgenet_xmit_single(struct net_device *dev,
}
dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
- dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len);
+ dma_unmap_len_set(tx_cb_ptr, dma_len, skb_len);
length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
DMA_TX_APPEND_CRC;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 79789d8e52da..ca5ac5d6f4e6 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7833,6 +7833,14 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
return ret;
}
+static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
+{
+ /* Check if we will never have enough descriptors,
+ * as gso_segs can be more than current ring size
+ */
+ return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
+}
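+
+/* Illustrative numbers: with the default tx_pending of 511 descriptors,
+ * the GSO fallback is attempted only for skbs with fewer than ~170
+ * segments; anything larger could never fit in the ring and is dropped
+ * instead of stalling the queue forever.
+ */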
+
static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
/* Use GSO to workaround all TSO packets that meet HW bug conditions
@@ -7936,14 +7944,19 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
* vlan encapsulated.
*/
if (skb->protocol == htons(ETH_P_8021Q) ||
- skb->protocol == htons(ETH_P_8021AD))
- return tg3_tso_bug(tp, tnapi, txq, skb);
+ skb->protocol == htons(ETH_P_8021AD)) {
+ if (tg3_tso_bug_gso_check(tnapi, skb))
+ return tg3_tso_bug(tp, tnapi, txq, skb);
+ goto drop;
+ }
if (!skb_is_gso_v6(skb)) {
if (unlikely((ETH_HLEN + hdr_len) > 80) &&
- tg3_flag(tp, TSO_BUG))
- return tg3_tso_bug(tp, tnapi, txq, skb);
-
+ tg3_flag(tp, TSO_BUG)) {
+ if (tg3_tso_bug_gso_check(tnapi, skb))
+ return tg3_tso_bug(tp, tnapi, txq, skb);
+ goto drop;
+ }
ip_csum = iph->check;
ip_tot_len = iph->tot_len;
iph->check = 0;
@@ -8075,7 +8088,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (would_hit_hwbug) {
tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
- if (mss) {
+ if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
/* If it's a TSO packet, do GSO instead of
* allocating and copying to a large linear SKB
*/
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 1671fa3332c2..7ba6d530b0c0 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -33,7 +33,7 @@
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION "2.3.0.12"
+#define DRV_VERSION "2.3.0.20"
#define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc"
#define ENIC_BARS_MAX 6
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 1ffd1050860b..1fdf5fe12a95 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -298,7 +298,8 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
int wait)
{
struct devcmd2_controller *dc2c = vdev->devcmd2;
- struct devcmd2_result *result = dc2c->result + dc2c->next_result;
+ struct devcmd2_result *result;
+ u8 color;
unsigned int i;
int delay, err;
u32 fetch_index, new_posted;
@@ -336,13 +337,17 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
return 0;
+ result = dc2c->result + dc2c->next_result;
+ color = dc2c->color;
+
+ dc2c->next_result++;
+ if (dc2c->next_result == dc2c->result_size) {
+ dc2c->next_result = 0;
+ dc2c->color = dc2c->color ? 0 : 1;
+ }
+
for (delay = 0; delay < wait; delay++) {
- if (result->color == dc2c->color) {
- dc2c->next_result++;
- if (dc2c->next_result == dc2c->result_size) {
- dc2c->next_result = 0;
- dc2c->color = dc2c->color ? 0 : 1;
- }
+ if (result->color == color) {
if (result->error) {
err = result->error;
if (err != ERR_ECMDUNKNOWN ||
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 060dd3922974..973dade2d07f 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -3312,13 +3312,14 @@ jme_resume(struct device *dev)
jme_reset_phy_processor(jme);
jme_phy_calibration(jme);
jme_phy_setEA(jme);
- jme_start_irq(jme);
netif_device_attach(netdev);
atomic_inc(&jme->link_changing);
jme_reset_link(jme);
+ jme_start_irq(jme);
+
return 0;
}
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index ed622fa29dfa..a4ac6fedac75 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -3404,7 +3404,7 @@ static int mvneta_probe(struct platform_device *pdev)
dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
dev->hw_features |= dev->features;
dev->vlan_features |= dev->features;
- dev->priv_flags |= IFF_UNICAST_FLT;
+ dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
err = register_netdev(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 038f9ce391e6..1494997c4f7e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -236,6 +236,24 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
.enable = mlx4_en_phc_enable,
};
+#define MLX4_EN_WRAP_AROUND_SEC 10ULL
+
+/* This function calculates the max shift that enables the user range
+ * of MLX4_EN_WRAP_AROUND_SEC values in the cycles register.
+ */
+static u32 freq_to_shift(u16 freq)
+{
+ u32 freq_khz = freq * 1000;
+ u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
+ u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ?
+ max_val_cycles : roundup_pow_of_two(max_val_cycles) - 1;
+ /* calculate max possible multiplier in order to fit in 64bit */
+ u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded);
+
+ /* This comes from the reverse of clocksource_khz2mult */
+ return ilog2(div_u64(max_mul * freq_khz, 1000000));
+}
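+
+/* Worked example (illustrative): for a 427 MHz clock, freq_khz = 427000
+ * and max_val_cycles = 427000 * 1000 * 10 ~= 2^42, so the rounded value
+ * is 2^42 - 1; max_mul = (2^64 - 1) / (2^42 - 1) ~= 2^22, and
+ * ilog2(max_mul * 427000 / 1000000) gives a shift of 20: the largest
+ * shift that cannot overflow 64 bits within MLX4_EN_WRAP_AROUND_SEC.
+ */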
+
void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
{
struct mlx4_dev *dev = mdev->dev;
@@ -254,12 +272,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
memset(&mdev->cycles, 0, sizeof(mdev->cycles));
mdev->cycles.read = mlx4_en_read_clock;
mdev->cycles.mask = CLOCKSOURCE_MASK(48);
- /* Using shift to make calculation more accurate. Since current HW
- * clock frequency is 427 MHz, and cycles are given using a 48 bits
- * register, the biggest shift when calculating using u64, is 14
- * (max_cycles * multiplier < 2^64)
- */
- mdev->cycles.shift = 14;
+ mdev->cycles.shift = freq_to_shift(dev->caps.hca_core_clock);
mdev->cycles.mult =
clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);
mdev->nominal_c_mult = mdev->cycles.mult;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 7869f97de5da..67e9633ea9c7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2381,8 +2381,6 @@ out:
/* set offloads */
priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
- priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
- priv->dev->features |= NETIF_F_GSO_UDP_TUNNEL;
}
static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
@@ -2393,8 +2391,6 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
/* unset offloads */
priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL);
- priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
- priv->dev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
VXLAN_STEER_BY_OUTER_MAC, 0);
@@ -3020,6 +3016,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->rss_hash_fn = ETH_RSS_HASH_TOP;
}
+ if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+ dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+ dev->features |= NETIF_F_GSO_UDP_TUNNEL;
+ }
+
mdev->pndev[port] = dev;
mdev->upper[port] = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index ee99e67187f5..3904b5fc0b7c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -238,11 +238,11 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
stats->collisions = 0;
stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP);
stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
- stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
+ stats->rx_over_errors = 0;
stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
stats->rx_frame_errors = 0;
stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
- stats->rx_missed_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
+ stats->rx_missed_errors = 0;
stats->tx_aborted_errors = 0;
stats->tx_carrier_errors = 0;
stats->tx_fifo_errors = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index cad6c44df91c..d314d96dcb1c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -3132,7 +3132,7 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
case QP_TRANS_RTS2RTS:
case QP_TRANS_SQD2SQD:
case QP_TRANS_SQD2RTS:
- if (slave != mlx4_master_func_num(dev))
+ if (slave != mlx4_master_func_num(dev)) {
if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
@@ -3151,6 +3151,7 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
if (qp_ctx->alt_path.mgid_index >= num_gids)
return -EINVAL;
}
+ }
break;
default:
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 1e52db32c73d..1203d892e842 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -746,7 +746,7 @@ static int mlx5e_create_cq(struct mlx5e_channel *c,
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_core_cq *mcq = &cq->mcq;
int eqn_not_used;
- int irqn;
+ unsigned int irqn;
int err;
u32 i;
@@ -800,7 +800,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
void *in;
void *cqc;
int inlen;
- int irqn_not_used;
+ unsigned int irqn_not_used;
int eqn;
int err;
@@ -1504,7 +1504,7 @@ static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_core_cq *mcq = &cq->mcq;
int eqn_not_used;
- int irqn;
+ unsigned int irqn;
int err;
err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 4ac8d4cc4973..6cf6d93d8831 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -568,7 +568,8 @@ static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev)
mlx5_irq_clear_affinity_hint(mdev, i);
}
-int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn)
+int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
+ unsigned int *irqn)
{
struct mlx5_eq_table *table = &dev->priv.eq_table;
struct mlx5_eq *eq, *n;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 4365c8bccc6d..605f6410f867 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -61,6 +61,8 @@ struct mlxsw_sp {
#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
unsigned int interval; /* ms */
} fdb_notify;
+#define MLXSW_SP_MIN_AGEING_TIME 10
+#define MLXSW_SP_MAX_AGEING_TIME 1000000
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
u32 ageing_time;
struct {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 617fb22b5d81..d4c4c2b5156c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -45,6 +45,7 @@
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
+#include <linux/rtnetlink.h>
#include <net/switchdev.h>
#include "spectrum.h"
@@ -231,8 +232,13 @@ static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
+ if (switchdev_trans_ph_prepare(trans)) {
+ if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
+ ageing_time > MLXSW_SP_MAX_AGEING_TIME)
+ return -ERANGE;
+ else
+ return 0;
+ }
return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
}
@@ -812,6 +818,7 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);
+ rtnl_lock();
do {
mlxsw_reg_sfn_pack(sfn_pl);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
@@ -824,6 +831,7 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
} while (num_rec);
+ rtnl_unlock();
kfree(sfn_pl);
mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
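
The mlxsw hunk above rejects out-of-range ageing times with -ERANGE during the switchdev prepare phase, where an error can still be returned cleanly; the commit phase that follows is not allowed to fail. A minimal standalone sketch of the same bounds check, with a hypothetical helper name:

#include <errno.h>

#define SP_MIN_AGEING_TIME 10        /* seconds, as in the hunk */
#define SP_MAX_AGEING_TIME 1000000   /* seconds, as in the hunk */

/* Validate in "prepare" so "commit" can assume the value is sane. */
static int check_ageing_time(unsigned int ageing_time)
{
    if (ageing_time < SP_MIN_AGEING_TIME ||
        ageing_time > SP_MAX_AGEING_TIME)
        return -ERANGE;
    return 0;
}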
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 46bbea8e023c..55007f1e6bbc 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -566,6 +566,7 @@ struct qlcnic_adapter_stats {
u64 tx_dma_map_error;
u64 spurious_intr;
u64 mac_filter_limit_overrun;
+ u64 mbx_spurious_intr;
};
/*
@@ -1099,7 +1100,7 @@ struct qlcnic_mailbox {
unsigned long status;
spinlock_t queue_lock; /* Mailbox queue lock */
spinlock_t aen_lock; /* Mailbox response/AEN lock */
- atomic_t rsp_status;
+ u32 rsp_status;
u32 num_cmds;
};
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 37a731be7d39..f9640d5ce6ba 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -491,7 +491,7 @@ irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
static inline void qlcnic_83xx_notify_mbx_response(struct qlcnic_mailbox *mbx)
{
- atomic_set(&mbx->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED);
+ mbx->rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
complete(&mbx->completion);
}
@@ -510,7 +510,7 @@ static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter)
if (event & QLCNIC_MBX_ASYNC_EVENT) {
__qlcnic_83xx_process_aen(adapter);
} else {
- if (atomic_read(&mbx->rsp_status) != rsp_status)
+ if (mbx->rsp_status != rsp_status)
qlcnic_83xx_notify_mbx_response(mbx);
}
out:
@@ -1023,7 +1023,7 @@ static void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
if (event & QLCNIC_MBX_ASYNC_EVENT) {
__qlcnic_83xx_process_aen(adapter);
} else {
- if (atomic_read(&mbx->rsp_status) != rsp_status)
+ if (mbx->rsp_status != rsp_status)
qlcnic_83xx_notify_mbx_response(mbx);
}
}
@@ -2338,9 +2338,9 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
static irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
{
+ u32 mask, resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
struct qlcnic_adapter *adapter = data;
struct qlcnic_mailbox *mbx;
- u32 mask, resp, event;
unsigned long flags;
mbx = adapter->ahw->mailbox;
@@ -2350,10 +2350,14 @@ static irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
goto out;
event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
- if (event & QLCNIC_MBX_ASYNC_EVENT)
+ if (event & QLCNIC_MBX_ASYNC_EVENT) {
__qlcnic_83xx_process_aen(adapter);
- else
- qlcnic_83xx_notify_mbx_response(mbx);
+ } else {
+ if (mbx->rsp_status != rsp_status)
+ qlcnic_83xx_notify_mbx_response(mbx);
+ else
+ adapter->stats.mbx_spurious_intr++;
+ }
out:
mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
@@ -4050,10 +4054,10 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
struct qlcnic_adapter *adapter = mbx->adapter;
const struct qlcnic_mbx_ops *mbx_ops = mbx->ops;
struct device *dev = &adapter->pdev->dev;
- atomic_t *rsp_status = &mbx->rsp_status;
struct list_head *head = &mbx->cmd_q;
struct qlcnic_hardware_context *ahw;
struct qlcnic_cmd_args *cmd = NULL;
+ unsigned long flags;
ahw = adapter->ahw;
@@ -4063,7 +4067,9 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
return;
}
- atomic_set(rsp_status, QLC_83XX_MBX_RESPONSE_WAIT);
+ spin_lock_irqsave(&mbx->aen_lock, flags);
+ mbx->rsp_status = QLC_83XX_MBX_RESPONSE_WAIT;
+ spin_unlock_irqrestore(&mbx->aen_lock, flags);
spin_lock(&mbx->queue_lock);
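
The qlcnic change replaces a lone atomic_t with a plain u32 that is read and written only under the mailbox's existing aen_lock, so the status word can no longer be observed out of step with the completion it pairs with. A userspace sketch of the pattern, with a pthread mutex standing in for the spinlock (all names hypothetical):

#include <pthread.h>

enum { MBX_RESPONSE_WAIT, MBX_RESPONSE_ARRIVED };

struct mbx {
    pthread_mutex_t aen_lock;
    unsigned int rsp_status;    /* touched only under aen_lock */
};

static void mbx_begin_cmd(struct mbx *m)
{
    pthread_mutex_lock(&m->aen_lock);
    m->rsp_status = MBX_RESPONSE_WAIT;
    pthread_mutex_unlock(&m->aen_lock);
}

static int mbx_response_arrived(struct mbx *m)
{
    int arrived;

    pthread_mutex_lock(&m->aen_lock);
    arrived = m->rsp_status == MBX_RESPONSE_ARRIVED;
    pthread_mutex_unlock(&m->aen_lock);
    return arrived;
}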
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 494e8105adee..0a2318cad34d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -59,7 +59,8 @@ static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
QLC_OFF(stats.mac_filter_limit_overrun)},
{"spurious intr", QLC_SIZEOF(stats.spurious_intr),
QLC_OFF(stats.spurious_intr)},
-
+ {"mbx spurious intr", QLC_SIZEOF(stats.mbx_spurious_intr),
+ QLC_OFF(stats.mbx_spurious_intr)},
};
static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 997976426799..b28e73ea2c25 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -1648,7 +1648,18 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
return;
}
skb_reserve(new_skb, NET_IP_ALIGN);
+
+ pci_dma_sync_single_for_cpu(qdev->pdev,
+ dma_unmap_addr(sbq_desc, mapaddr),
+ dma_unmap_len(sbq_desc, maplen),
+ PCI_DMA_FROMDEVICE);
+
memcpy(skb_put(new_skb, length), skb->data, length);
+
+ pci_dma_sync_single_for_device(qdev->pdev,
+ dma_unmap_addr(sbq_desc, mapaddr),
+ dma_unmap_len(sbq_desc, maplen),
+ PCI_DMA_FROMDEVICE);
skb = new_skb;
/* Frame error, so drop the packet. */
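
The qlge fix brackets the CPU's memcpy with sync calls because the small RX buffer stays DMA-mapped across receives: ownership must pass to the CPU before the copy and back to the device afterwards. A kernel-style sketch of the pattern using the generic DMA API (the helper itself is hypothetical, not driver code):

#include <linux/dma-mapping.h>
#include <linux/string.h>

/* Copy @len bytes out of a streaming RX mapping that stays mapped. */
static void rx_copy_synced(struct device *dev, dma_addr_t buf_dma,
                           const void *buf_cpu, size_t buf_len,
                           void *dst, size_t len)
{
    dma_sync_single_for_cpu(dev, buf_dma, buf_len, DMA_FROM_DEVICE);
    memcpy(dst, buf_cpu, len);    /* CPU owns the buffer here */
    dma_sync_single_for_device(dev, buf_dma, buf_len, DMA_FROM_DEVICE);
}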
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 689a4a5c8dcf..1ef03939d25f 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -811,7 +811,7 @@ qcaspi_netdev_setup(struct net_device *dev)
dev->netdev_ops = &qcaspi_netdev_ops;
qcaspi_set_ethtool_ops(dev);
dev->watchdog_timeo = QCASPI_TX_TIMEOUT;
- dev->flags = IFF_MULTICAST;
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->tx_queue_len = 100;
qca = netdev_priv(dev);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 6a8fc0f341ff..36fc9427418f 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1185,11 +1185,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
break;
sh_eth_set_receive_align(skb);
- /* RX descriptor */
- rxdesc = &mdp->rx_ring[i];
/* The size of the buffer is a multiple of 32 bytes. */
buf_len = ALIGN(mdp->rx_buf_sz, 32);
- rxdesc->len = cpu_to_edmac(mdp, buf_len << 16);
dma_addr = dma_map_single(&ndev->dev, skb->data, buf_len,
DMA_FROM_DEVICE);
if (dma_mapping_error(&ndev->dev, dma_addr)) {
@@ -1197,6 +1194,10 @@ static void sh_eth_ring_format(struct net_device *ndev)
break;
}
mdp->rx_skbuff[i] = skb;
+
+ /* RX descriptor */
+ rxdesc = &mdp->rx_ring[i];
+ rxdesc->len = cpu_to_edmac(mdp, buf_len << 16);
rxdesc->addr = cpu_to_edmac(mdp, dma_addr);
rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
@@ -1212,7 +1213,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
/* Mark the last entry as wrapping the ring. */
- rxdesc->status |= cpu_to_edmac(mdp, RD_RDLE);
+ if (rxdesc)
+ rxdesc->status |= cpu_to_edmac(mdp, RD_RDLE);
memset(mdp->tx_ring, 0, tx_ringsize);
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index e9f2349e98bc..2b34622a4bfe 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -239,6 +239,7 @@ struct rocker {
struct {
u64 id;
} hw;
+ unsigned long ageing_time;
spinlock_t cmd_ring_lock; /* for cmd ring accesses */
struct rocker_dma_ring_info cmd_ring;
struct rocker_dma_ring_info event_ring;
@@ -3531,12 +3532,14 @@ static void rocker_port_fdb_learn_work(struct work_struct *work)
info.addr = lw->addr;
info.vid = lw->vid;
+ rtnl_lock();
if (learned && removing)
call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
lw->rocker_port->dev, &info.info);
else if (learned && !removing)
call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
lw->rocker_port->dev, &info.info);
+ rtnl_unlock();
rocker_port_kfree(lw->trans, work);
}
@@ -3702,7 +3705,7 @@ static void rocker_fdb_cleanup(unsigned long data)
struct rocker_port *rocker_port;
struct rocker_fdb_tbl_entry *entry;
struct hlist_node *tmp;
- unsigned long next_timer = jiffies + BR_MIN_AGEING_TIME;
+ unsigned long next_timer = jiffies + rocker->ageing_time;
unsigned long expires;
unsigned long lock_flags;
int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE |
@@ -4365,8 +4368,12 @@ static int rocker_port_bridge_ageing_time(struct rocker_port *rocker_port,
struct switchdev_trans *trans,
u32 ageing_time)
{
+ struct rocker *rocker = rocker_port->rocker;
+
if (!switchdev_trans_ph_prepare(trans)) {
rocker_port->ageing_time = clock_t_to_jiffies(ageing_time);
+ if (rocker_port->ageing_time < rocker->ageing_time)
+ rocker->ageing_time = rocker_port->ageing_time;
mod_timer(&rocker_port->rocker->fdb_cleanup_timer, jiffies);
}
@@ -5204,10 +5211,11 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_init_tbls;
}
+ rocker->ageing_time = BR_DEFAULT_AGEING_TIME;
setup_timer(&rocker->fdb_cleanup_timer, rocker_fdb_cleanup,
(unsigned long) rocker);
mod_timer(&rocker->fdb_cleanup_timer, jiffies);
err = rocker_probe_ports(rocker);
if (err) {
dev_err(&pdev->dev, "failed to probe ports\n");
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
index 9066d7a8483c..f96c6b3606f2 100644
--- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c
+++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
@@ -2107,7 +2107,7 @@ static int dwceqos_tx_frags(struct sk_buff *skb, struct net_local *lp,
dd = &lp->tx_descs[lp->tx_next];
/* Set DMA Descriptor fields */
- dd->des0 = dma_handle;
+ dd->des0 = dma_handle + consumed_size;
dd->des1 = 0;
dd->des2 = dma_size;
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index 696852eb23c3..7a3f990c1935 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -430,16 +430,6 @@ static int irtty_open(struct tty_struct *tty)
/* Module stuff handled via irda_ldisc.owner - Jean II */
- /* First make sure we're not already connected. */
- if (tty->disc_data != NULL) {
- priv = tty->disc_data;
- if (priv && priv->magic == IRTTY_MAGIC) {
- ret = -EEXIST;
- goto out;
- }
- tty->disc_data = NULL; /* ### */
- }
-
/* stop the underlying driver */
irtty_stop_receiver(tty, TRUE);
if (tty->ops->stop)
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 0fc521941c71..159a68782bec 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -760,6 +760,8 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
macvtap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
if (copylen > good_linear)
copylen = good_linear;
+ else if (copylen < ETH_HLEN)
+ copylen = ETH_HLEN;
linear = copylen;
i = *from;
iov_iter_advance(&i, copylen);
@@ -769,10 +771,11 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
if (!zerocopy) {
copylen = len;
- if (macvtap16_to_cpu(q, vnet_hdr.hdr_len) > good_linear)
+ linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
+ if (linear > good_linear)
linear = good_linear;
- else
- linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
+ else if (linear < ETH_HLEN)
+ linear = ETH_HLEN;
}
skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen,
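
Both macvtap hunks clamp the linear length into [ETH_HLEN, good_linear]: the linear area must always hold a complete Ethernet header, but must not exceed the allocation-friendly upper bound. The same clamp as a standalone sketch (helper name hypothetical):

#define ETH_HLEN 14    /* as in the kernel */

static unsigned int clamp_linear(unsigned int want, unsigned int good_linear)
{
    if (want > good_linear)
        return good_linear;    /* cap the linear part */
    if (want < ETH_HLEN)
        return ETH_HLEN;       /* always fit a full Ethernet header */
    return want;
}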
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 47b711739ba9..e6cefd0e3262 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -845,6 +845,11 @@ static void decode_rxts(struct dp83640_private *dp83640,
struct skb_shared_hwtstamps *shhwtstamps = NULL;
struct sk_buff *skb;
unsigned long flags;
+ u8 overflow;
+
+ overflow = (phy_rxts->ns_hi >> 14) & 0x3;
+ if (overflow)
+ pr_debug("rx timestamp queue overflow, count %d\n", overflow);
spin_lock_irqsave(&dp83640->rx_lock, flags);
@@ -887,6 +892,7 @@ static void decode_txts(struct dp83640_private *dp83640,
struct skb_shared_hwtstamps shhwtstamps;
struct sk_buff *skb;
u64 ns;
+ u8 overflow;
/* We must already have the skb that triggered this. */
@@ -896,6 +902,17 @@ static void decode_txts(struct dp83640_private *dp83640,
pr_debug("have timestamp but tx_queue empty\n");
return;
}
+
+ overflow = (phy_txts->ns_hi >> 14) & 0x3;
+ if (overflow) {
+ pr_debug("tx timestamp queue overflow, count %d\n", overflow);
+ while (skb) {
+ skb_complete_tx_timestamp(skb, NULL);
+ skb = skb_dequeue(&dp83640->tx_queue);
+ }
+ return;
+ }
+
ns = phy2txts(phy_txts);
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ns_to_ktime(ns);
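
The dp83640 hunks decode a 2-bit timestamp-queue overflow counter that the PHY packs into bits 15:14 of the ns_hi word; a nonzero value means hardware dropped timestamps, so the TX path flushes its queue with null timestamps rather than matching stale ones. The decode as a standalone sketch:

#include <stdint.h>

/* Bits 15:14 of ns_hi carry the overflow count (0-3). */
static uint8_t decode_overflow(uint16_t ns_hi)
{
    return (ns_hi >> 14) & 0x3;
}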
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 9a863c6a6a33..174e06ec7c2f 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -567,7 +567,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- struct ppp_file *pf = file->private_data;
+ struct ppp_file *pf;
struct ppp *ppp;
int err = -EFAULT, val, val2, i;
struct ppp_idle idle;
@@ -577,9 +577,14 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
void __user *argp = (void __user *)arg;
int __user *p = argp;
- if (!pf)
- return ppp_unattached_ioctl(current->nsproxy->net_ns,
- pf, file, cmd, arg);
+ mutex_lock(&ppp_mutex);
+
+ pf = file->private_data;
+ if (!pf) {
+ err = ppp_unattached_ioctl(current->nsproxy->net_ns,
+ pf, file, cmd, arg);
+ goto out;
+ }
if (cmd == PPPIOCDETACH) {
/*
@@ -594,7 +599,6 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
* this fd and reopening /dev/ppp.
*/
err = -EINVAL;
- mutex_lock(&ppp_mutex);
if (pf->kind == INTERFACE) {
ppp = PF_TO_PPP(pf);
rtnl_lock();
@@ -608,15 +612,13 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
} else
pr_warn("PPPIOCDETACH file->f_count=%ld\n",
atomic_long_read(&file->f_count));
- mutex_unlock(&ppp_mutex);
- return err;
+ goto out;
}
if (pf->kind == CHANNEL) {
struct channel *pch;
struct ppp_channel *chan;
- mutex_lock(&ppp_mutex);
pch = PF_TO_CHANNEL(pf);
switch (cmd) {
@@ -638,17 +640,16 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
err = chan->ops->ioctl(chan, cmd, arg);
up_read(&pch->chan_sem);
}
- mutex_unlock(&ppp_mutex);
- return err;
+ goto out;
}
if (pf->kind != INTERFACE) {
/* can't happen */
pr_err("PPP: not interface or channel??\n");
- return -EINVAL;
+ err = -EINVAL;
+ goto out;
}
- mutex_lock(&ppp_mutex);
ppp = PF_TO_PPP(pf);
switch (cmd) {
case PPPIOCSMRU:
@@ -823,7 +824,10 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
default:
err = -ENOTTY;
}
+
+out:
mutex_unlock(&ppp_mutex);
+
return err;
}
@@ -836,7 +840,6 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
struct ppp_net *pn;
int __user *p = (int __user *)arg;
- mutex_lock(&ppp_mutex);
switch (cmd) {
case PPPIOCNEWUNIT:
/* Create a new ppp unit */
@@ -886,7 +889,7 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
default:
err = -ENOTTY;
}
- mutex_unlock(&ppp_mutex);
+
return err;
}
@@ -2290,7 +2293,7 @@ int ppp_register_net_channel(struct net *net, struct ppp_channel *chan)
pch->ppp = NULL;
pch->chan = chan;
- pch->chan_net = net;
+ pch->chan_net = get_net(net);
chan->ppp = pch;
init_ppp_file(&pch->file, CHANNEL);
pch->file.hdrlen = chan->hdrlen;
@@ -2387,6 +2390,8 @@ ppp_unregister_channel(struct ppp_channel *chan)
spin_lock_bh(&pn->all_channels_lock);
list_del(&pch->list);
spin_unlock_bh(&pn->all_channels_lock);
+ put_net(pch->chan_net);
+ pch->chan_net = NULL;
pch->file.dead = 1;
wake_up_interruptible(&pch->file.rwait);
@@ -2803,6 +2808,7 @@ static struct ppp *ppp_create_interface(struct net *net, int unit,
out2:
mutex_unlock(&pn->all_ppp_mutex);
+ rtnl_unlock();
free_netdev(dev);
out1:
*retp = ret;
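
The ppp_generic rework takes ppp_mutex once at entry, reads file->private_data only under the lock, and funnels every exit through a single out: label, so each path unlocks exactly once and the unattached case can no longer race with attach. A generic kernel-style skeleton of that shape (the foo_* names are hypothetical, not the ppp API):

#include <linux/mutex.h>
#include <linux/fs.h>

static DEFINE_MUTEX(foo_mutex);

static long foo_unattached_ioctl(struct file *file, unsigned int cmd,
                                 unsigned long arg);
static long foo_attached_ioctl(void *priv, unsigned int cmd,
                               unsigned long arg);

static long foo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
    void *priv;
    long err;

    mutex_lock(&foo_mutex);
    priv = file->private_data;    /* read only under the lock */
    if (!priv) {
        err = foo_unattached_ioctl(file, cmd, arg);
        goto out;
    }
    err = foo_attached_ioctl(priv, cmd, arg);
out:
    mutex_unlock(&foo_mutex);    /* every path unlocks exactly once */
    return err;
}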
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 0a37f840fcc5..4e0068e775f9 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -395,6 +395,8 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
if (!__pppoe_xmit(sk_pppox(relay_po), skb))
goto abort_put;
+
+ sock_put(sk_pppox(relay_po));
} else {
if (sock_queue_rcv_skb(sk, skb))
goto abort_kfree;
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 597c53e0a2ec..f7e8c79349ad 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -129,24 +129,27 @@ static int lookup_chan_dst(u16 call_id, __be32 d_addr)
return i < MAX_CALLID;
}
-static int add_chan(struct pppox_sock *sock)
+static int add_chan(struct pppox_sock *sock,
+ struct pptp_addr *sa)
{
static int call_id;
spin_lock(&chan_lock);
- if (!sock->proto.pptp.src_addr.call_id) {
+ if (!sa->call_id) {
call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, call_id + 1);
if (call_id == MAX_CALLID) {
call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, 1);
if (call_id == MAX_CALLID)
goto out_err;
}
- sock->proto.pptp.src_addr.call_id = call_id;
- } else if (test_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap))
+ sa->call_id = call_id;
+ } else if (test_bit(sa->call_id, callid_bitmap)) {
goto out_err;
+ }
- set_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
- rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], sock);
+ sock->proto.pptp.src_addr = *sa;
+ set_bit(sa->call_id, callid_bitmap);
+ rcu_assign_pointer(callid_sock[sa->call_id], sock);
spin_unlock(&chan_lock);
return 0;
@@ -416,7 +419,6 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
struct sock *sk = sock->sk;
struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
struct pppox_sock *po = pppox_sk(sk);
- struct pptp_opt *opt = &po->proto.pptp;
int error = 0;
if (sockaddr_len < sizeof(struct sockaddr_pppox))
@@ -424,10 +426,22 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
lock_sock(sk);
- opt->src_addr = sp->sa_addr.pptp;
- if (add_chan(po))
+ if (sk->sk_state & PPPOX_DEAD) {
+ error = -EALREADY;
+ goto out;
+ }
+
+ if (sk->sk_state & PPPOX_BOUND) {
error = -EBUSY;
+ goto out;
+ }
+
+ if (add_chan(po, &sp->sa_addr.pptp))
+ error = -EBUSY;
+ else
+ sk->sk_state |= PPPOX_BOUND;
+out:
release_sock(sk);
return error;
}
@@ -498,7 +512,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
}
opt->dst_addr = sp->sa_addr.pptp;
- sk->sk_state = PPPOX_CONNECTED;
+ sk->sk_state |= PPPOX_CONNECTED;
end:
release_sock(sk);
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 01f08a7751f7..e7034c55e796 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -280,7 +280,7 @@ static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbo
struct net_device *ndev = dev_id;
struct rionet_private *rnet = netdev_priv(ndev);
- spin_lock(&rnet->lock);
+ spin_lock(&rnet->tx_lock);
if (netif_msg_intr(rnet))
printk(KERN_INFO
@@ -299,7 +299,7 @@ static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbo
if (rnet->tx_cnt < RIONET_TX_RING_SIZE)
netif_wake_queue(ndev);
- spin_unlock(&rnet->lock);
+ spin_unlock(&rnet->tx_lock);
}
static int rionet_open(struct net_device *ndev)
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 651d35ea22c5..59fefca74263 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1845,10 +1845,10 @@ static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
struct team *team = netdev_priv(dev);
struct team_port *port;
- rcu_read_lock();
- list_for_each_entry_rcu(port, &team->port_list, list)
+ mutex_lock(&team->lock);
+ list_for_each_entry(port, &team->port_list, list)
vlan_vid_del(port->dev, proto, vid);
- rcu_read_unlock();
+ mutex_unlock(&team->lock);
return 0;
}
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 2228a539184a..de0d15e03e03 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -621,7 +621,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
/* Re-attach the filter to persist device */
if (!skip_filter && (tun->filter_attached == true)) {
- err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
+ err = __sk_attach_filter(&tun->fprog, tfile->socket.sk,
+ lockdep_rtnl_is_held());
if (!err)
goto out;
}
@@ -1000,7 +1001,6 @@ static void tun_net_init(struct net_device *dev)
/* Zero header length */
dev->type = ARPHRD_NONE;
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
- dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */
break;
case IFF_TAP:
@@ -1012,7 +1012,6 @@ static void tun_net_init(struct net_device *dev)
eth_hw_addr_random(dev);
- dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */
break;
}
}
@@ -1463,6 +1462,8 @@ static void tun_setup(struct net_device *dev)
dev->ethtool_ops = &tun_ethtool_ops;
dev->destructor = tun_free_netdev;
+ /* We prefer our own queue length */
+ dev->tx_queue_len = TUN_READQ_SIZE;
}
/* Trivial set of netlink ops to allow deleting tun or tap
@@ -1804,7 +1805,7 @@ static void tun_detach_filter(struct tun_struct *tun, int n)
for (i = 0; i < n; i++) {
tfile = rtnl_dereference(tun->tfiles[i]);
- sk_detach_filter(tfile->socket.sk);
+ __sk_detach_filter(tfile->socket.sk, lockdep_rtnl_is_held());
}
tun->filter_attached = false;
@@ -1817,7 +1818,8 @@ static int tun_attach_filter(struct tun_struct *tun)
for (i = 0; i < tun->numqueues; i++) {
tfile = rtnl_dereference(tun->tfiles[i]);
- ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
+ ret = __sk_attach_filter(&tun->fprog, tfile->socket.sk,
+ lockdep_rtnl_is_held());
if (ret) {
tun_detach_filter(tun, i);
return ret;
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 3da70bf9936a..7cba2c3759df 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -160,6 +160,12 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
info->u = header.usb_cdc_union_desc;
info->header = header.usb_cdc_header_desc;
info->ether = header.usb_cdc_ether_desc;
+ if (!info->u) {
+ if (rndis)
+ goto skip;
+ else /* in that case a quirk is mandatory */
+ goto bad_desc;
+ }
/* we need a master/control interface (what we're
* probed with) and a slave/data interface; union
* descriptors sort this all out.
@@ -256,7 +262,7 @@ skip:
goto bad_desc;
}
- } else if (!info->header || !info->u || (!rndis && !info->ether)) {
+ } else if (!info->header || (!rndis && !info->ether)) {
dev_dbg(&intf->dev, "missing cdc %s%s%sdescriptor\n",
info->header ? "" : "header ",
info->u ? "" : "union ",
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index e8a1144c5a8b..8c2bb77db049 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -794,7 +794,11 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber;
- /* reset data interface */
+ /* Reset data interface. Some devices will not reset properly
+ * unless they are configured first. Toggle the altsetting to
+ * force a reset
+ */
+ usb_set_interface(dev->udev, iface_no, data_altsetting);
temp = usb_set_interface(dev->udev, iface_no, 0);
if (temp) {
dev_dbg(&intf->dev, "set interface failed\n");
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 5fccc5a8153f..a34f491224c1 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -492,6 +492,7 @@ static const struct usb_device_id products[] = {
/* 3. Combined interface devices matching on interface number */
{QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
+ {QMI_FIXED_INTF(0x05c6, 0x6001, 3)}, /* 4G LTE usb-modem U901 */
{QMI_FIXED_INTF(0x05c6, 0x7000, 0)},
{QMI_FIXED_INTF(0x05c6, 0x7001, 1)},
{QMI_FIXED_INTF(0x05c6, 0x7002, 1)},
@@ -698,6 +699,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */
{QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
{QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
+ {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
{QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
{QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
@@ -717,8 +719,10 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */
{QMI_FIXED_INTF(0x1199, 0x9070, 8)}, /* Sierra Wireless MC74xx/EM74xx */
{QMI_FIXED_INTF(0x1199, 0x9070, 10)}, /* Sierra Wireless MC74xx/EM74xx */
- {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx/EM74xx */
- {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx/EM74xx */
+ {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */
+ {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */
+ {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */
+ {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */
{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 0744bf2ef2d6..c2ea4e5666fb 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1766,6 +1766,13 @@ out3:
if (info->unbind)
info->unbind (dev, udev);
out1:
+ /* subdrivers must undo all they did in bind() if they
+ * fail it, but we may fail later and a deferred kevent
+ * may trigger an error resubmitting itself and, worse,
+ * schedule a timer. So we kill it all just in case.
+ */
+ cancel_work_sync(&dev->kevent);
+ del_timer_sync(&dev->delay);
free_netdev(net);
out:
return status;
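
The usbnet error path now quiesces deferred work and the delay timer before free_netdev(), since a kevent or timer armed during a failed bind could otherwise fire on freed memory. A kernel-style sketch of that teardown ordering (the struct is hypothetical):

#include <linux/workqueue.h>
#include <linux/timer.h>

struct foo_dev {
    struct work_struct kevent;
    struct timer_list delay;
};

static void foo_quiesce(struct foo_dev *f)
{
    cancel_work_sync(&f->kevent);    /* waits out a running handler */
    del_timer_sync(&f->delay);       /* and a concurrently firing timer */
    /* only now is it safe to free what they reference */
}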
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 0a242b200df4..903bda437839 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -114,20 +114,23 @@ static struct dst_ops vrf_dst_ops = {
#if IS_ENABLED(CONFIG_IPV6)
static bool check_ipv6_frame(const struct sk_buff *skb)
{
- const struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb->data;
- size_t hlen = sizeof(*ipv6h);
+ const struct ipv6hdr *ipv6h;
+ struct ipv6hdr _ipv6h;
bool rc = true;
- if (skb->len < hlen)
+ ipv6h = skb_header_pointer(skb, 0, sizeof(_ipv6h), &_ipv6h);
+ if (!ipv6h)
goto out;
if (ipv6h->nexthdr == NEXTHDR_ICMP) {
const struct icmp6hdr *icmph;
+ struct icmp6hdr _icmph;
- if (skb->len < hlen + sizeof(*icmph))
+ icmph = skb_header_pointer(skb, sizeof(_ipv6h),
+ sizeof(_icmph), &_icmph);
+ if (!icmph)
goto out;
- icmph = (struct icmp6hdr *)(skb->data + sizeof(*ipv6h));
switch (icmph->icmp6_type) {
case NDISC_ROUTER_SOLICITATION:
case NDISC_ROUTER_ADVERTISEMENT:
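
The vrf fix swaps direct skb->data arithmetic for skb_header_pointer(), which copies the requested span into a caller-provided buffer when the skb is nonlinear and returns NULL when the packet is too short, covering both hazards of the old code at once. A kernel-style sketch of the accessor pattern (the helper name is hypothetical):

#include <linux/skbuff.h>
#include <linux/ipv6.h>

/* Return the IPv6 nexthdr byte, or -1 if the packet is too short. */
static int ipv6_nexthdr_of(const struct sk_buff *skb)
{
    struct ipv6hdr _h;
    const struct ipv6hdr *h;

    h = skb_header_pointer(skb, 0, sizeof(_h), &_h);
    if (!h)
        return -1;
    return h->nexthdr;
}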
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index ba363cedef80..3c0df70e2f53 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1306,8 +1306,10 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
gbp = (struct vxlanhdr_gbp *)vxh;
md->gbp = ntohs(gbp->policy_id);
- if (tun_dst)
+ if (tun_dst) {
tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT;
+ tun_dst->u.tun_info.options_len = sizeof(*md);
+ }
if (gbp->dont_learn)
md->gbp |= VXLAN_GBP_DONT_LEARN;
@@ -1984,11 +1986,6 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
vxlan->cfg.port_max, true);
if (info) {
- if (info->key.tun_flags & TUNNEL_CSUM)
- flags |= VXLAN_F_UDP_CSUM;
- else
- flags &= ~VXLAN_F_UDP_CSUM;
-
ttl = info->key.ttl;
tos = info->key.tos;
@@ -2003,8 +2000,15 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
goto drop;
sk = vxlan->vn4_sock->sock->sk;
- if (info && (info->key.tun_flags & TUNNEL_DONT_FRAGMENT))
- df = htons(IP_DF);
+ if (info) {
+ if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
+ df = htons(IP_DF);
+
+ if (info->key.tun_flags & TUNNEL_CSUM)
+ flags |= VXLAN_F_UDP_CSUM;
+ else
+ flags &= ~VXLAN_F_UDP_CSUM;
+ }
memset(&fl4, 0, sizeof(fl4));
fl4.flowi4_oif = rdst ? rdst->remote_ifindex : 0;
@@ -2102,6 +2106,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
return;
}
+ if (info) {
+ if (info->key.tun_flags & TUNNEL_CSUM)
+ flags &= ~VXLAN_F_UDP_ZERO_CSUM6_TX;
+ else
+ flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;
+ }
+
ttl = ttl ? : ip6_dst_hoplimit(ndst);
err = vxlan6_xmit_skb(ndst, sk, skb, dev, &saddr, &dst->sin6.sin6_addr,
0, ttl, src_port, dst_port, htonl(vni << 8), md,
@@ -2751,7 +2762,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
struct vxlan_config *conf)
{
struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
- struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_dev *vxlan = netdev_priv(dev), *tmp;
struct vxlan_rdst *dst = &vxlan->default_dst;
unsigned short needed_headroom = ETH_HLEN;
int err;
@@ -2817,9 +2828,15 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
if (!vxlan->cfg.age_interval)
vxlan->cfg.age_interval = FDB_AGE_DEFAULT;
- if (vxlan_find_vni(src_net, conf->vni, use_ipv6 ? AF_INET6 : AF_INET,
- vxlan->cfg.dst_port, vxlan->flags))
+ list_for_each_entry(tmp, &vn->vxlan_list, next) {
+ if (tmp->cfg.vni == conf->vni &&
+ (tmp->default_dst.remote_ip.sa.sa_family == AF_INET6 ||
+ tmp->cfg.saddr.sa.sa_family == AF_INET6) == use_ipv6 &&
+ tmp->cfg.dst_port == vxlan->cfg.dst_port &&
+ (tmp->flags & VXLAN_F_RCV_FLAGS) ==
+ (vxlan->flags & VXLAN_F_RCV_FLAGS))
return -EEXIST;
+ }
dev->ethtool_ops = &vxlan_ethtool_ops;
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 44541dbc5c28..69b994f3b8c5 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2516,7 +2516,7 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->mem_start = card->phys_mem
+ BUF_OFFSET ( txBuffer[i][0][0]);
dev->mem_end = card->phys_mem
- + BUF_OFFSET ( txBuffer[i][NUM_TX_BUFFER][0]);
+ BUF_OFFSET ( txBuffer[i][NUM_TX_BUFFER - 1][LEN_TX_BUFFER - 1]);
dev->base_addr = card->pci_conf;
dev->irq = card->irq;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index cc81482c934d..113a43fca9cf 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -403,10 +403,9 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
if (match) {
if (AR_SREV_9287(ah)) {
- /* FIXME: array overrun? */
for (i = 0; i < numXpdGains; i++) {
minPwrT4[i] = data_9287[idxL].pwrPdg[i][0];
- maxPwrT4[i] = data_9287[idxL].pwrPdg[i][4];
+ maxPwrT4[i] = data_9287[idxL].pwrPdg[i][intercepts - 1];
ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
data_9287[idxL].pwrPdg[i],
data_9287[idxL].vpdPdg[i],
@@ -416,7 +415,7 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
} else if (eeprom_4k) {
for (i = 0; i < numXpdGains; i++) {
minPwrT4[i] = data_4k[idxL].pwrPdg[i][0];
- maxPwrT4[i] = data_4k[idxL].pwrPdg[i][4];
+ maxPwrT4[i] = data_4k[idxL].pwrPdg[i][intercepts - 1];
ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
data_4k[idxL].pwrPdg[i],
data_4k[idxL].vpdPdg[i],
@@ -426,7 +425,7 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
} else {
for (i = 0; i < numXpdGains; i++) {
minPwrT4[i] = data_def[idxL].pwrPdg[i][0];
- maxPwrT4[i] = data_def[idxL].pwrPdg[i][4];
+ maxPwrT4[i] = data_def[idxL].pwrPdg[i][intercepts - 1];
ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
data_def[idxL].pwrPdg[i],
data_def[idxL].vpdPdg[i],
diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
index e18629a16fb0..0961f33de05e 100644
--- a/drivers/net/wireless/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
@@ -1154,6 +1154,9 @@ int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan)
priv->ucode_loaded = false;
iwl_trans_stop_device(priv->trans);
+ ret = iwl_trans_start_hw(priv->trans);
+ if (ret)
+ goto out;
priv->wowlan = true;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index d906fa13ba97..610c442c7ab2 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -106,7 +106,7 @@ static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
sizeof(tx_ant_cmd), &tx_ant_cmd);
}
-static void iwl_free_fw_paging(struct iwl_mvm *mvm)
+void iwl_free_fw_paging(struct iwl_mvm *mvm)
{
int i;
@@ -126,6 +126,8 @@ static void iwl_free_fw_paging(struct iwl_mvm *mvm)
get_order(mvm->fw_paging_db[i].fw_paging_size));
}
kfree(mvm->trans->paging_download_buf);
+ mvm->trans->paging_download_buf = NULL;
+
memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index 4bde2d027dcd..244e26c26821 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -1190,6 +1190,9 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
+/* Paging */
+void iwl_free_fw_paging(struct iwl_mvm *mvm);
+
/* MVM debugfs */
#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir);
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index 13c97f665ba8..c3adf2bcdc85 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -645,6 +645,8 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
kfree(mvm->nvm_sections[i].data);
+ iwl_free_fw_paging(mvm);
+
iwl_mvm_tof_clean(mvm);
ieee80211_free_hw(mvm->hw);
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index d6e0c1b5c20c..8215d7405f64 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -1267,6 +1267,10 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
return -EBUSY;
}
+ /* we don't support "match all" in the firmware */
+ if (!req->n_match_sets)
+ return -EOPNOTSUPP;
+
ret = iwl_mvm_check_running_scans(mvm, type);
if (ret)
return ret;
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index c652a66be803..6743edf43aa8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -421,6 +421,15 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
return -1;
}
+ /*
+ * Increase the pending frames counter, so that later when a reply comes
+ * in and the counter is decreased - we don't start getting negative
+ * values.
+ * Note that we don't need to make sure it isn't agg'd, since we're
+ * TXing non-sta
+ */
+ atomic_inc(&mvm->pending_frames[sta_id]);
+
return 0;
}
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 639761fb2bfb..d58c094f2f04 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -384,6 +384,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5C10, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
@@ -401,10 +402,10 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x095A, 0x900A, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x9210, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x9310, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)},
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 90283453073c..8c7204738aa3 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -7,6 +7,7 @@
*
* Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
*
* Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -924,9 +926,16 @@ monitor:
if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
trans_pcie->fw_mon_phys >> dest->base_shift);
- iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
- (trans_pcie->fw_mon_phys +
- trans_pcie->fw_mon_size) >> dest->end_shift);
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
+ (trans_pcie->fw_mon_phys +
+ trans_pcie->fw_mon_size - 256) >>
+ dest->end_shift);
+ else
+ iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
+ (trans_pcie->fw_mon_phys +
+ trans_pcie->fw_mon_size) >>
+ dest->end_shift);
}
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index f46c9d7f6528..7f471bff435c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -801,7 +801,9 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
hw_queue);
if (rx_remained_cnt == 0)
return;
-
+ buffer_desc = &rtlpci->rx_ring[rxring_idx].buffer_desc[
+ rtlpci->rx_ring[rxring_idx].idx];
+ pdesc = (struct rtl_rx_desc *)skb->data;
} else { /* rx descriptor */
pdesc = &rtlpci->rx_ring[rxring_idx].desc[
rtlpci->rx_ring[rxring_idx].idx];
@@ -824,13 +826,6 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
if (unlikely(!new_skb))
goto no_new;
- if (rtlpriv->use_new_trx_flow) {
- buffer_desc =
- &rtlpci->rx_ring[rxring_idx].buffer_desc
- [rtlpci->rx_ring[rxring_idx].idx];
- /*means rx wifi info*/
- pdesc = (struct rtl_rx_desc *)skb->data;
- }
memset(&rx_status , 0 , sizeof(rx_status));
rtlpriv->cfg->ops->query_rx_desc(hw, &stats,
&rx_status, (u8 *)pdesc, skb);
diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c
index a62bf0a65c32..5be34118e0af 100644
--- a/drivers/net/wireless/realtek/rtlwifi/regd.c
+++ b/drivers/net/wireless/realtek/rtlwifi/regd.c
@@ -351,7 +351,6 @@ static const struct ieee80211_regdomain *_rtl_regdomain_select(
case COUNTRY_CODE_SPAIN:
case COUNTRY_CODE_FRANCE:
case COUNTRY_CODE_ISRAEL:
- case COUNTRY_CODE_WORLD_WIDE_13:
return &rtl_regdom_12_13;
case COUNTRY_CODE_MKK:
case COUNTRY_CODE_MKK1:
@@ -360,6 +359,7 @@ static const struct ieee80211_regdomain *_rtl_regdomain_select(
return &rtl_regdom_14_60_64;
case COUNTRY_CODE_GLOBAL_DOMAIN:
return &rtl_regdom_14;
+ case COUNTRY_CODE_WORLD_WIDE_13:
case COUNTRY_CODE_WORLD_WIDE_13_5G_ALL:
return &rtl_regdom_12_13_5g_all;
default:
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
index 11344121c55e..47e32cb0ec1a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
@@ -88,8 +88,6 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
u8 tid;
rtl8188ee_bt_reg_init(hw);
- rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
-
rtlpriv->dm.dm_initialgain_enable = 1;
rtlpriv->dm.dm_flag = 0;
rtlpriv->dm.disable_framebursting = 0;
@@ -138,6 +136,11 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
+ rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
+ rtlpriv->cfg->mod_params->sw_crypto =
+ rtlpriv->cfg->mod_params->sw_crypto;
+ rtlpriv->cfg->mod_params->disable_watchdog =
+ rtlpriv->cfg->mod_params->disable_watchdog;
if (rtlpriv->cfg->mod_params->disable_watchdog)
pr_info("watchdog disabled\n");
if (!rtlpriv->psc.inactiveps)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
index de6cb6c3a48c..4780bdc63b2b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
@@ -139,6 +139,8 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
+ rtlpriv->cfg->mod_params->sw_crypto =
+ rtlpriv->cfg->mod_params->sw_crypto;
if (!rtlpriv->psc.inactiveps)
pr_info("rtl8192ce: Power Save off (module option)\n");
if (!rtlpriv->psc.fwctrl_lps)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
index fd4a5353d216..7c6f7f0d18c6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
@@ -65,6 +65,8 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->dm.disable_framebursting = false;
rtlpriv->dm.thermalvalue = 0;
rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
+ rtlpriv->cfg->mod_params->sw_crypto =
+ rtlpriv->cfg->mod_params->sw_crypto;
/* for firmware buf */
rtlpriv->rtlhal.pfirmware = vzalloc(0x4000);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
index b19d0398215f..c6e09a19de1a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
@@ -376,8 +376,8 @@ module_param_named(swlps, rtl92de_mod_params.swctrl_lps, bool, 0444);
module_param_named(fwlps, rtl92de_mod_params.fwctrl_lps, bool, 0444);
MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
-MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
-MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
+MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 1)\n");
+MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 0)\n");
MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
index e1fd27c888bf..31baca41ac2f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
@@ -187,6 +187,8 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
+ rtlpriv->cfg->mod_params->sw_crypto =
+ rtlpriv->cfg->mod_params->sw_crypto;
if (!rtlpriv->psc.inactiveps)
pr_info("Power Save off (module option)\n");
if (!rtlpriv->psc.fwctrl_lps)
@@ -425,8 +427,8 @@ module_param_named(swlps, rtl92se_mod_params.swctrl_lps, bool, 0444);
module_param_named(fwlps, rtl92se_mod_params.fwctrl_lps, bool, 0444);
MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
-MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
-MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
+MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 1)\n");
+MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 0)\n");
MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
index 3859b3e3d158..ff49a8c0ff61 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
@@ -150,6 +150,11 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
+ rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
+ rtlpriv->cfg->mod_params->sw_crypto =
+ rtlpriv->cfg->mod_params->sw_crypto;
+ rtlpriv->cfg->mod_params->disable_watchdog =
+ rtlpriv->cfg->mod_params->disable_watchdog;
if (rtlpriv->cfg->mod_params->disable_watchdog)
pr_info("watchdog disabled\n");
rtlpriv->psc.reg_fwctrl_lps = 3;
@@ -267,6 +272,8 @@ static struct rtl_mod_params rtl8723e_mod_params = {
.swctrl_lps = false,
.fwctrl_lps = true,
.debug = DBG_EMERG,
+ .msi_support = false,
+ .disable_watchdog = false,
};
static struct rtl_hal_cfg rtl8723e_hal_cfg = {
@@ -383,12 +390,14 @@ module_param_named(debug, rtl8723e_mod_params.debug, int, 0444);
module_param_named(ips, rtl8723e_mod_params.inactiveps, bool, 0444);
module_param_named(swlps, rtl8723e_mod_params.swctrl_lps, bool, 0444);
module_param_named(fwlps, rtl8723e_mod_params.fwctrl_lps, bool, 0444);
+module_param_named(msi, rtl8723e_mod_params.msi_support, bool, 0444);
module_param_named(disable_watchdog, rtl8723e_mod_params.disable_watchdog,
bool, 0444);
MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
+MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n");
MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n");
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
index d091f1d5f91e..a78eaeda0008 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
@@ -93,7 +93,6 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
rtl8723be_bt_reg_init(hw);
- rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
rtlpriv->dm.dm_initialgain_enable = 1;
@@ -151,6 +150,10 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
+ rtlpriv->cfg->mod_params->sw_crypto =
+ rtlpriv->cfg->mod_params->sw_crypto;
+ rtlpriv->cfg->mod_params->disable_watchdog =
+ rtlpriv->cfg->mod_params->disable_watchdog;
if (rtlpriv->cfg->mod_params->disable_watchdog)
pr_info("watchdog disabled\n");
rtlpriv->psc.reg_fwctrl_lps = 3;
@@ -267,6 +270,9 @@ static struct rtl_mod_params rtl8723be_mod_params = {
.inactiveps = true,
.swctrl_lps = false,
.fwctrl_lps = true,
+ .msi_support = false,
+ .disable_watchdog = false,
+ .debug = DBG_EMERG,
};
static struct rtl_hal_cfg rtl8723be_hal_cfg = {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
index 142bdff4ed60..4159f9b14db6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
@@ -95,8 +95,6 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
rtl8821ae_bt_reg_init(hw);
- rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
- rtlpci->int_clear = rtlpriv->cfg->mod_params->int_clear;
rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
rtlpriv->dm.dm_initialgain_enable = 1;
@@ -168,12 +166,15 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
- rtlpci->msi_support = rtlpriv->cfg->mod_params->int_clear;
+ rtlpci->int_clear = rtlpriv->cfg->mod_params->int_clear;
+ rtlpriv->cfg->mod_params->sw_crypto =
+ rtlpriv->cfg->mod_params->sw_crypto;
+ rtlpriv->cfg->mod_params->disable_watchdog =
+ rtlpriv->cfg->mod_params->disable_watchdog;
if (rtlpriv->cfg->mod_params->disable_watchdog)
pr_info("watchdog disabled\n");
rtlpriv->psc.reg_fwctrl_lps = 3;
rtlpriv->psc.reg_max_lps_awakeintvl = 5;
- rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
/* for ASPM, you can close aspm through
* set const_support_pciaspm = 0
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index 2721cf89fb16..aac1ed3f7bb4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -531,6 +531,8 @@ static void _rtl_usb_rx_process_noagg(struct ieee80211_hw *hw,
ieee80211_rx(hw, skb);
else
dev_kfree_skb_any(skb);
+ } else {
+ dev_kfree_skb_any(skb);
}
}
diff --git a/drivers/net/wireless/ti/wlcore/io.h b/drivers/net/wireless/ti/wlcore/io.h
index 0305729d0986..10cf3747694d 100644
--- a/drivers/net/wireless/ti/wlcore/io.h
+++ b/drivers/net/wireless/ti/wlcore/io.h
@@ -207,19 +207,23 @@ static inline int __must_check wlcore_write_reg(struct wl1271 *wl, int reg,
static inline void wl1271_power_off(struct wl1271 *wl)
{
- int ret;
+ int ret = 0;
if (!test_bit(WL1271_FLAG_GPIO_POWER, &wl->flags))
return;
- ret = wl->if_ops->power(wl->dev, false);
+ if (wl->if_ops->power)
+ ret = wl->if_ops->power(wl->dev, false);
if (!ret)
clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
}
static inline int wl1271_power_on(struct wl1271 *wl)
{
- int ret = wl->if_ops->power(wl->dev, true);
+ int ret = 0;
+
+ if (wl->if_ops->power)
+ ret = wl->if_ops->power(wl->dev, true);
if (ret == 0)
set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index 236b41090827..44f059f7f34e 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -73,7 +73,10 @@
*/
#define SPI_AGGR_BUFFER_SIZE (4 * PAGE_SIZE)
-#define WSPI_MAX_NUM_OF_CHUNKS (SPI_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE)
+/* Maximum number of SPI write chunks */
+#define WSPI_MAX_NUM_OF_CHUNKS \
+ ((SPI_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE) + 1)
+
struct wl12xx_spi_glue {
struct device *dev;
@@ -268,9 +271,10 @@ static int __must_check wl12xx_spi_raw_write(struct device *child, int addr,
void *buf, size_t len, bool fixed)
{
struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
- struct spi_transfer t[2 * (WSPI_MAX_NUM_OF_CHUNKS + 1)];
+ /* SPI write buffers - 2 for each chunk */
+ struct spi_transfer t[2 * WSPI_MAX_NUM_OF_CHUNKS];
struct spi_message m;
- u32 commands[WSPI_MAX_NUM_OF_CHUNKS];
+ u32 commands[WSPI_MAX_NUM_OF_CHUNKS]; /* 1 command per chunk */
u32 *cmd;
u32 chunk_len;
int i;
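
The wlcore sizing bug was truncating division: SPI_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE drops any partial chunk, so the patch adds one array entry to cover the remainder (at the cost of one spare slot on exact multiples). The exact form is a ceiling division; a standalone sketch:

#include <assert.h>

static unsigned int chunks_needed(unsigned int total, unsigned int chunk)
{
    return (total + chunk - 1) / chunk;    /* ceiling division */
}

int main(void)
{
    assert(chunks_needed(16384, 4096) == 4);    /* exact multiple */
    assert(chunks_needed(16385, 4096) == 5);    /* remainder chunk */
    return 0;
}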
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 7e2c43f701bc..5f47356d6942 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -335,7 +335,7 @@ static const struct nd_cmd_desc __nd_cmd_dimm_descs[] = {
[ND_CMD_IMPLEMENTED] = { },
[ND_CMD_SMART] = {
.out_num = 2,
- .out_sizes = { 4, 8, },
+ .out_sizes = { 4, 128, },
},
[ND_CMD_SMART_THRESHOLD] = {
.out_num = 2,
@@ -513,10 +513,10 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
/* fail write commands (when read-only) */
if (read_only)
- switch (ioctl_cmd) {
- case ND_IOCTL_VENDOR:
- case ND_IOCTL_SET_CONFIG_DATA:
- case ND_IOCTL_ARS_START:
+ switch (cmd) {
+ case ND_CMD_VENDOR:
+ case ND_CMD_SET_CONFIG_DATA:
+ case ND_CMD_ARS_START:
dev_dbg(&nvdimm_bus->dev, "'%s' command while read-only.\n",
nvdimm ? nvdimm_cmd_name(cmd)
: nvdimm_bus_cmd_name(cmd));
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 0955b2cb10fe..62120c38d56b 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -77,6 +77,59 @@ static bool is_namespace_io(struct device *dev)
return dev ? dev->type == &namespace_io_device_type : false;
}
+static int is_uuid_busy(struct device *dev, void *data)
+{
+ u8 *uuid1 = data, *uuid2 = NULL;
+
+ if (is_namespace_pmem(dev)) {
+ struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
+
+ uuid2 = nspm->uuid;
+ } else if (is_namespace_blk(dev)) {
+ struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
+
+ uuid2 = nsblk->uuid;
+ } else if (is_nd_btt(dev)) {
+ struct nd_btt *nd_btt = to_nd_btt(dev);
+
+ uuid2 = nd_btt->uuid;
+ } else if (is_nd_pfn(dev)) {
+ struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+
+ uuid2 = nd_pfn->uuid;
+ }
+
+ if (uuid2 && memcmp(uuid1, uuid2, NSLABEL_UUID_LEN) == 0)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int is_namespace_uuid_busy(struct device *dev, void *data)
+{
+ if (is_nd_pmem(dev) || is_nd_blk(dev))
+ return device_for_each_child(dev, data, is_uuid_busy);
+ return 0;
+}
+
+/**
+ * nd_is_uuid_unique - verify that no other namespace has @uuid
+ * @dev: any device on a nvdimm_bus
+ * @uuid: uuid to check
+ */
+bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
+{
+ struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
+
+ if (!nvdimm_bus)
+ return false;
+ WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev));
+ if (device_for_each_child(&nvdimm_bus->dev, uuid,
+ is_namespace_uuid_busy) != 0)
+ return false;
+ return true;
+}
+
bool pmem_should_map_pages(struct device *dev)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 71805a1aa0f3..9d3974591cd6 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -275,7 +275,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)
} else {
/* from init we validate */
if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0)
- return -EINVAL;
+ return -ENODEV;
}
/*
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index 529f3f02e7b2..9521696c9385 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -134,62 +134,6 @@ int nd_region_to_nstype(struct nd_region *nd_region)
}
EXPORT_SYMBOL(nd_region_to_nstype);
-static int is_uuid_busy(struct device *dev, void *data)
-{
- struct nd_region *nd_region = to_nd_region(dev->parent);
- u8 *uuid = data;
-
- switch (nd_region_to_nstype(nd_region)) {
- case ND_DEVICE_NAMESPACE_PMEM: {
- struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
-
- if (!nspm->uuid)
- break;
- if (memcmp(uuid, nspm->uuid, NSLABEL_UUID_LEN) == 0)
- return -EBUSY;
- break;
- }
- case ND_DEVICE_NAMESPACE_BLK: {
- struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
-
- if (!nsblk->uuid)
- break;
- if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) == 0)
- return -EBUSY;
- break;
- }
- default:
- break;
- }
-
- return 0;
-}
-
-static int is_namespace_uuid_busy(struct device *dev, void *data)
-{
- if (is_nd_pmem(dev) || is_nd_blk(dev))
- return device_for_each_child(dev, data, is_uuid_busy);
- return 0;
-}
-
-/**
- * nd_is_uuid_unique - verify that no other namespace has @uuid
- * @dev: any device on a nvdimm_bus
- * @uuid: uuid to check
- */
-bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
-{
- struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
-
- if (!nvdimm_bus)
- return false;
- WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev));
- if (device_for_each_child(&nvdimm_bus->dev, uuid,
- is_namespace_uuid_busy) != 0)
- return false;
- return true;
-}
-
static ssize_t size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 4fa916dffc91..72a2c1969646 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -636,6 +636,13 @@ static u32 __of_msi_map_rid(struct device *dev, struct device_node **np,
msi_base = be32_to_cpup(msi_map + 2);
rid_len = be32_to_cpup(msi_map + 3);
+ if (rid_base & ~map_mask) {
+ dev_err(parent_dev,
+ "Invalid msi-map translation - msi-map-mask (0x%x) ignores rid-base (0x%x)\n",
+ map_mask, rid_base);
+ return rid_out;
+ }
+
msi_controller_node = of_find_node_by_phandle(phandle);
matched = (masked_rid >= rid_base &&
@@ -655,7 +662,7 @@ static u32 __of_msi_map_rid(struct device *dev, struct device_node **np,
if (!matched)
return rid_out;
- rid_out = masked_rid + msi_base;
+ rid_out = masked_rid - rid_base + msi_base;
dev_dbg(dev,
"msi-map at: %s, using mask %08x, rid-base: %08x, msi-base: %08x, length: %08x, rid: %08x -> %08x\n",
dev_name(parent_dev), map_mask, rid_base, msi_base,
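
Concretely, for an msi-map entry with rid-base 0x0100, msi-base 0x2000 and length 0x100, a masked RID of 0x0120 must translate to 0x0120 - 0x0100 + 0x2000 = 0x2020; the old masked_rid + msi_base formula gave 0x2120 whenever rid-base was non-zero. A standalone sketch of the corrected mapping (not the in-kernel parser; the phandle cell is omitted):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t msi_map_rid(uint32_t rid, uint32_t rid_base,
                                uint32_t msi_base, uint32_t len)
    {
            if (rid < rid_base || rid >= rid_base + len)
                    return rid;                     /* no match: identity */
            return rid - rid_base + msi_base;       /* offset into MSI space */
    }

    int main(void)
    {
            printf("0x%x\n", msi_map_rid(0x0120, 0x0100, 0x2000, 0x100));
            return 0;
    }
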
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 1a3556a9e9ea..ed01c0172e4a 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -32,11 +32,13 @@ int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
phys_addr_t *res_base)
{
+ phys_addr_t base;
/*
* We use __memblock_alloc_base() because memblock_alloc_base()
* panic()s on allocation failure.
*/
- phys_addr_t base = __memblock_alloc_base(size, align, end);
+ end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
+ base = __memblock_alloc_base(size, align, end);
if (!base)
return -ENOMEM;
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index d3346d23963b..89b3befc7155 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -140,6 +140,8 @@ static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
type_mask |= IORESOURCE_TYPE_BITS;
pci_bus_for_each_resource(bus, r, i) {
+ resource_size_t min_used = min;
+
if (!r)
continue;
@@ -163,12 +165,12 @@ static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
* overrides "min".
*/
if (avail.start)
- min = avail.start;
+ min_used = avail.start;
max = avail.end;
/* Ok, try it out.. */
- ret = allocate_resource(r, res, size, min, max,
+ ret = allocate_resource(r, res, size, min_used, max,
align, alignf, alignf_data);
if (ret == 0)
return 0;
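
The bug here is loop-carried state: overwriting "min" with the first window's avail.start made every later window inherit a constraint that belonged to an earlier iteration, so allocations could be placed against the wrong floor or fail outright. Copying into a per-iteration local keeps each window independent; a distilled sketch of the pattern:

    /* Sketch: keep per-iteration overrides out of the loop-wide input. */
    static int alloc_from_windows(const unsigned long *win_start, int n,
                                  unsigned long min)
    {
            int i;

            for (i = 0; i < n; i++) {
                    unsigned long min_used = min;   /* fresh copy each pass */

                    if (win_start[i])
                            min_used = win_start[i];        /* local override */

                    /* ... try allocating from window i using min_used ... */
            }
            return -1;      /* nothing fit */
    }
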
diff --git a/drivers/pci/host/pci-dra7xx.c b/drivers/pci/host/pci-dra7xx.c
index 8c3688046c02..923607bdabc5 100644
--- a/drivers/pci/host/pci-dra7xx.c
+++ b/drivers/pci/host/pci-dra7xx.c
@@ -302,7 +302,8 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
}
ret = devm_request_irq(&pdev->dev, pp->irq,
- dra7xx_pcie_msi_irq_handler, IRQF_SHARED,
+ dra7xx_pcie_msi_irq_handler,
+ IRQF_SHARED | IRQF_NO_THREAD,
"dra7-pcie-msi", pp);
if (ret) {
dev_err(&pdev->dev, "failed to request irq\n");
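
This and the following PCI host hunks all make the same change: these handlers demultiplex MSIs via generic_handle_irq() and must run in hard-IRQ context, so booting with threadirqs (forced interrupt threading) broke them; IRQF_NO_THREAD opts a handler out of forced threading. A sketch of the request pattern with hypothetical handler and cookie names:

    #include <linux/device.h>
    #include <linux/interrupt.h>

    /* Hypothetical demux handler: must not be moved to a kernel thread. */
    static irqreturn_t msi_demux_irq(int irq, void *data)
    {
            /* ... read status bits, generic_handle_irq() each pending MSI ... */
            return IRQ_HANDLED;
    }

    static int request_msi_irq(struct device *dev, int irq, void *port)
    {
            return devm_request_irq(dev, irq, msi_demux_irq,
                                    IRQF_SHARED | IRQF_NO_THREAD,
                                    "pcie-msi-demux", port);
    }
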
diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c
index 01095e1160a4..d997d22d4231 100644
--- a/drivers/pci/host/pci-exynos.c
+++ b/drivers/pci/host/pci-exynos.c
@@ -522,7 +522,8 @@ static int __init exynos_add_pcie_port(struct pcie_port *pp,
ret = devm_request_irq(&pdev->dev, pp->msi_irq,
exynos_pcie_msi_irq_handler,
- IRQF_SHARED, "exynos-pcie", pp);
+ IRQF_SHARED | IRQF_NO_THREAD,
+ "exynos-pcie", pp);
if (ret) {
dev_err(&pdev->dev, "failed to request msi irq\n");
return ret;
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
index 22e8224126fd..9ce7cd148c86 100644
--- a/drivers/pci/host/pci-imx6.c
+++ b/drivers/pci/host/pci-imx6.c
@@ -537,7 +537,8 @@ static int __init imx6_add_pcie_port(struct pcie_port *pp,
ret = devm_request_irq(&pdev->dev, pp->msi_irq,
imx6_pcie_msi_handler,
- IRQF_SHARED, "mx6-pcie-msi", pp);
+ IRQF_SHARED | IRQF_NO_THREAD,
+ "mx6-pcie-msi", pp);
if (ret) {
dev_err(&pdev->dev, "failed to request MSI irq\n");
return ret;
diff --git a/drivers/pci/host/pci-keystone-dw.c b/drivers/pci/host/pci-keystone-dw.c
index ed34c9520a02..6153853ca9c3 100644
--- a/drivers/pci/host/pci-keystone-dw.c
+++ b/drivers/pci/host/pci-keystone-dw.c
@@ -58,11 +58,6 @@
#define to_keystone_pcie(x) container_of(x, struct keystone_pcie, pp)
-static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
-{
- return sys->private_data;
-}
-
static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
u32 *bit_pos)
{
@@ -108,7 +103,7 @@ static void ks_dw_pcie_msi_irq_ack(struct irq_data *d)
struct pcie_port *pp;
msi = irq_data_get_msi_desc(d);
- pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
+ pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
ks_pcie = to_keystone_pcie(pp);
offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos);
@@ -146,7 +141,7 @@ static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
u32 offset;
msi = irq_data_get_msi_desc(d);
- pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
+ pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
ks_pcie = to_keystone_pcie(pp);
offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
@@ -167,7 +162,7 @@ static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d)
u32 offset;
msi = irq_data_get_msi_desc(d);
- pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
+ pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
ks_pcie = to_keystone_pcie(pp);
offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 3018ae52e092..30323114c53c 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -1288,7 +1288,7 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
msi->irq = err;
- err = request_irq(msi->irq, tegra_pcie_msi_irq, 0,
+ err = request_irq(msi->irq, tegra_pcie_msi_irq, IRQF_NO_THREAD,
tegra_msi_irq_chip.name, pcie);
if (err < 0) {
dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index f4fa6c537448..414c33686621 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -720,14 +720,16 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
/* Two irqs are for MSI, but they are also used for non-MSI irqs */
err = devm_request_irq(&pdev->dev, msi->irq1, rcar_pcie_msi_irq,
- IRQF_SHARED, rcar_msi_irq_chip.name, pcie);
+ IRQF_SHARED | IRQF_NO_THREAD,
+ rcar_msi_irq_chip.name, pcie);
if (err < 0) {
dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
goto err;
}
err = devm_request_irq(&pdev->dev, msi->irq2, rcar_pcie_msi_irq,
- IRQF_SHARED, rcar_msi_irq_chip.name, pcie);
+ IRQF_SHARED | IRQF_NO_THREAD,
+ rcar_msi_irq_chip.name, pcie);
if (err < 0) {
dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
goto err;
diff --git a/drivers/pci/host/pcie-spear13xx.c b/drivers/pci/host/pcie-spear13xx.c
index b95b7563c052..a6cd8233e8c0 100644
--- a/drivers/pci/host/pcie-spear13xx.c
+++ b/drivers/pci/host/pcie-spear13xx.c
@@ -279,7 +279,8 @@ static int spear13xx_add_pcie_port(struct pcie_port *pp,
return -ENODEV;
}
ret = devm_request_irq(dev, pp->irq, spear13xx_pcie_irq_handler,
- IRQF_SHARED, "spear1340-pcie", pp);
+ IRQF_SHARED | IRQF_NO_THREAD,
+ "spear1340-pcie", pp);
if (ret) {
dev_err(dev, "failed to request irq %d\n", pp->irq);
return ret;
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
index 3c7a0d580b1e..4cfa46360d12 100644
--- a/drivers/pci/host/pcie-xilinx.c
+++ b/drivers/pci/host/pcie-xilinx.c
@@ -781,7 +781,8 @@ static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port)
port->irq = irq_of_parse_and_map(node, 0);
err = devm_request_irq(dev, port->irq, xilinx_pcie_intr_handler,
- IRQF_SHARED, "xilinx-pcie", port);
+ IRQF_SHARED | IRQF_NO_THREAD,
+ "xilinx-pcie", port);
if (err) {
dev_err(dev, "unable to request irq %d\n", port->irq);
return err;
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index ff538568a617..0b3e0bfa7be5 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -953,8 +953,10 @@ int acpiphp_enable_slot(struct acpiphp_slot *slot)
{
pci_lock_rescan_remove();
- if (slot->flags & SLOT_IS_GOING_AWAY)
+ if (slot->flags & SLOT_IS_GOING_AWAY) {
+ pci_unlock_rescan_remove();
return -ENODEV;
+ }
/* configure all functions */
if (!(slot->flags & SLOT_ENABLED))
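
The acpiphp change is a textbook lock-leak fix: the early -ENODEV return sat between pci_lock_rescan_remove() and its unlock, so a slot caught going away left the rescan lock held forever. Every exit between lock and unlock needs its own release; a distilled sketch with a generic mutex:

    #include <linux/mutex.h>

    static DEFINE_MUTEX(rescan_lock);

    static int enable_slot_checked(bool going_away)
    {
            mutex_lock(&rescan_lock);

            if (going_away) {
                    mutex_unlock(&rescan_lock);     /* release on early exit too */
                    return -ENODEV;
            }

            /* ... configure functions under the lock ... */

            mutex_unlock(&rescan_lock);
            return 0;
    }
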
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 314db8c1047a..42d8617352ae 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -4772,8 +4772,10 @@ int pci_get_new_domain_nr(void)
void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent)
{
static int use_dt_domains = -1;
- int domain = of_get_pci_domain_nr(parent->of_node);
+ int domain = -1;
+ if (parent)
+ domain = of_get_pci_domain_nr(parent->of_node);
/*
* Check DT domain and use_dt_domains values.
*
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 0bf82a20a0fb..48d21e0edd56 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -262,7 +262,6 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
rpc->rpd = dev;
INIT_WORK(&rpc->dpc_handler, aer_isr);
mutex_init(&rpc->rpc_mutex);
- init_waitqueue_head(&rpc->wait_release);
/* Use PCIe bus function to store rpc into PCIe device */
set_service_data(dev, rpc);
@@ -285,8 +284,7 @@ static void aer_remove(struct pcie_device *dev)
if (rpc->isr)
free_irq(dev->irq, dev);
- wait_event(rpc->wait_release, rpc->prod_idx == rpc->cons_idx);
-
+ flush_work(&rpc->dpc_handler);
aer_disable_rootport(rpc);
kfree(rpc);
set_service_data(dev, NULL);
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index 84420b7c9456..945c939a86c5 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -72,7 +72,6 @@ struct aer_rpc {
* recovery on the same
* root port hierarchy
*/
- wait_queue_head_t wait_release;
};
struct aer_broadcast_data {
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index fba785e9df75..4e14de0f0f98 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -811,8 +811,6 @@ void aer_isr(struct work_struct *work)
while (get_e_source(rpc, &e_src))
aer_isr_one_error(p_device, &e_src);
mutex_unlock(&rpc->rpc_mutex);
-
- wake_up(&rpc->wait_release);
}
/**
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index edb1984201e9..7aafb5fb9336 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -179,6 +179,9 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
u16 orig_cmd;
struct pci_bus_region region, inverted_region;
+ if (dev->non_compliant_bars)
+ return 0;
+
mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
/* No printks while decoding is disabled! */
@@ -1174,6 +1177,7 @@ void pci_msi_setup_pci_dev(struct pci_dev *dev)
int pci_setup_device(struct pci_dev *dev)
{
u32 class;
+ u16 cmd;
u8 hdr_type;
int pos = 0;
struct pci_bus_region region;
@@ -1219,6 +1223,16 @@ int pci_setup_device(struct pci_dev *dev)
/* device class may be changed after fixup */
class = dev->class >> 8;
+ if (dev->non_compliant_bars) {
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
+ dev_info(&dev->dev, "device has non-compliant BARs; disabling IO/MEM decoding\n");
+ cmd &= ~PCI_COMMAND_IO;
+ cmd &= ~PCI_COMMAND_MEMORY;
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ }
+ }
+
switch (dev->hdr_type) { /* header type */
case PCI_HEADER_TYPE_NORMAL: /* standard header */
if (class == PCI_CLASS_BRIDGE_PCI)
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index c777b97207d5..5f70fee59a94 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -53,7 +53,7 @@ struct pcifront_device {
};
struct pcifront_sd {
- int domain;
+ struct pci_sysdata sd;
struct pcifront_device *pdev;
};
@@ -67,7 +67,9 @@ static inline void pcifront_init_sd(struct pcifront_sd *sd,
unsigned int domain, unsigned int bus,
struct pcifront_device *pdev)
{
- sd->domain = domain;
+ /* Because we do not expose that information via XenBus. */
+ sd->sd.node = first_online_node;
+ sd->sd.domain = domain;
sd->pdev = pdev;
}
@@ -468,8 +470,8 @@ static int pcifront_scan_root(struct pcifront_device *pdev,
dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n",
domain, bus);
- bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL);
- sd = kmalloc(sizeof(*sd), GFP_KERNEL);
+ bus_entry = kzalloc(sizeof(*bus_entry), GFP_KERNEL);
+ sd = kzalloc(sizeof(*sd), GFP_KERNEL);
if (!bus_entry || !sd) {
err = -ENOMEM;
goto err_out;
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c
index 4c2fa05b4589..944674ee3464 100644
--- a/drivers/pcmcia/db1xxx_ss.c
+++ b/drivers/pcmcia/db1xxx_ss.c
@@ -56,6 +56,7 @@ struct db1x_pcmcia_sock {
int stschg_irq; /* card-status-change irq */
int card_irq; /* card irq */
int eject_irq; /* db1200/pb1200 have these */
+ int insert_gpio; /* db1000 carddetect gpio */
#define BOARD_TYPE_DEFAULT 0 /* most boards */
#define BOARD_TYPE_DB1200 1 /* IRQs aren't gpios */
@@ -83,7 +84,7 @@ static int db1200_card_inserted(struct db1x_pcmcia_sock *sock)
/* carddetect gpio: low-active */
static int db1000_card_inserted(struct db1x_pcmcia_sock *sock)
{
- return !gpio_get_value(irq_to_gpio(sock->insert_irq));
+ return !gpio_get_value(sock->insert_gpio);
}
static int db1x_card_inserted(struct db1x_pcmcia_sock *sock)
@@ -457,9 +458,15 @@ static int db1x_pcmcia_socket_probe(struct platform_device *pdev)
r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "card");
sock->card_irq = r ? r->start : 0;
- /* insert: irq which triggers on card insertion/ejection */
+ /* insert: irq which triggers on card insertion/ejection
+ * BIG FAT NOTE: on DB1000/1100/1500/1550 we pass a GPIO here!
+ */
r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "insert");
sock->insert_irq = r ? r->start : -1;
+ if (sock->board_type == BOARD_TYPE_DEFAULT) {
+ sock->insert_gpio = r ? r->start : -1;
+ sock->insert_irq = r ? gpio_to_irq(r->start) : -1;
+ }
/* stschg: irq which trigger on card status change (optional) */
r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "stschg");
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 03cb3ea2d2c0..566fd96b522d 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -215,6 +215,15 @@ config PHY_MT65XX_USB3
for mt65xx SoCs. it supports two usb2.0 ports and
one usb3.0 port.
+config PHY_HI6220_USB
+ tristate "hi6220 USB PHY support"
+ select GENERIC_PHY
+ select MFD_SYSCON
+ help
+ Enable this to support the HISILICON HI6220 USB PHY.
+
+ To compile this driver as a module, choose M here.
+
config PHY_SUN4I_USB
tristate "Allwinner sunxi SoC USB PHY driver"
depends on ARCH_SUNXI && HAS_IOMEM && OF
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index 075db1a81aa5..faccda1f237f 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_TI_PIPE3) += phy-ti-pipe3.o
obj-$(CONFIG_TWL4030_USB) += phy-twl4030-usb.o
obj-$(CONFIG_PHY_EXYNOS5250_SATA) += phy-exynos5250-sata.o
obj-$(CONFIG_PHY_HIX5HD2_SATA) += phy-hix5hd2-sata.o
+obj-$(CONFIG_PHY_HI6220_USB) += phy-hi6220-usb.o
obj-$(CONFIG_PHY_MT65XX_USB3) += phy-mt65xx-usb3.o
obj-$(CONFIG_PHY_SUN4I_USB) += phy-sun4i-usb.o
obj-$(CONFIG_PHY_SUN9I_USB) += phy-sun9i-usb.o
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 8c7f27db6ad3..e7e574dc667a 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -275,20 +275,21 @@ EXPORT_SYMBOL_GPL(phy_exit);
int phy_power_on(struct phy *phy)
{
- int ret;
+ int ret = 0;
if (!phy)
- return 0;
+ goto out;
if (phy->pwr) {
ret = regulator_enable(phy->pwr);
if (ret)
- return ret;
+ goto out;
}
ret = phy_pm_runtime_get_sync(phy);
if (ret < 0 && ret != -ENOTSUPP)
- return ret;
+ goto err_pm_sync;
+
ret = 0; /* Override possible ret == -ENOTSUPP */
mutex_lock(&phy->mutex);
@@ -296,19 +297,20 @@ int phy_power_on(struct phy *phy)
ret = phy->ops->power_on(phy);
if (ret < 0) {
dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
- goto out;
+ goto err_pwr_on;
}
}
++phy->power_count;
mutex_unlock(&phy->mutex);
return 0;
-out:
+err_pwr_on:
mutex_unlock(&phy->mutex);
phy_pm_runtime_put_sync(phy);
+err_pm_sync:
if (phy->pwr)
regulator_disable(phy->pwr);
-
+out:
return ret;
}
EXPORT_SYMBOL_GPL(phy_power_on);
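
The reworked phy_power_on() now unwinds strictly in reverse order of acquisition: a power_on failure drops the runtime-PM reference taken just before it, and only then disables the regulator enabled first, with the labels reading bottom-up as a mirror of the setup sequence. A self-contained sketch of that goto-ladder convention, using hypothetical stand-in helpers rather than the PHY core's calls:

    /* Hypothetical helpers standing in for regulator/runtime-PM/phy steps. */
    static int enable_supply(void) { return 0; }
    static void disable_supply(void) { }
    static int runtime_get(void) { return 0; }
    static void runtime_put(void) { }
    static int do_power_on(void) { return 0; }

    static int bring_up(void)
    {
            int ret;

            ret = enable_supply();          /* step 1 */
            if (ret)
                    goto out;
            ret = runtime_get();            /* step 2 */
            if (ret)
                    goto err_supply;
            ret = do_power_on();            /* step 3 */
            if (ret)
                    goto err_runtime;
            return 0;

    err_runtime:
            runtime_put();                  /* undo step 2 */
    err_supply:
            disable_supply();               /* undo step 1 */
    out:
            return ret;
    }
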
diff --git a/drivers/phy/phy-hi6220-usb.c b/drivers/phy/phy-hi6220-usb.c
new file mode 100644
index 000000000000..b2141cbd4cf6
--- /dev/null
+++ b/drivers/phy/phy-hi6220-usb.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2015 Linaro Ltd.
+ * Copyright (c) 2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+#include <linux/regmap.h>
+
+#define SC_PERIPH_CTRL4 0x00c
+
+#define CTRL4_PICO_SIDDQ BIT(6)
+#define CTRL4_PICO_OGDISABLE BIT(8)
+#define CTRL4_PICO_VBUSVLDEXT BIT(10)
+#define CTRL4_PICO_VBUSVLDEXTSEL BIT(11)
+#define CTRL4_OTG_PHY_SEL BIT(21)
+
+#define SC_PERIPH_CTRL5 0x010
+
+#define CTRL5_USBOTG_RES_SEL BIT(3)
+#define CTRL5_PICOPHY_ACAENB BIT(4)
+#define CTRL5_PICOPHY_BC_MODE BIT(5)
+#define CTRL5_PICOPHY_CHRGSEL BIT(6)
+#define CTRL5_PICOPHY_VDATSRCEND BIT(7)
+#define CTRL5_PICOPHY_VDATDETENB BIT(8)
+#define CTRL5_PICOPHY_DCDENB BIT(9)
+#define CTRL5_PICOPHY_IDDIG BIT(10)
+
+#define SC_PERIPH_CTRL8 0x018
+#define SC_PERIPH_RSTEN0 0x300
+#define SC_PERIPH_RSTDIS0 0x304
+
+#define RST0_USBOTG_BUS BIT(4)
+#define RST0_POR_PICOPHY BIT(5)
+#define RST0_USBOTG BIT(6)
+#define RST0_USBOTG_32K BIT(7)
+
+#define EYE_PATTERN_PARA 0x7053348c
+
+struct hi6220_priv {
+ struct regmap *reg;
+ struct device *dev;
+};
+
+static void hi6220_phy_init(struct hi6220_priv *priv)
+{
+ struct regmap *reg = priv->reg;
+ u32 val, mask;
+
+ val = RST0_USBOTG_BUS | RST0_POR_PICOPHY |
+ RST0_USBOTG | RST0_USBOTG_32K;
+ mask = val;
+ regmap_update_bits(reg, SC_PERIPH_RSTEN0, mask, val);
+ regmap_update_bits(reg, SC_PERIPH_RSTDIS0, mask, val);
+}
+
+static int hi6220_phy_setup(struct hi6220_priv *priv, bool on)
+{
+ struct regmap *reg = priv->reg;
+ u32 val, mask;
+ int ret;
+
+ if (on) {
+ val = CTRL5_USBOTG_RES_SEL | CTRL5_PICOPHY_ACAENB;
+ mask = val | CTRL5_PICOPHY_BC_MODE;
+ ret = regmap_update_bits(reg, SC_PERIPH_CTRL5, mask, val);
+ if (ret)
+ goto out;
+
+ val = CTRL4_PICO_VBUSVLDEXT | CTRL4_PICO_VBUSVLDEXTSEL |
+ CTRL4_OTG_PHY_SEL;
+ mask = val | CTRL4_PICO_SIDDQ | CTRL4_PICO_OGDISABLE;
+ ret = regmap_update_bits(reg, SC_PERIPH_CTRL4, mask, val);
+ if (ret)
+ goto out;
+
+ ret = regmap_write(reg, SC_PERIPH_CTRL8, EYE_PATTERN_PARA);
+ if (ret)
+ goto out;
+ } else {
+ val = CTRL4_PICO_SIDDQ;
+ mask = val;
+ ret = regmap_update_bits(reg, SC_PERIPH_CTRL4, mask, val);
+ if (ret)
+ goto out;
+ }
+
+ return 0;
+out:
+ dev_err(priv->dev, "failed to setup phy ret: %d\n", ret);
+ return ret;
+}
+
+static int hi6220_phy_start(struct phy *phy)
+{
+ struct hi6220_priv *priv = phy_get_drvdata(phy);
+
+ return hi6220_phy_setup(priv, true);
+}
+
+static int hi6220_phy_exit(struct phy *phy)
+{
+ struct hi6220_priv *priv = phy_get_drvdata(phy);
+
+ return hi6220_phy_setup(priv, false);
+}
+
+static struct phy_ops hi6220_phy_ops = {
+ .init = hi6220_phy_start,
+ .exit = hi6220_phy_exit,
+ .owner = THIS_MODULE,
+};
+
+static int hi6220_phy_probe(struct platform_device *pdev)
+{
+ struct phy_provider *phy_provider;
+ struct device *dev = &pdev->dev;
+ struct phy *phy;
+ struct hi6220_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ priv->reg = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "hisilicon,peripheral-syscon");
+ if (IS_ERR(priv->reg)) {
+ dev_err(dev, "no hisilicon,peripheral-syscon\n");
+ return PTR_ERR(priv->reg);
+ }
+
+ hi6220_phy_init(priv);
+
+ phy = devm_phy_create(dev, NULL, &hi6220_phy_ops);
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+
+ phy_set_drvdata(phy, priv);
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id hi6220_phy_of_match[] = {
+ {.compatible = "hisilicon,hi6220-usb-phy",},
+ { },
+};
+MODULE_DEVICE_TABLE(of, hi6220_phy_of_match);
+
+static struct platform_driver hi6220_phy_driver = {
+ .probe = hi6220_phy_probe,
+ .driver = {
+ .name = "hi6220-usb-phy",
+ .of_match_table = hi6220_phy_of_match,
+ }
+};
+module_platform_driver(hi6220_phy_driver);
+
+MODULE_DESCRIPTION("HISILICON HI6220 USB PHY driver");
+MODULE_ALIAS("platform:hi6220-usb-phy");
+MODULE_LICENSE("GPL");
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
index 3a707dd14238..f96065a81d1e 100644
--- a/drivers/phy/phy-twl4030-usb.c
+++ b/drivers/phy/phy-twl4030-usb.c
@@ -715,6 +715,7 @@ static int twl4030_usb_probe(struct platform_device *pdev)
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
/* Our job is to use irqs and status from the power module
* to keep the transceiver disabled when nothing's connected.
@@ -750,6 +751,7 @@ static int twl4030_usb_remove(struct platform_device *pdev)
struct twl4030_usb *twl = platform_get_drvdata(pdev);
int val;
+ usb_remove_phy(&twl->phy);
pm_runtime_get_sync(twl->dev);
cancel_delayed_work(&twl->id_workaround_work);
device_remove_file(twl->dev, &dev_attr_vbus);
@@ -757,6 +759,13 @@ static int twl4030_usb_remove(struct platform_device *pdev)
/* set transceiver mode to power on defaults */
twl4030_usb_set_mode(twl, -1);
+ /* idle ulpi before powering off */
+ if (cable_present(twl->linkstat))
+ pm_runtime_put_noidle(twl->dev);
+ pm_runtime_mark_last_busy(twl->dev);
+ pm_runtime_put_sync_suspend(twl->dev);
+ pm_runtime_disable(twl->dev);
+
/* autogate 60MHz ULPI clock,
* clear dpll clock request for i2c access,
* disable 32KHz
@@ -771,11 +780,6 @@ static int twl4030_usb_remove(struct platform_device *pdev)
/* disable complete OTG block */
twl4030_usb_clear_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB);
- if (cable_present(twl->linkstat))
- pm_runtime_put_noidle(twl->dev);
- pm_runtime_mark_last_busy(twl->dev);
- pm_runtime_put(twl->dev);
-
return 0;
}
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 2e6ca69635aa..17dd8fe12b54 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -779,7 +779,7 @@ static int bcm2835_pctl_dt_node_to_map(struct pinctrl_dev *pctldev,
}
if (num_pulls) {
err = of_property_read_u32_index(np, "brcm,pull",
- (num_funcs > 1) ? i : 0, &pull);
+ (num_pulls > 1) ? i : 0, &pull);
if (err)
goto out;
err = bcm2835_pctl_dt_node_to_map_pull(pc, np, pin,
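
The brcm,pull fix restores the usual one-or-per-pin broadcast rule: a property with a single entry applies to all pins, otherwise entry i pairs with pin i, so the selector must be driven by num_pulls, not num_funcs. With four functions and one pull value, the old code asked for index 3 of a one-element property. The rule distilled:

    /* Broadcast rule: one value covers every pin, otherwise index per pin. */
    static inline unsigned int prop_index(unsigned int count, unsigned int i)
    {
            return count > 1 ? i : 0;
    }
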
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index a5bb93987378..1029aa7889b5 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -726,19 +726,18 @@ int imx_pinctrl_probe(struct platform_device *pdev,
if (of_property_read_bool(dev_np, "fsl,input-sel")) {
np = of_parse_phandle(dev_np, "fsl,input-sel", 0);
- if (np) {
- ipctl->input_sel_base = of_iomap(np, 0);
- if (IS_ERR(ipctl->input_sel_base)) {
- of_node_put(np);
- dev_err(&pdev->dev,
- "iomuxc input select base address not found\n");
- return PTR_ERR(ipctl->input_sel_base);
- }
- } else {
+ if (!np) {
dev_err(&pdev->dev, "iomuxc fsl,input-sel property not found\n");
return -EINVAL;
}
+
+ ipctl->input_sel_base = of_iomap(np, 0);
of_node_put(np);
+ if (!ipctl->input_sel_base) {
+ dev_err(&pdev->dev,
+ "iomuxc input select base address not found\n");
+ return -ENOMEM;
+ }
}
imx_pinctrl_desc.name = dev_name(&pdev->dev);
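
The imx rework untangles two error conventions that the old code had mixed up: of_iomap() reports failure with NULL, so its result must never be fed to IS_ERR(), while devm_ioremap_resource() reports failure with an ERR_PTR(). It also drops the node reference on every path instead of only on error. A sketch of the two conventions side by side (hypothetical probe fragment):

    #include <linux/io.h>
    #include <linux/of_address.h>
    #include <linux/platform_device.h>

    static int example_map(struct platform_device *pdev, struct device_node *np,
                           struct resource *res)
    {
            void __iomem *a, *b;

            a = of_iomap(np, 0);            /* NULL on failure */
            of_node_put(np);                /* drop the reference on every path */
            if (!a)
                    return -ENOMEM;

            b = devm_ioremap_resource(&pdev->dev, res);     /* ERR_PTR on failure */
            if (IS_ERR(b))
                    return PTR_ERR(b);

            return 0;
    }
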
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index eebfae0c9b7c..f844b4ae7f79 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -995,7 +995,7 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
int val;
if (pull)
- pullidx = data_out ? 1 : 2;
+ pullidx = data_out ? 2 : 1;
seq_printf(s, " gpio-%-3d (%-20.20s) in %s %s",
gpio,
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index 85c9046c690e..6b1a47f8c096 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -469,27 +469,27 @@ static const char * const pistachio_mips_pll_lock_groups[] = {
"mfio83",
};
-static const char * const pistachio_sys_pll_lock_groups[] = {
+static const char * const pistachio_audio_pll_lock_groups[] = {
"mfio84",
};
-static const char * const pistachio_wifi_pll_lock_groups[] = {
+static const char * const pistachio_rpu_v_pll_lock_groups[] = {
"mfio85",
};
-static const char * const pistachio_bt_pll_lock_groups[] = {
+static const char * const pistachio_rpu_l_pll_lock_groups[] = {
"mfio86",
};
-static const char * const pistachio_rpu_v_pll_lock_groups[] = {
+static const char * const pistachio_sys_pll_lock_groups[] = {
"mfio87",
};
-static const char * const pistachio_rpu_l_pll_lock_groups[] = {
+static const char * const pistachio_wifi_pll_lock_groups[] = {
"mfio88",
};
-static const char * const pistachio_audio_pll_lock_groups[] = {
+static const char * const pistachio_bt_pll_lock_groups[] = {
"mfio89",
};
@@ -559,12 +559,12 @@ enum pistachio_mux_option {
PISTACHIO_FUNCTION_DREQ4,
PISTACHIO_FUNCTION_DREQ5,
PISTACHIO_FUNCTION_MIPS_PLL_LOCK,
+ PISTACHIO_FUNCTION_AUDIO_PLL_LOCK,
+ PISTACHIO_FUNCTION_RPU_V_PLL_LOCK,
+ PISTACHIO_FUNCTION_RPU_L_PLL_LOCK,
PISTACHIO_FUNCTION_SYS_PLL_LOCK,
PISTACHIO_FUNCTION_WIFI_PLL_LOCK,
PISTACHIO_FUNCTION_BT_PLL_LOCK,
- PISTACHIO_FUNCTION_RPU_V_PLL_LOCK,
- PISTACHIO_FUNCTION_RPU_L_PLL_LOCK,
- PISTACHIO_FUNCTION_AUDIO_PLL_LOCK,
PISTACHIO_FUNCTION_DEBUG_RAW_CCA_IND,
PISTACHIO_FUNCTION_DEBUG_ED_SEC20_CCA_IND,
PISTACHIO_FUNCTION_DEBUG_ED_SEC40_CCA_IND,
@@ -620,12 +620,12 @@ static const struct pistachio_function pistachio_functions[] = {
FUNCTION(dreq4),
FUNCTION(dreq5),
FUNCTION(mips_pll_lock),
+ FUNCTION(audio_pll_lock),
+ FUNCTION(rpu_v_pll_lock),
+ FUNCTION(rpu_l_pll_lock),
FUNCTION(sys_pll_lock),
FUNCTION(wifi_pll_lock),
FUNCTION(bt_pll_lock),
- FUNCTION(rpu_v_pll_lock),
- FUNCTION(rpu_l_pll_lock),
- FUNCTION(audio_pll_lock),
FUNCTION(debug_raw_cca_ind),
FUNCTION(debug_ed_sec20_cca_ind),
FUNCTION(debug_ed_sec40_cca_ind),
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index 181ea98a63b7..2b0d70217bbd 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -545,7 +545,9 @@ static int sh_pfc_probe(struct platform_device *pdev)
return ret;
}
- pinctrl_provide_dummies();
+ /* Enable dummy states for those platforms without pinctrl support */
+ if (!of_have_populated_dt())
+ pinctrl_provide_dummies();
ret = sh_pfc_init_ranges(pfc);
if (ret < 0)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
index 00265f0435a7..8b381d69df86 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
@@ -485,6 +485,7 @@ static const struct sunxi_pinctrl_desc sun8i_a33_pinctrl_data = {
.pins = sun8i_a33_pins,
.npins = ARRAY_SIZE(sun8i_a33_pins),
.irq_banks = 2,
+ .irq_bank_base = 1,
};
static int sun8i_a33_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index dead97daca35..a4a5b504c532 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -578,7 +578,7 @@ static void sunxi_pinctrl_irq_release_resources(struct irq_data *d)
static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type)
{
struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
- u32 reg = sunxi_irq_cfg_reg(d->hwirq);
+ u32 reg = sunxi_irq_cfg_reg(d->hwirq, pctl->desc->irq_bank_base);
u8 index = sunxi_irq_cfg_offset(d->hwirq);
unsigned long flags;
u32 regval;
@@ -625,7 +625,8 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type)
static void sunxi_pinctrl_irq_ack(struct irq_data *d)
{
struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
- u32 status_reg = sunxi_irq_status_reg(d->hwirq);
+ u32 status_reg = sunxi_irq_status_reg(d->hwirq,
+ pctl->desc->irq_bank_base);
u8 status_idx = sunxi_irq_status_offset(d->hwirq);
/* Clear the IRQ */
@@ -635,7 +636,7 @@ static void sunxi_pinctrl_irq_ack(struct irq_data *d)
static void sunxi_pinctrl_irq_mask(struct irq_data *d)
{
struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
- u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
+ u32 reg = sunxi_irq_ctrl_reg(d->hwirq, pctl->desc->irq_bank_base);
u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
unsigned long flags;
u32 val;
@@ -652,7 +653,7 @@ static void sunxi_pinctrl_irq_mask(struct irq_data *d)
static void sunxi_pinctrl_irq_unmask(struct irq_data *d)
{
struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
- u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
+ u32 reg = sunxi_irq_ctrl_reg(d->hwirq, pctl->desc->irq_bank_base);
u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
unsigned long flags;
u32 val;
@@ -744,7 +745,7 @@ static void sunxi_pinctrl_irq_handler(struct irq_desc *desc)
if (bank == pctl->desc->irq_banks)
return;
- reg = sunxi_irq_status_reg_from_bank(bank);
+ reg = sunxi_irq_status_reg_from_bank(bank, pctl->desc->irq_bank_base);
val = readl(pctl->membase + reg);
if (val) {
@@ -1023,9 +1024,11 @@ int sunxi_pinctrl_init(struct platform_device *pdev,
for (i = 0; i < pctl->desc->irq_banks; i++) {
/* Mask and clear all IRQs before registering a handler */
- writel(0, pctl->membase + sunxi_irq_ctrl_reg_from_bank(i));
+ writel(0, pctl->membase + sunxi_irq_ctrl_reg_from_bank(i,
+ pctl->desc->irq_bank_base));
writel(0xffffffff,
- pctl->membase + sunxi_irq_status_reg_from_bank(i));
+ pctl->membase + sunxi_irq_status_reg_from_bank(i,
+ pctl->desc->irq_bank_base));
irq_set_chained_handler_and_data(pctl->irq[i],
sunxi_pinctrl_irq_handler,
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
index e248e81a0f9e..0afce1ab12d0 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
@@ -97,6 +97,7 @@ struct sunxi_pinctrl_desc {
int npins;
unsigned pin_base;
unsigned irq_banks;
+ unsigned irq_bank_base;
bool irq_read_needs_mux;
};
@@ -233,12 +234,12 @@ static inline u32 sunxi_pull_offset(u16 pin)
return pin_num * PULL_PINS_BITS;
}
-static inline u32 sunxi_irq_cfg_reg(u16 irq)
+static inline u32 sunxi_irq_cfg_reg(u16 irq, unsigned bank_base)
{
u8 bank = irq / IRQ_PER_BANK;
u8 reg = (irq % IRQ_PER_BANK) / IRQ_CFG_IRQ_PER_REG * 0x04;
- return IRQ_CFG_REG + bank * IRQ_MEM_SIZE + reg;
+ return IRQ_CFG_REG + (bank_base + bank) * IRQ_MEM_SIZE + reg;
}
static inline u32 sunxi_irq_cfg_offset(u16 irq)
@@ -247,16 +248,16 @@ static inline u32 sunxi_irq_cfg_offset(u16 irq)
return irq_num * IRQ_CFG_IRQ_BITS;
}
-static inline u32 sunxi_irq_ctrl_reg_from_bank(u8 bank)
+static inline u32 sunxi_irq_ctrl_reg_from_bank(u8 bank, unsigned bank_base)
{
- return IRQ_CTRL_REG + bank * IRQ_MEM_SIZE;
+ return IRQ_CTRL_REG + (bank_base + bank) * IRQ_MEM_SIZE;
}
-static inline u32 sunxi_irq_ctrl_reg(u16 irq)
+static inline u32 sunxi_irq_ctrl_reg(u16 irq, unsigned bank_base)
{
u8 bank = irq / IRQ_PER_BANK;
- return sunxi_irq_ctrl_reg_from_bank(bank);
+ return sunxi_irq_ctrl_reg_from_bank(bank, bank_base);
}
static inline u32 sunxi_irq_ctrl_offset(u16 irq)
@@ -265,16 +266,16 @@ static inline u32 sunxi_irq_ctrl_offset(u16 irq)
return irq_num * IRQ_CTRL_IRQ_BITS;
}
-static inline u32 sunxi_irq_status_reg_from_bank(u8 bank)
+static inline u32 sunxi_irq_status_reg_from_bank(u8 bank, unsigned bank_base)
{
- return IRQ_STATUS_REG + bank * IRQ_MEM_SIZE;
+ return IRQ_STATUS_REG + (bank_base + bank) * IRQ_MEM_SIZE;
}
-static inline u32 sunxi_irq_status_reg(u16 irq)
+static inline u32 sunxi_irq_status_reg(u16 irq, unsigned bank_base)
{
u8 bank = irq / IRQ_PER_BANK;
- return sunxi_irq_status_reg_from_bank(bank);
+ return sunxi_irq_status_reg_from_bank(bank, bank_base);
}
static inline u32 sunxi_irq_status_offset(u16 irq)
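
The new bank_base parameter shifts every per-bank register block by a whole IRQ_MEM_SIZE stride, which is what the A33 needs: its first interrupt-capable bank is the SoC's second, so with irq_bank_base = 1 hwirq 0 must decode to the second block of registers rather than the first. A standalone sketch of the offset arithmetic; the 0x200/0x20 constants mirror the driver's layout but are treated as illustrative here:

    #include <stdio.h>

    #define IRQ_CFG_REG     0x200   /* illustrative, mirrors the driver */
    #define IRQ_MEM_SIZE    0x20
    #define IRQ_PER_BANK    32

    static unsigned int irq_cfg_reg(unsigned int irq, unsigned int bank_base)
    {
            unsigned int bank = irq / IRQ_PER_BANK;
            unsigned int reg = (irq % IRQ_PER_BANK) / 8 * 0x04;

            return IRQ_CFG_REG + (bank_base + bank) * IRQ_MEM_SIZE + reg;
    }

    int main(void)
    {
            /* A33 (bank_base = 1): hwirq 0 decodes to 0x220, not 0x200 */
            printf("0x%x\n", irq_cfg_reg(0, 1));
            return 0;
    }
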
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index a313dfc0245f..be3bc2f4edd4 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -865,6 +865,27 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
},
},
{
+ .ident = "Lenovo ideapad Y700-15ISK",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700-15ISK"),
+ },
+ },
+ {
+ .ident = "Lenovo ideapad Y700 Touch-15ISK",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700 Touch-15ISK"),
+ },
+ },
+ {
+ .ident = "Lenovo ideapad Y700-17ISK",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700-17ISK"),
+ },
+ },
+ {
.ident = "Lenovo Yoga 2 11 / 13 / Pro",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
@@ -893,6 +914,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
},
},
{
+ .ident = "Lenovo Yoga 700",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 700"),
+ },
+ },
+ {
.ident = "Lenovo Yoga 900",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
diff --git a/drivers/platform/x86/intel_scu_ipcutil.c b/drivers/platform/x86/intel_scu_ipcutil.c
index 02bc5a6343c3..aa454241489c 100644
--- a/drivers/platform/x86/intel_scu_ipcutil.c
+++ b/drivers/platform/x86/intel_scu_ipcutil.c
@@ -49,7 +49,7 @@ struct scu_ipc_data {
static int scu_reg_access(u32 cmd, struct scu_ipc_data *data)
{
- int count = data->count;
+ unsigned int count = data->count;
if (count == 0 || count == 3 || count > 4)
return -EINVAL;
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index c01302989ee4..b0f62141ea4d 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -2484,6 +2484,14 @@ static int toshiba_acpi_setup_backlight(struct toshiba_acpi_dev *dev)
brightness = __get_lcd_brightness(dev);
if (brightness < 0)
return 0;
+ /*
+ * If transflective backlight is supported and the brightness is zero
+ * (lowest brightness level), the set_lcd_brightness function will
+ * activate the transflective backlight, making the LCD appear to be
+ * turned off. Simply increment the brightness level to avoid that.
+ */
+ if (dev->tr_backlight_supported && brightness == 0)
+ brightness++;
ret = set_lcd_brightness(dev, brightness);
if (ret) {
pr_debug("Backlight method is read-only, disabling backlight support\n");
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 8df0b0e62976..10c46e236e8b 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -261,6 +261,21 @@ config REGULATOR_HI6421
21 general purpose LDOs, 3 dedicated LDOs, and 5 BUCKs. All
of them come with support to either ECO (idle) or sleep mode.
+config REGULATOR_HI6220_MTCMOS
+ bool "Hisilicon Hi6220 mtcmos support"
+ depends on ARCH_HISI
+ help
+ This driver provides support for the MTCMOS regulators of the Hi6220 SoC.
+
+config REGULATOR_HI655X
+ tristate "Hisilicon HI655X PMIC regulators support"
+ depends on ARCH_HISI || COMPILE_TEST
+ depends on MFD_HI655X_PMIC && OF
+ help
+ This driver provides support for the voltage regulators of the
+ Hisilicon Hi655x PMIC device.
+
config REGULATOR_ISL9305
tristate "Intersil ISL9305 regulator"
depends on I2C
@@ -446,6 +461,7 @@ config REGULATOR_MC13892
config REGULATOR_MT6311
tristate "MediaTek MT6311 PMIC"
depends on I2C
+ select REGMAP_I2C
help
Say y here to select this option to enable the power regulator of
MediaTek MT6311 PMIC.
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 0f8174913c17..7345d43551ee 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -34,6 +34,8 @@ obj-$(CONFIG_REGULATOR_DB8500_PRCMU) += db8500-prcmu.o
obj-$(CONFIG_REGULATOR_FAN53555) += fan53555.o
obj-$(CONFIG_REGULATOR_GPIO) += gpio-regulator.o
obj-$(CONFIG_REGULATOR_HI6421) += hi6421-regulator.o
+obj-$(CONFIG_REGULATOR_HI6220_MTCMOS) += hi6220-mtcmos.o
+obj-$(CONFIG_REGULATOR_HI655X) += hi655x-regulator.o
obj-$(CONFIG_REGULATOR_ISL6271A) += isl6271a-regulator.o
obj-$(CONFIG_REGULATOR_ISL9305) += isl9305.o
obj-$(CONFIG_REGULATOR_LP3971) += lp3971.o
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index 35de22fdb7a0..f2e1a39ce0f3 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -27,8 +27,8 @@
#define AXP20X_IO_ENABLED 0x03
#define AXP20X_IO_DISABLED 0x07
-#define AXP22X_IO_ENABLED 0x04
-#define AXP22X_IO_DISABLED 0x03
+#define AXP22X_IO_ENABLED 0x03
+#define AXP22X_IO_DISABLED 0x04
#define AXP20X_WORKMODE_DCDC2_MASK BIT(2)
#define AXP20X_WORKMODE_DCDC3_MASK BIT(1)
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 73b7683355cd..7b94b8ee087c 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -132,24 +132,24 @@ static bool have_full_constraints(void)
return has_full_constraints || of_have_populated_dt();
}
+static inline struct regulator_dev *rdev_get_supply(struct regulator_dev *rdev)
+{
+ if (rdev && rdev->supply)
+ return rdev->supply->rdev;
+
+ return NULL;
+}
+
/**
* regulator_lock_supply - lock a regulator and its supplies
* @rdev: regulator source
*/
static void regulator_lock_supply(struct regulator_dev *rdev)
{
- struct regulator *supply;
- int i = 0;
-
- while (1) {
- mutex_lock_nested(&rdev->mutex, i++);
- supply = rdev->supply;
-
- if (!rdev->supply)
- return;
+ int i;
- rdev = supply->rdev;
- }
+ for (i = 0; rdev; rdev = rdev_get_supply(rdev), i++)
+ mutex_lock_nested(&rdev->mutex, i);
}
/**
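
Note the loop condition (adjusted above to test rdev rather than rdev->supply): the walk has to continue until the current regulator itself is NULL, otherwise the last regulator in the chain, the one with no supply, is never locked. Each level takes its mutex with an incrementing lockdep subclass so the nesting is not misread as recursive locking. A runnable trace of the walk over a three-deep chain:

    #include <stdio.h>

    struct rdev { const char *name; struct rdev *supply; };

    int main(void)
    {
            struct rdev c = { "C", NULL }, b = { "B", &c }, a = { "A", &b };
            struct rdev *r;
            int i;

            /* mirrors the fixed loop: run while r itself is non-NULL */
            for (r = &a, i = 0; r; r = r->supply, i++)
                    printf("lock %s with subclass %d\n", r->name, i);
            return 0;
    }
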
diff --git a/drivers/regulator/hi6220-mtcmos.c b/drivers/regulator/hi6220-mtcmos.c
new file mode 100644
index 000000000000..492be7adfaa2
--- /dev/null
+++ b/drivers/regulator/hi6220-mtcmos.c
@@ -0,0 +1,269 @@
+/*
+ * Device driver for the MTCMOS regulators in the Hi6220 SoC
+ *
+ * Copyright (c) 2011 Hisilicon Co. Ltd
+ *
+ */
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/delay.h>
+#include <linux/time.h>
+
+enum {
+ HI6220_MTCMOS1,
+ HI6220_MTCMOS2,
+ HI6220_RG_MAX,
+};
+
+struct hi6220_mtcmos_ctrl_regs {
+ unsigned int enable_reg;
+ unsigned int disable_reg;
+ unsigned int status_reg;
+};
+
+struct hi6220_mtcmos_ctrl_data {
+ int shift;
+ unsigned int mask;
+};
+
+struct hi6220_mtcmos_info {
+ struct regulator_desc rdesc;
+ struct hi6220_mtcmos_ctrl_regs ctrl_regs;
+ struct hi6220_mtcmos_ctrl_data ctrl_data;
+};
+
+struct hi6220_mtcmos {
+ struct regulator_dev *rdev[HI6220_RG_MAX];
+ void __iomem *sc_on_regs;
+ u32 mtcmos_steady_time;
+ spinlock_t mtcmos_spin_lock;
+};
+
+static int hi6220_mtcmos_is_on(struct hi6220_mtcmos *mtcmos,
+ unsigned int regs, unsigned int mask, int shift)
+{
+ unsigned int ret;
+ unsigned long mtcmos_spin_flag = 0;
+
+ spin_lock_irqsave(&mtcmos->mtcmos_spin_lock, mtcmos_spin_flag);
+ ret = readl(mtcmos->sc_on_regs + regs);
+ spin_unlock_irqrestore(&mtcmos->mtcmos_spin_lock, mtcmos_spin_flag);
+
+ ret &= (mask << shift);
+ return !!ret;
+}
+
+static int hi6220_mtcmos_on(struct hi6220_mtcmos *mtcmos,
+ unsigned int regs, unsigned int mask, int shift)
+{
+ unsigned long mtcmos_spin_flag = 0;
+
+ spin_lock_irqsave(&mtcmos->mtcmos_spin_lock, mtcmos_spin_flag);
+ writel(mask << shift, mtcmos->sc_on_regs + regs);
+ udelay(mtcmos->mtcmos_steady_time);
+ spin_unlock_irqrestore(&mtcmos->mtcmos_spin_lock, mtcmos_spin_flag);
+
+ return 0;
+}
+
+static int hi6220_mtcmos_off(struct hi6220_mtcmos *mtcmos,
+ unsigned int regs, unsigned int mask, int shift)
+{
+ unsigned long mtcmos_spin_flag = 0;
+
+ spin_lock_irqsave(&mtcmos->mtcmos_spin_lock, mtcmos_spin_flag);
+ writel(mask << shift, mtcmos->sc_on_regs + regs);
+ udelay(mtcmos->mtcmos_steady_time);
+ spin_unlock_irqrestore(&mtcmos->mtcmos_spin_lock,
+ mtcmos_spin_flag);
+
+ return 0;
+}
+
+static int hi6220_regulator_mtcmos_is_enabled(struct regulator_dev *rdev)
+{
+ int ret;
+ struct hi6220_mtcmos_info *sreg = rdev_get_drvdata(rdev);
+ struct platform_device *pdev =
+ container_of(rdev->dev.parent, struct platform_device, dev);
+ struct hi6220_mtcmos *mtcmos = platform_get_drvdata(pdev);
+ struct hi6220_mtcmos_ctrl_regs *ctrl_regs = &(sreg->ctrl_regs);
+ struct hi6220_mtcmos_ctrl_data *ctrl_data = &(sreg->ctrl_data);
+
+ ret = hi6220_mtcmos_is_on(mtcmos, ctrl_regs->status_reg,
+ ctrl_data->mask, ctrl_data->shift);
+ return ret;
+}
+
+static int hi6220_regulator_mtcmos_enabled(struct regulator_dev *rdev)
+{
+ int ret;
+ struct hi6220_mtcmos_info *sreg = rdev_get_drvdata(rdev);
+ struct platform_device *pdev =
+ container_of(rdev->dev.parent, struct platform_device, dev);
+ struct hi6220_mtcmos *mtcmos = platform_get_drvdata(pdev);
+ struct hi6220_mtcmos_ctrl_regs *ctrl_regs = &(sreg->ctrl_regs);
+ struct hi6220_mtcmos_ctrl_data *ctrl_data = &(sreg->ctrl_data);
+
+ ret = hi6220_mtcmos_on(mtcmos, ctrl_regs->enable_reg,
+ ctrl_data->mask, ctrl_data->shift);
+ if (!hi6220_mtcmos_is_on(mtcmos, ctrl_regs->status_reg,
+ ctrl_data->mask, ctrl_data->shift))
+ return -EAGAIN;
+ return ret;
+}
+
+static int hi6220_regulator_mtcmos_disabled(struct regulator_dev *rdev)
+{
+ int ret;
+ struct hi6220_mtcmos_info *sreg = rdev_get_drvdata(rdev);
+ struct platform_device *pdev =
+ container_of(rdev->dev.parent, struct platform_device, dev);
+ struct hi6220_mtcmos *mtcmos = platform_get_drvdata(pdev);
+ struct hi6220_mtcmos_ctrl_regs *ctrl_regs = &(sreg->ctrl_regs);
+ struct hi6220_mtcmos_ctrl_data *ctrl_data = &(sreg->ctrl_data);
+
+ ret = hi6220_mtcmos_off(mtcmos, ctrl_regs->disable_reg,
+ ctrl_data->mask, ctrl_data->shift);
+
+ return ret;
+}
+
+static struct regulator_ops hi6220_mtcmos_mtcmos_rops = {
+ .is_enabled = hi6220_regulator_mtcmos_is_enabled,
+ .enable = hi6220_regulator_mtcmos_enabled,
+ .disable = hi6220_regulator_mtcmos_disabled,
+};
+
+#define HI6220_MTCMOS(vreg) \
+{ \
+ .rdesc = { \
+ .name = #vreg, \
+ .ops = &hi6220_mtcmos_mtcmos_rops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }, \
+}
+
+static struct hi6220_mtcmos_info hi6220_mtcmos_info[] = {
+ HI6220_MTCMOS(MTCMOS1),
+ HI6220_MTCMOS(MTCMOS2),
+};
+
+static struct of_regulator_match hi6220_mtcmos_matches[] = {
+ { .name = "mtcmos1",
+ .driver_data = &hi6220_mtcmos_info[HI6220_MTCMOS1], },
+ { .name = "mtcmos2",
+ .driver_data = &hi6220_mtcmos_info[HI6220_MTCMOS2], },
+};
+
+static int hi6220_mtcmos_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct hi6220_mtcmos *mtcmos;
+ const __be32 *sc_on_regs = NULL;
+ void __iomem *regs;
+ struct device *dev;
+ struct device_node *np, *child;
+ int count, i;
+ struct regulator_config config = { };
+ struct regulator_init_data *init_data;
+ struct hi6220_mtcmos_info *sreg;
+
+ dev = &pdev->dev;
+ np = dev->of_node;
+ mtcmos = devm_kzalloc(dev,
+ sizeof(struct hi6220_mtcmos), GFP_KERNEL);
+ if (!mtcmos) {
+ dev_err(dev, "cannot allocate hi6220_mtcmos device info\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&mtcmos->mtcmos_spin_lock);
+ sc_on_regs = of_get_property(np, "hisilicon,mtcmos-sc-on-base", NULL);
+ if (sc_on_regs) {
+ regs = ioremap(be32_to_cpu(*sc_on_regs), 0x1000);
+ mtcmos->sc_on_regs = regs;
+ }
+ ret = of_property_read_u32(np, "hisilicon,mtcmos-steady-us",
+ &mtcmos->mtcmos_steady_time);
+
+ count = of_regulator_match(&pdev->dev, np,
+ hi6220_mtcmos_matches,
+ ARRAY_SIZE(hi6220_mtcmos_matches));
+
+ for (i = 0; i < HI6220_RG_MAX; i++) {
+ init_data = hi6220_mtcmos_matches[i].init_data;
+ if (!init_data)
+ continue;
+ sreg = hi6220_mtcmos_matches[i].driver_data;
+ config.dev = &pdev->dev;
+ config.init_data = init_data;
+ config.driver_data = sreg;
+ config.of_node = hi6220_mtcmos_matches[i].of_node;
+ child = config.of_node;
+
+ ret = of_property_read_u32_array(child, "hisilicon,ctrl-regs",
+ (unsigned int *)(&sreg->ctrl_regs), 0x3);
+ ret = of_property_read_u32_array(child, "hisilicon,ctrl-data",
+ (unsigned int *)(&sreg->ctrl_data), 0x2);
+
+ mtcmos->rdev[i] = regulator_register(&sreg->rdesc, &config);
+ if (IS_ERR(mtcmos->rdev[i])) {
+ ret = PTR_ERR(mtcmos->rdev[i]);
+ dev_err(&pdev->dev, "failed to register mtcmos %s\n",
+ sreg->rdesc.name);
+ while (--i >= 0)
+ regulator_unregister(mtcmos->rdev[i]);
+
+ return ret;
+ }
+ }
+
+ platform_set_drvdata(pdev, mtcmos);
+
+ return 0;
+}
+
+static const struct of_device_id of_hi6220_mtcmos_match_tbl[] = {
+ { .compatible = "hisilicon,hi6220-mtcmos-driver", },
+ {}
+};
+
+static struct platform_driver mtcmos_driver = {
+ .driver = {
+ .name = "hisi_hi6220_mtcmos",
+ .owner = THIS_MODULE,
+ .of_match_table = of_hi6220_mtcmos_match_tbl,
+ },
+ .probe = hi6220_mtcmos_probe,
+};
+
+static int __init hi6220_mtcmos_init(void)
+{
+ return platform_driver_register(&mtcmos_driver);
+}
+
+static void __exit hi6220_mtcmos_exit(void)
+{
+ platform_driver_unregister(&mtcmos_driver);
+}
+
+fs_initcall(hi6220_mtcmos_init);
+module_exit(hi6220_mtcmos_exit);
+
+MODULE_AUTHOR("Baixing Quan<quanbaixing@huawei.com>");
+MODULE_DESCRIPTION("HI6220 MTCMOS interface driver");
+MODULE_LICENSE("GPL V2");
diff --git a/drivers/regulator/hi655x-regulator.c b/drivers/regulator/hi655x-regulator.c
new file mode 100644
index 000000000000..9074355791a2
--- /dev/null
+++ b/drivers/regulator/hi655x-regulator.c
@@ -0,0 +1,226 @@
+/*
+ * Device driver for regulators in hi655x IC
+ *
+ * Copyright (c) 2016 Hisilicon.
+ *
+ * Authors:
+ * Chen Feng <puck.chen@hisilicon.com>
+ * Fei Wang <w.f@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/mfd/hi655x-pmic.h>
+
+struct hi655x_regulator {
+ unsigned int disable_reg;
+ unsigned int status_reg;
+ unsigned int ctrl_regs;
+ unsigned int ctrl_mask;
+ struct regulator_desc rdesc;
+};
+
+static const unsigned int ldo7_voltages[] = {
+ 1800000, 1850000, 2850000, 2900000,
+ 3000000, 3100000, 3200000, 3300000,
+};
+
+static const unsigned int ldo19_voltages[] = {
+ 1800000, 1850000, 1900000, 1750000,
+ 2800000, 2850000, 2900000, 3000000,
+};
+
+static const unsigned int ldo22_voltages[] = {
+ 900000, 1000000, 1050000, 1100000,
+ 1150000, 1175000, 1185000, 1200000,
+};
+
+enum hi655x_regulator_id {
+ HI655X_LDO0,
+ HI655X_LDO1,
+ HI655X_LDO2,
+ HI655X_LDO3,
+ HI655X_LDO4,
+ HI655X_LDO5,
+ HI655X_LDO6,
+ HI655X_LDO7,
+ HI655X_LDO8,
+ HI655X_LDO9,
+ HI655X_LDO10,
+ HI655X_LDO11,
+ HI655X_LDO12,
+ HI655X_LDO13,
+ HI655X_LDO14,
+ HI655X_LDO15,
+ HI655X_LDO16,
+ HI655X_LDO17,
+ HI655X_LDO18,
+ HI655X_LDO19,
+ HI655X_LDO20,
+ HI655X_LDO21,
+ HI655X_LDO22,
+};
+
+static int hi655x_is_enabled(struct regulator_dev *rdev)
+{
+ unsigned int value = 0;
+
+ struct hi655x_regulator *regulator = rdev_get_drvdata(rdev);
+
+ regmap_read(rdev->regmap, regulator->status_reg, &value);
+ return (value & BIT(regulator->ctrl_mask));
+}
+
+static int hi655x_disable(struct regulator_dev *rdev)
+{
+ int ret = 0;
+
+ struct hi655x_regulator *regulator = rdev_get_drvdata(rdev);
+
+ ret = regmap_write(rdev->regmap, regulator->disable_reg,
+ BIT(regulator->ctrl_mask));
+ return ret;
+}
+
+static struct regulator_ops hi655x_regulator_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = hi655x_disable,
+ .is_enabled = hi655x_is_enabled,
+ .list_voltage = regulator_list_voltage_table,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+};
+
+static struct regulator_ops hi655x_ldo_linear_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = hi655x_disable,
+ .is_enabled = hi655x_is_enabled,
+ .list_voltage = regulator_list_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+};
+
+#define HI655X_LDO(_ID, vreg, vmask, ereg, dreg, \
+ sreg, cmask, vtable) { \
+ .rdesc = { \
+ .name = #_ID, \
+ .of_match = of_match_ptr(#_ID), \
+ .ops = &hi655x_regulator_ops, \
+ .regulators_node = of_match_ptr("regulators"), \
+ .type = REGULATOR_VOLTAGE, \
+ .id = HI655X_##_ID, \
+ .owner = THIS_MODULE, \
+ .n_voltages = ARRAY_SIZE(vtable), \
+ .volt_table = vtable, \
+ .vsel_reg = HI655X_BUS_ADDR(vreg), \
+ .vsel_mask = vmask, \
+ .enable_reg = HI655X_BUS_ADDR(ereg), \
+ .enable_mask = BIT(cmask), \
+ }, \
+ .disable_reg = HI655X_BUS_ADDR(dreg), \
+ .status_reg = HI655X_BUS_ADDR(sreg), \
+ .ctrl_mask = cmask, \
+}
+
+#define HI655X_LDO_LINEAR(_ID, vreg, vmask, ereg, dreg, \
+ sreg, cmask, minv, nvolt, vstep) { \
+ .rdesc = { \
+ .name = #_ID, \
+ .of_match = of_match_ptr(#_ID), \
+ .ops = &hi655x_ldo_linear_ops, \
+ .regulators_node = of_match_ptr("regulators"), \
+ .type = REGULATOR_VOLTAGE, \
+ .id = HI655X_##_ID, \
+ .owner = THIS_MODULE, \
+ .min_uV = minv, \
+ .n_voltages = nvolt, \
+ .uV_step = vstep, \
+ .vsel_reg = HI655X_BUS_ADDR(vreg), \
+ .vsel_mask = vmask, \
+ .enable_reg = HI655X_BUS_ADDR(ereg), \
+ .enable_mask = BIT(cmask), \
+ }, \
+ .disable_reg = HI655X_BUS_ADDR(dreg), \
+ .status_reg = HI655X_BUS_ADDR(sreg), \
+ .ctrl_mask = cmask, \
+}
+
+static struct hi655x_regulator regulators[] = {
+ HI655X_LDO_LINEAR(LDO2, 0x72, 0x07, 0x29, 0x2a, 0x2b, 0x01,
+ 2500000, 8, 100000),
+ HI655X_LDO(LDO7, 0x78, 0x07, 0x29, 0x2a, 0x2b, 0x06, ldo7_voltages),
+ HI655X_LDO(LDO10, 0x78, 0x07, 0x29, 0x2a, 0x2b, 0x01, ldo7_voltages),
+ HI655X_LDO_LINEAR(LDO13, 0x7e, 0x07, 0x2c, 0x2d, 0x2e, 0x04,
+ 1600000, 8, 50000),
+ HI655X_LDO_LINEAR(LDO14, 0x7f, 0x07, 0x2c, 0x2d, 0x2e, 0x05,
+ 2500000, 8, 100000),
+ HI655X_LDO_LINEAR(LDO15, 0x80, 0x07, 0x2c, 0x2d, 0x2e, 0x06,
+ 1600000, 8, 50000),
+ HI655X_LDO_LINEAR(LDO17, 0x82, 0x07, 0x2f, 0x30, 0x31, 0x00,
+ 2500000, 8, 100000),
+ HI655X_LDO(LDO19, 0x84, 0x07, 0x2f, 0x30, 0x31, 0x02, ldo19_voltages),
+ HI655X_LDO_LINEAR(LDO21, 0x86, 0x07, 0x2f, 0x30, 0x31, 0x04,
+ 1650000, 8, 50000),
+ HI655X_LDO(LDO22, 0x87, 0x07, 0x2f, 0x30, 0x31, 0x05, ldo22_voltages),
+};
+
+static int hi655x_regulator_probe(struct platform_device *pdev)
+{
+ unsigned int i;
+ struct hi655x_regulator *regulator;
+ struct hi655x_pmic *pmic;
+ struct regulator_config config = { };
+ struct regulator_dev *rdev;
+
+ pmic = dev_get_drvdata(pdev->dev.parent);
+ if (!pmic) {
+ dev_err(&pdev->dev, "no pmic in the regulator parent node\n");
+ return -ENODEV;
+ }
+
+ regulator = devm_kzalloc(&pdev->dev, sizeof(*regulator), GFP_KERNEL);
+ if (!regulator)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, regulator);
+
+ config.dev = pdev->dev.parent;
+ config.regmap = pmic->regmap;
+ for (i = 0; i < ARRAY_SIZE(regulators); i++) {
+ /* each LDO's ops must see its own control/status registers */
+ config.driver_data = &regulators[i];
+ rdev = devm_regulator_register(&pdev->dev,
+ &regulators[i].rdesc,
+ &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "failed to register regulator %s\n",
+ regulators[i].rdesc.name);
+ return PTR_ERR(rdev);
+ }
+ }
+ return 0;
+}
+
+static struct platform_driver hi655x_regulator_driver = {
+ .driver = {
+ .name = "hi655x-regulator",
+ },
+ .probe = hi655x_regulator_probe,
+};
+module_platform_driver(hi655x_regulator_driver);
+
+MODULE_AUTHOR("Chen Feng <puck.chen@hisilicon.com>");
+MODULE_DESCRIPTION("Hisilicon hi655x regulator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 0615f50a14cd..df37212a5cbd 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -13,3 +13,4 @@ menuconfig RESET_CONTROLLER
If unsure, say no.
source "drivers/reset/sti/Kconfig"
+source "drivers/reset/hisilicon/Kconfig"
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index 85d5904e5480..99e18c875e8a 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -4,5 +4,6 @@ obj-$(CONFIG_ARCH_SOCFPGA) += reset-socfpga.o
obj-$(CONFIG_ARCH_BERLIN) += reset-berlin.o
obj-$(CONFIG_ARCH_SUNXI) += reset-sunxi.o
obj-$(CONFIG_ARCH_STI) += sti/
+obj-$(CONFIG_ARCH_HISI) += hisilicon/
obj-$(CONFIG_ARCH_ZYNQ) += reset-zynq.o
obj-$(CONFIG_ATH79) += reset-ath79.o
diff --git a/drivers/reset/hisilicon/Kconfig b/drivers/reset/hisilicon/Kconfig
new file mode 100644
index 000000000000..26bf95a83a8e
--- /dev/null
+++ b/drivers/reset/hisilicon/Kconfig
@@ -0,0 +1,5 @@
+config COMMON_RESET_HI6220
+ tristate "Hi6220 Reset Driver"
+ depends on (ARCH_HISI && RESET_CONTROLLER)
+ help
+ Build the Hisilicon Hi6220 reset driver.
diff --git a/drivers/reset/hisilicon/Makefile b/drivers/reset/hisilicon/Makefile
new file mode 100644
index 000000000000..c932f86e2f10
--- /dev/null
+++ b/drivers/reset/hisilicon/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_COMMON_RESET_HI6220) += hi6220_reset.o
diff --git a/drivers/reset/hisilicon/hi6220_reset.c b/drivers/reset/hisilicon/hi6220_reset.c
new file mode 100644
index 000000000000..7787a9b1cc67
--- /dev/null
+++ b/drivers/reset/hisilicon/hi6220_reset.c
@@ -0,0 +1,109 @@
+/*
+ * Hisilicon Hi6220 reset controller driver
+ *
+ * Copyright (c) 2015 Hisilicon Limited.
+ *
+ * Author: Feng Chen <puck.chen@hisilicon.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/of.h>
+#include <linux/reset-controller.h>
+#include <linux/reset.h>
+#include <linux/platform_device.h>
+
+#define ASSERT_OFFSET 0x300
+#define DEASSERT_OFFSET 0x304
+#define MAX_INDEX 0x509
+
+#define to_reset_data(x) container_of(x, struct hi6220_reset_data, rc_dev)
+
+struct hi6220_reset_data {
+ void __iomem *assert_base;
+ void __iomem *deassert_base;
+ struct reset_controller_dev rc_dev;
+};
+
+static int hi6220_reset_assert(struct reset_controller_dev *rc_dev,
+ unsigned long idx)
+{
+ struct hi6220_reset_data *data = to_reset_data(rc_dev);
+
+ int bank = idx >> 8;
+ int offset = idx & 0xff;
+
+ writel(BIT(offset), data->assert_base + (bank * 0x10));
+
+ return 0;
+}
+
+static int hi6220_reset_deassert(struct reset_controller_dev *rc_dev,
+ unsigned long idx)
+{
+ struct hi6220_reset_data *data = to_reset_data(rc_dev);
+
+ int bank = idx >> 8;
+ int offset = idx & 0xff;
+
+ writel(BIT(offset), data->deassert_base + (bank * 0x10));
+
+ return 0;
+}
+
+static struct reset_control_ops hi6220_reset_ops = {
+ .assert = hi6220_reset_assert,
+ .deassert = hi6220_reset_deassert,
+};
+
+static int hi6220_reset_probe(struct platform_device *pdev)
+{
+ struct hi6220_reset_data *data;
+ struct resource *res;
+ void __iomem *src_base;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ src_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(src_base))
+ return PTR_ERR(src_base);
+
+ data->assert_base = src_base + ASSERT_OFFSET;
+ data->deassert_base = src_base + DEASSERT_OFFSET;
+ data->rc_dev.nr_resets = MAX_INDEX;
+ data->rc_dev.ops = &hi6220_reset_ops;
+ data->rc_dev.of_node = pdev->dev.of_node;
+
+ return reset_controller_register(&data->rc_dev);
+}
+
+static const struct of_device_id hi6220_reset_match[] = {
+ { .compatible = "hisilicon,hi6220-sysctrl" },
+ { },
+};
+
+static struct platform_driver hi6220_reset_driver = {
+ .probe = hi6220_reset_probe,
+ .driver = {
+ .name = "reset-hi6220",
+ .of_match_table = hi6220_reset_match,
+ },
+};
+
+static int __init hi6220_reset_init(void)
+{
+ return platform_driver_register(&hi6220_reset_driver);
+}
+
+postcore_initcall(hi6220_reset_init);
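The callbacks above decode each reset index into a register bank (bits [15:8]) and a bit position (bits [7:0]); banks sit 0x10 bytes apart behind the assert/deassert blocks at 0x300/0x304. A minimal userspace sketch of that decoding, using the MAX_INDEX value 0x509 as an example index (printout only, no hardware access):

#include <stdio.h>

int main(void)
{
        unsigned long idx = 0x509;      /* example: bank 5, bit 9 */
        int bank = idx >> 8;
        int offset = idx & 0xff;

        /* mirrors writel(BIT(offset), assert_base + bank * 0x10) */
        printf("assert reg = 0x%x, mask = 0x%x\n",
               0x300 + bank * 0x10, 1u << offset);
        return 0;
}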
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index a263c10359e1..4abfbdb285ec 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -3031,6 +3031,7 @@ static void dasd_setup_queue(struct dasd_block *block)
max = block->base->discipline->max_blocks << block->s2b_shift;
}
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, block->request_queue);
+ block->request_queue->limits.max_dev_sectors = max;
blk_queue_logical_block_size(block->request_queue,
block->bp_block);
blk_queue_max_hw_sectors(block->request_queue, max);
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 184b1dbeb554..286782c60da4 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -264,8 +264,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
spin_unlock_irqrestore(&lcu->lock, flags);
cancel_work_sync(&lcu->suc_data.worker);
spin_lock_irqsave(&lcu->lock, flags);
- if (device == lcu->suc_data.device)
+ if (device == lcu->suc_data.device) {
+ dasd_put_device(device);
lcu->suc_data.device = NULL;
+ }
}
was_pending = 0;
if (device == lcu->ruac_data.device) {
@@ -273,8 +275,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
was_pending = 1;
cancel_delayed_work_sync(&lcu->ruac_data.dwork);
spin_lock_irqsave(&lcu->lock, flags);
- if (device == lcu->ruac_data.device)
+ if (device == lcu->ruac_data.device) {
+ dasd_put_device(device);
lcu->ruac_data.device = NULL;
+ }
}
private->lcu = NULL;
spin_unlock_irqrestore(&lcu->lock, flags);
@@ -549,8 +553,10 @@ static void lcu_update_work(struct work_struct *work)
if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) {
DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
" alias data in lcu (rc = %d), retry later", rc);
- schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
+ if (!schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ))
+ dasd_put_device(device);
} else {
+ dasd_put_device(device);
lcu->ruac_data.device = NULL;
lcu->flags &= ~UPDATE_PENDING;
}
@@ -593,8 +599,10 @@ static int _schedule_lcu_update(struct alias_lcu *lcu,
*/
if (!usedev)
return -EINVAL;
+ dasd_get_device(usedev);
lcu->ruac_data.device = usedev;
- schedule_delayed_work(&lcu->ruac_data.dwork, 0);
+ if (!schedule_delayed_work(&lcu->ruac_data.dwork, 0))
+ dasd_put_device(usedev);
return 0;
}
@@ -723,7 +731,7 @@ static int reset_summary_unit_check(struct alias_lcu *lcu,
ASCEBC((char *) &cqr->magic, 4);
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_RSCK;
- ccw->flags = 0 ;
+ ccw->flags = CCW_FLAG_SLI;
ccw->count = 16;
ccw->cda = (__u32)(addr_t) cqr->data;
((char *)cqr->data)[0] = reason;
@@ -930,6 +938,7 @@ static void summary_unit_check_handling_work(struct work_struct *work)
/* 3. read new alias configuration */
_schedule_lcu_update(lcu, device);
lcu->suc_data.device = NULL;
+ dasd_put_device(device);
spin_unlock_irqrestore(&lcu->lock, flags);
}
@@ -989,6 +998,8 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
}
lcu->suc_data.reason = reason;
lcu->suc_data.device = device;
+ dasd_get_device(device);
spin_unlock(&lcu->lock);
- schedule_work(&lcu->suc_data.worker);
+ if (!schedule_work(&lcu->suc_data.worker))
+ dasd_put_device(device);
};
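The dasd_alias changes all follow one ownership rule: take a device reference before queueing work, drop it immediately when schedule_work()/schedule_delayed_work() reports the item was already pending, and let the worker itself drop the reference it consumed. A self-contained sketch of that rule with stand-in types (every name below is illustrative, not the kernel API):

#include <stdbool.h>
#include <stdio.h>

struct device { int refs; };

static void get_device(struct device *d) { d->refs++; }
static void put_device(struct device *d) { d->refs--; }

/* stand-in for schedule_work(): false means it was already queued */
static bool schedule_work_stub(bool already_pending)
{
        return !already_pending;
}

int main(void)
{
        struct device dev = { .refs = 1 };

        get_device(&dev);               /* reference owned by the worker */
        if (!schedule_work_stub(true))  /* already pending: no new owner */
                put_device(&dev);       /* so drop the extra reference */

        printf("refs = %d\n", dev.refs);        /* 1: balanced either way */
        return 0;
}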
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index cb61f300f8b5..277b5c8c825c 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -67,7 +67,7 @@ static const u8 DASD_DIAG_CMS1[] = { 0xc3, 0xd4, 0xe2, 0xf1 };/* EBCDIC CMS1 */
* and function code cmd.
* In case of an exception return 3. Otherwise return result of bitwise OR of
* resulting condition code and DIAG return code. */
-static inline int dia250(void *iob, int cmd)
+static inline int __dia250(void *iob, int cmd)
{
register unsigned long reg2 asm ("2") = (unsigned long) iob;
typedef union {
@@ -77,7 +77,6 @@ static inline int dia250(void *iob, int cmd)
int rc;
rc = 3;
- diag_stat_inc(DIAG_STAT_X250);
asm volatile(
" diag 2,%2,0x250\n"
"0: ipm %0\n"
@@ -91,6 +90,12 @@ static inline int dia250(void *iob, int cmd)
return rc;
}
+static inline int dia250(void *iob, int cmd)
+{
+ diag_stat_inc(DIAG_STAT_X250);
+ return __dia250(iob, cmd);
+}
+
/* Initialize block I/O to DIAG device using the specified blocksize and
* block offset. On success, return zero and set end_block to contain the
* number of blocks on the device minus the specified offset. Return non-zero
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 074878b55a0b..d044f3f273be 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -944,6 +944,7 @@ struct fib {
*/
struct list_head fiblink;
void *data;
+ u32 vector_no;
struct hw_fib *hw_fib_va; /* Actual shared object */
dma_addr_t hw_fib_pa; /* physical address of hw_fib*/
};
@@ -2113,6 +2114,7 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
int aac_acquire_irq(struct aac_dev *dev);
void aac_free_irq(struct aac_dev *dev);
const char *aac_driverinfo(struct Scsi_Host *);
+void aac_fib_vector_assign(struct aac_dev *dev);
struct fib *aac_fib_alloc(struct aac_dev *dev);
int aac_fib_setup(struct aac_dev *dev);
void aac_fib_map_free(struct aac_dev *dev);
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index a1f90fe849c9..4cbf54928640 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -83,13 +83,38 @@ static int fib_map_alloc(struct aac_dev *dev)
void aac_fib_map_free(struct aac_dev *dev)
{
- pci_free_consistent(dev->pdev,
- dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
- dev->hw_fib_va, dev->hw_fib_pa);
+ if (dev->hw_fib_va && dev->max_fib_size) {
+ pci_free_consistent(dev->pdev,
+ (dev->max_fib_size *
+ (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)),
+ dev->hw_fib_va, dev->hw_fib_pa);
+ }
dev->hw_fib_va = NULL;
dev->hw_fib_pa = 0;
}
+void aac_fib_vector_assign(struct aac_dev *dev)
+{
+ u32 i = 0;
+ u32 vector = 1;
+ struct fib *fibptr = NULL;
+
+ for (i = 0, fibptr = &dev->fibs[i];
+ i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
+ i++, fibptr++) {
+ if ((dev->max_msix == 1) ||
+ (i > ((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1)
+ - dev->vector_cap))) {
+ fibptr->vector_no = 0;
+ } else {
+ fibptr->vector_no = vector;
+ vector++;
+ if (vector == dev->max_msix)
+ vector = 1;
+ }
+ }
+}
+
/**
* aac_fib_setup - setup the fibs
* @dev: Adapter to set up
@@ -151,6 +176,12 @@ int aac_fib_setup(struct aac_dev * dev)
hw_fib_pa = hw_fib_pa +
dev->max_fib_size + sizeof(struct aac_fib_xporthdr);
}
+
+ /*
+ * Assign vector numbers to fibs
+ */
+ aac_fib_vector_assign(dev);
+
/*
* Add the fib chain to the free list
*/
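aac_fib_vector_assign() above spreads fibs round-robin across MSI-X vectors 1..max_msix-1 and forces the last vector_cap fibs, plus the whole single-vector case, onto vector 0. A userspace sketch of the resulting distribution, with made-up sizes:

#include <stdio.h>

int main(void)
{
        int total = 12, max_msix = 4, vector_cap = 3;   /* made-up sizes */
        int vector = 1;

        for (int i = 0; i < total; i++) {
                int v;

                if (max_msix == 1 || i > (total - 1) - vector_cap) {
                        v = 0;                  /* tail fibs use vector 0 */
                } else {
                        v = vector++;           /* round-robin 1..max_msix-1 */
                        if (vector == max_msix)
                                vector = 1;
                }
                printf("fib %2d -> vector %d\n", i, v);
        }
        return 0;
}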
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 3b6e5c67e853..aa6eccb8940b 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1404,8 +1404,18 @@ static int aac_acquire_resources(struct aac_dev *dev)
aac_adapter_enable_int(dev);
- if (!dev->sync_mode)
+ /* max_msix may change after EEH,
+ * so re-assign vectors to fibs
+ */
+ aac_fib_vector_assign(dev);
+
+ if (!dev->sync_mode) {
+ /* After EEH recovery or suspend resume, max_msix count
+ * may change, therefore update it in init as well.
+ */
aac_adapter_start(dev);
+ dev->init->Sa_MSIXVectors = cpu_to_le32(dev->max_msix);
+ }
return 0;
error_iounmap:
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 2aa34ea8ceb1..bc0203f3d243 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -156,8 +156,8 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
break;
if (dev->msi_enabled && dev->max_msix > 1)
atomic_dec(&dev->rrq_outstanding[vector_no]);
- aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL);
dev->host_rrq[index++] = 0;
+ aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL);
if (index == (vector_no + 1) * dev->vector_cap)
index = vector_no * dev->vector_cap;
dev->host_rrq_idx[vector_no] = index;
@@ -452,36 +452,20 @@ static int aac_src_deliver_message(struct fib *fib)
#endif
u16 hdr_size = le16_to_cpu(fib->hw_fib_va->header.Size);
+ u16 vector_no;
atomic_inc(&q->numpending);
if (dev->msi_enabled && fib->hw_fib_va->header.Command != AifRequest &&
dev->max_msix > 1) {
- u_int16_t vector_no, first_choice = 0xffff;
-
- vector_no = dev->fibs_pushed_no % dev->max_msix;
- do {
- vector_no += 1;
- if (vector_no == dev->max_msix)
- vector_no = 1;
- if (atomic_read(&dev->rrq_outstanding[vector_no]) <
- dev->vector_cap)
- break;
- if (0xffff == first_choice)
- first_choice = vector_no;
- else if (vector_no == first_choice)
- break;
- } while (1);
- if (vector_no == first_choice)
- vector_no = 0;
- atomic_inc(&dev->rrq_outstanding[vector_no]);
- if (dev->fibs_pushed_no == 0xffffffff)
- dev->fibs_pushed_no = 0;
- else
- dev->fibs_pushed_no++;
+ vector_no = fib->vector_no;
fib->hw_fib_va->header.Handle += (vector_no << 16);
+ } else {
+ vector_no = 0;
}
+ atomic_inc(&dev->rrq_outstanding[vector_no]);
+
if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
/* Calculate the amount to the fibsize bits */
fibsize = (hdr_size + 127) / 128 - 1;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index b846a4683562..fc6a83188c1e 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -1336,6 +1336,7 @@ ahc_platform_set_tags(struct ahc_softc *ahc, struct scsi_device *sdev,
case AHC_DEV_Q_TAGGED:
scsi_change_queue_depth(sdev,
dev->openings + dev->active);
+ break;
default:
/*
* We allow the OS to queue 2 untagged transactions to
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index fe0c5143f8e6..758f76e88704 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -4470,6 +4470,7 @@ put_shost:
scsi_host_put(phba->shost);
free_kset:
iscsi_boot_destroy_kset(phba->boot_kset);
+ phba->boot_kset = NULL;
return -ENOMEM;
}
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 361358134315..93880ed6291c 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -562,7 +562,7 @@ static int mode_select_handle_sense(struct scsi_device *sdev,
/*
* Command Lock contention
*/
- err = SCSI_DH_RETRY;
+ err = SCSI_DH_IMM_RETRY;
break;
default:
break;
@@ -612,6 +612,8 @@ retry:
err = mode_select_handle_sense(sdev, h->sense);
if (err == SCSI_DH_RETRY && retry_cnt--)
goto retry;
+ if (err == SCSI_DH_IMM_RETRY)
+ goto retry;
}
if (err == SCSI_DH_OK) {
h->state = RDAC_STATE_ACTIVE;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 536cd5a80422..43ac62623bf2 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -4003,13 +4003,17 @@ static ssize_t ipr_store_update_fw(struct device *dev,
struct ipr_sglist *sglist;
char fname[100];
char *src;
- int len, result, dnld_size;
+ char *endline;
+ int result, dnld_size;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- len = snprintf(fname, 99, "%s", buf);
- fname[len-1] = '\0';
+ snprintf(fname, sizeof(fname), "%s", buf);
+
+ endline = strchr(fname, '\n');
+ if (endline)
+ *endline = '\0';
if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 16a1935cc9c1..e197c6f39de2 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -2192,7 +2192,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
/* Clear outstanding commands array. */
for (que = 0; que < ha->max_req_queues; que++) {
req = ha->req_q_map[que];
- if (!req)
+ if (!req || !test_bit(que, ha->req_qid_map))
continue;
req->out_ptr = (void *)(req->ring + req->length);
*req->out_ptr = 0;
@@ -2209,7 +2209,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
for (que = 0; que < ha->max_rsp_queues; que++) {
rsp = ha->rsp_q_map[que];
- if (!rsp)
+ if (!rsp || !test_bit(que, ha->rsp_qid_map))
continue;
rsp->in_ptr = (void *)(rsp->ring + rsp->length);
*rsp->in_ptr = 0;
@@ -4961,7 +4961,7 @@ qla25xx_init_queues(struct qla_hw_data *ha)
for (i = 1; i < ha->max_rsp_queues; i++) {
rsp = ha->rsp_q_map[i];
- if (rsp) {
+ if (rsp && test_bit(i, ha->rsp_qid_map)) {
rsp->options &= ~BIT_0;
ret = qla25xx_init_rsp_que(base_vha, rsp);
if (ret != QLA_SUCCESS)
@@ -4976,8 +4976,8 @@ qla25xx_init_queues(struct qla_hw_data *ha)
}
for (i = 1; i < ha->max_req_queues; i++) {
req = ha->req_q_map[i];
- if (req) {
- /* Clear outstanding commands array. */
+ if (req && test_bit(i, ha->req_qid_map)) {
+ /* Clear outstanding commands array. */
req->options &= ~BIT_0;
ret = qla25xx_init_req_que(base_vha, req);
if (ret != QLA_SUCCESS)
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index ccf6a7f99024..0e59731f95ad 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -3018,9 +3018,9 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
"MSI-X: Failed to enable support "
"-- %d/%d\n Retry with %d vectors.\n",
ha->msix_count, ret, ret);
+ ha->msix_count = ret;
+ ha->max_rsp_queues = ha->msix_count - 1;
}
- ha->msix_count = ret;
- ha->max_rsp_queues = ha->msix_count - 1;
ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
ha->msix_count, GFP_KERNEL);
if (!ha->msix_entries) {
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index c5dd594f6c31..cf7ba52bae66 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -600,7 +600,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
/* Delete request queues */
for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
req = ha->req_q_map[cnt];
- if (req) {
+ if (req && test_bit(cnt, ha->req_qid_map)) {
ret = qla25xx_delete_req_que(vha, req);
if (ret != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x00ea,
@@ -614,7 +614,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
/* Delete response queues */
for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
rsp = ha->rsp_q_map[cnt];
- if (rsp) {
+ if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
ret = qla25xx_delete_rsp_que(vha, rsp);
if (ret != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x00eb,
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index bfa9a64c316b..fc6674db4f2d 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -397,6 +397,9 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
int cnt;
for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
+ if (!test_bit(cnt, ha->req_qid_map))
+ continue;
+
req = ha->req_q_map[cnt];
qla2x00_free_req_que(ha, req);
}
@@ -404,6 +407,9 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
ha->req_q_map = NULL;
for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
+ if (!test_bit(cnt, ha->rsp_qid_map))
+ continue;
+
rsp = ha->rsp_q_map[cnt];
qla2x00_free_rsp_que(ha, rsp);
}
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index ddbe2e7ac14d..c3e622524604 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -395,6 +395,10 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) {
for (i = 0; i < vha->hw->max_req_queues; i++) {
struct req_que *req = vha->hw->req_q_map[i];
+
+ if (!test_bit(i, vha->hw->req_qid_map))
+ continue;
+
if (req || !buf) {
length = req ?
req->length : REQUEST_ENTRY_CNT_24XX;
@@ -408,6 +412,10 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
} else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) {
for (i = 0; i < vha->hw->max_rsp_queues; i++) {
struct rsp_que *rsp = vha->hw->rsp_q_map[i];
+
+ if (!test_bit(i, vha->hw->rsp_qid_map))
+ continue;
+
if (rsp || !buf) {
length = rsp ?
rsp->length : RESPONSE_ENTRY_CNT_MQ;
@@ -634,6 +642,10 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) {
for (i = 0; i < vha->hw->max_req_queues; i++) {
struct req_que *req = vha->hw->req_q_map[i];
+
+ if (!test_bit(i, vha->hw->req_qid_map))
+ continue;
+
if (req || !buf) {
qla27xx_insert16(i, buf, len);
qla27xx_insert16(1, buf, len);
@@ -645,6 +657,10 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
} else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) {
for (i = 0; i < vha->hw->max_rsp_queues; i++) {
struct rsp_que *rsp = vha->hw->rsp_q_map[i];
+
+ if (!test_bit(i, vha->hw->rsp_qid_map))
+ continue;
+
if (rsp || !buf) {
qla27xx_insert16(i, buf, len);
qla27xx_insert16(1, buf, len);
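Every qla2xxx hunk above adds the same guard: a req_q_map/rsp_q_map entry is only trusted when its bit is set in the matching qid_map, so a stale pointer left by a deleted queue is skipped instead of dereferenced. A compact sketch of the pattern (sizes and values illustrative):

#include <stdio.h>

int main(void)
{
        void *req_q_map[4] = { (void *)1, (void *)1, NULL, (void *)1 };
        unsigned long req_qid_map = 0x9;        /* only queues 0 and 3 valid */

        for (int i = 0; i < 4; i++) {
                if (!(req_qid_map & (1UL << i)))
                        continue;               /* stale map entry: skip */
                if (req_q_map[i])
                        printf("process queue %d\n", i);
        }
        return 0;
}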
diff --git a/drivers/scsi/scsi_common.c b/drivers/scsi/scsi_common.c
index c126966130ab..ce79de822e46 100644
--- a/drivers/scsi/scsi_common.c
+++ b/drivers/scsi/scsi_common.c
@@ -278,8 +278,16 @@ int scsi_set_sense_information(u8 *buf, int buf_len, u64 info)
ucp[3] = 0;
put_unaligned_be64(info, &ucp[4]);
} else if ((buf[0] & 0x7f) == 0x70) {
- buf[0] |= 0x80;
- put_unaligned_be64(info, &buf[3]);
+ /*
+ * Only set the 'VALID' bit if we can represent the value
+ * correctly; otherwise just fill out the lower bytes and
+ * clear the 'VALID' flag.
+ */
+ if (info <= 0xffffffffUL)
+ buf[0] |= 0x80;
+ else
+ buf[0] &= 0x7f;
+ put_unaligned_be32((u32)info, &buf[3]);
}
return 0;
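The scsi_common change encodes a fixed-format sense constraint: the INFORMATION field is only 32 bits wide, so the VALID bit may be set only when the value fits. A userspace sketch of the same rule (the helper name is illustrative, not a kernel API):

#include <stdint.h>
#include <stdio.h>

/* illustrative helper: fill the fixed-format INFORMATION field */
static void set_fixed_sense_info(uint8_t *buf, uint64_t info)
{
        if (info <= 0xffffffffULL)
                buf[0] |= 0x80;         /* VALID: field holds exact value */
        else
                buf[0] &= 0x7f;         /* truncated below: leave VALID clear */

        buf[3] = info >> 24;            /* big-endian 32-bit INFORMATION */
        buf[4] = info >> 16;
        buf[5] = info >> 8;
        buf[6] = info;
}

int main(void)
{
        uint8_t sense[18] = { 0x70 };   /* fixed format, current error */

        set_fixed_sense_info(sense, 0x1ffffffffULL);
        printf("VALID=%d\n", sense[0] >> 7);    /* 0: value did not fit */
        return 0;
}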
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 2c1160c7ec92..da2e068ee47d 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -205,6 +205,7 @@ static struct {
{"Intel", "Multi-Flex", NULL, BLIST_NO_RSOC},
{"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
{"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN},
+ {"Marvell", "Console", NULL, BLIST_SKIP_VPD_PAGES},
{"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
{"MATSHITA", "DMC-LC5", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
{"MATSHITA", "DMC-LC40", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
@@ -227,6 +228,7 @@ static struct {
{"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC},
{"Promise", "", NULL, BLIST_SPARSELUN},
{"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024},
+ {"SYNOLOGY", "iSCSI Storage", NULL, BLIST_MAX_1024},
{"QUANTUM", "XP34301", "1071", BLIST_NOTQ},
{"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN},
{"SanDisk", "ImageMate CF-SD1", NULL, BLIST_FORCELUN},
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 21930c9ac9cd..c8115b4fe474 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1192,16 +1192,18 @@ static void __scsi_remove_target(struct scsi_target *starget)
void scsi_remove_target(struct device *dev)
{
struct Scsi_Host *shost = dev_to_shost(dev->parent);
- struct scsi_target *starget;
+ struct scsi_target *starget, *last_target = NULL;
unsigned long flags;
restart:
spin_lock_irqsave(shost->host_lock, flags);
list_for_each_entry(starget, &shost->__targets, siblings) {
- if (starget->state == STARGET_DEL)
+ if (starget->state == STARGET_DEL ||
+ starget == last_target)
continue;
if (starget->dev.parent == dev || &starget->dev == dev) {
kref_get(&starget->reap_ref);
+ last_target = starget;
spin_unlock_irqrestore(shost->host_lock, flags);
__scsi_remove_target(starget);
scsi_target_reap(starget);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 4e08d1cd704d..0d7c6e86f149 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -648,7 +648,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
*/
if (sdkp->lbprz) {
q->limits.discard_alignment = 0;
- q->limits.discard_granularity = 1;
+ q->limits.discard_granularity = logical_block_size;
} else {
q->limits.discard_alignment = sdkp->unmap_alignment *
logical_block_size;
@@ -1275,18 +1275,19 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
struct scsi_device *sdp = sdkp->device;
struct Scsi_Host *host = sdp->host;
+ sector_t capacity = logical_to_sectors(sdp, sdkp->capacity);
int diskinfo[4];
/* default to most commonly used values */
- diskinfo[0] = 0x40; /* 1 << 6 */
- diskinfo[1] = 0x20; /* 1 << 5 */
- diskinfo[2] = sdkp->capacity >> 11;
-
+ diskinfo[0] = 0x40; /* 1 << 6 */
+ diskinfo[1] = 0x20; /* 1 << 5 */
+ diskinfo[2] = capacity >> 11;
+
/* override with calculated, extended default, or driver values */
if (host->hostt->bios_param)
- host->hostt->bios_param(sdp, bdev, sdkp->capacity, diskinfo);
+ host->hostt->bios_param(sdp, bdev, capacity, diskinfo);
else
- scsicam_bios_param(bdev, sdkp->capacity, diskinfo);
+ scsicam_bios_param(bdev, capacity, diskinfo);
geo->heads = diskinfo[0];
geo->sectors = diskinfo[1];
@@ -2337,14 +2338,6 @@ got_data:
if (sdkp->capacity > 0xffffffff)
sdp->use_16_for_rw = 1;
- /* Rescale capacity to 512-byte units */
- if (sector_size == 4096)
- sdkp->capacity <<= 3;
- else if (sector_size == 2048)
- sdkp->capacity <<= 2;
- else if (sector_size == 1024)
- sdkp->capacity <<= 1;
-
blk_queue_physical_block_size(sdp->request_queue,
sdkp->physical_block_size);
sdkp->device->sector_size = sector_size;
@@ -2812,11 +2805,6 @@ static int sd_try_extended_inquiry(struct scsi_device *sdp)
return 0;
}
-static inline u32 logical_to_sectors(struct scsi_device *sdev, u32 blocks)
-{
- return blocks << (ilog2(sdev->sector_size) - 9);
-}
-
/**
* sd_revalidate_disk - called the first time a new disk is seen,
* performs disk spin up, read_capacity, etc.
@@ -2893,14 +2881,14 @@ static int sd_revalidate_disk(struct gendisk *disk)
sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_CACHE_SIZE)
rw_max = q->limits.io_opt =
- logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
+ sdkp->opt_xfer_blocks * sdp->sector_size;
else
rw_max = BLK_DEF_MAX_SECTORS;
/* Combine with controller limits */
q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
- set_capacity(disk, sdkp->capacity);
+ set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity));
sd_config_write_same(sdkp);
kfree(buffer);
@@ -3268,8 +3256,8 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
struct scsi_disk *sdkp = dev_get_drvdata(dev);
int ret = 0;
- if (!sdkp)
- return 0; /* this can happen */
+ if (!sdkp) /* E.g.: runtime suspend following sd_remove() */
+ return 0;
if (sdkp->WCE && sdkp->media_present) {
sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
@@ -3308,6 +3296,9 @@ static int sd_resume(struct device *dev)
{
struct scsi_disk *sdkp = dev_get_drvdata(dev);
+ if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */
+ return 0;
+
if (!sdkp->device->manage_start_stop)
return 0;
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 5f2a84aff29f..654630bb7d0e 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -65,7 +65,7 @@ struct scsi_disk {
struct device dev;
struct gendisk *disk;
atomic_t openers;
- sector_t capacity; /* size in 512-byte sectors */
+ sector_t capacity; /* size in logical blocks */
u32 max_xfer_blocks;
u32 opt_xfer_blocks;
u32 max_ws_blocks;
@@ -146,6 +146,11 @@ static inline int scsi_medium_access_command(struct scsi_cmnd *scmd)
return 0;
}
+static inline sector_t logical_to_sectors(struct scsi_device *sdev, sector_t blocks)
+{
+ return blocks << (ilog2(sdev->sector_size) - 9);
+}
+
/*
* A DIF-capable target device can be formatted with different
* protection schemes. Currently 0 through 3 are defined:
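The new logical_to_sectors() converts logical blocks to the 512-byte sectors used by the block layer by shifting with ilog2(sector_size) - 9. A quick userspace check of the arithmetic, using __builtin_ctz as a stand-in for ilog2 on power-of-two sector sizes:

#include <stdio.h>

static unsigned long long logical_to_sectors(unsigned int sector_size,
                                             unsigned long long blocks)
{
        /* for powers of two, ctz(sector_size) == ilog2(sector_size) */
        return blocks << (__builtin_ctz(sector_size) - 9);
}

int main(void)
{
        printf("%llu\n", logical_to_sectors(4096, 100));        /* 800 */
        printf("%llu\n", logical_to_sectors(512, 100));         /* 100 */
        return 0;
}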
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 503ab8b46c0b..ae7d9bdf409c 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -652,7 +652,8 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
else
hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
hp->dxfer_len = mxsize;
- if (hp->dxfer_direction == SG_DXFER_TO_DEV)
+ if ((hp->dxfer_direction == SG_DXFER_TO_DEV) ||
+ (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV))
hp->dxferp = (char __user *)buf + cmd_size;
else
hp->dxferp = NULL;
@@ -1261,7 +1262,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
}
sfp->mmap_called = 1;
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_private_data = sfp;
vma->vm_ops = &sg_mmap_vm_ops;
return 0;
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 8bd54a64efd6..64c867405ad4 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -144,6 +144,9 @@ static int sr_runtime_suspend(struct device *dev)
{
struct scsi_cd *cd = dev_get_drvdata(dev);
+ if (!cd) /* E.g.: runtime suspend following sr_remove() */
+ return 0;
+
if (cd->media_present)
return -EBUSY;
else
@@ -985,6 +988,7 @@ static int sr_remove(struct device *dev)
scsi_autopm_get_device(cd->device);
del_gendisk(cd->disk);
+ dev_set_drvdata(dev, NULL);
mutex_lock(&sr_ref_mutex);
kref_put(&cd->kref, sr_kref_release);
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 3fba42ad9fb8..0f636cc4c809 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -889,8 +889,9 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
do_work = true;
process_err_fn = storvsc_remove_lun;
break;
- case (SRB_STATUS_ABORTED | SRB_STATUS_AUTOSENSE_VALID):
- if ((asc == 0x2a) && (ascq == 0x9)) {
+ case SRB_STATUS_ABORTED:
+ if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID &&
+ (asc == 0x2a) && (ascq == 0x9)) {
do_work = true;
process_err_fn = storvsc_device_scan;
/*
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c
index 91a003011acf..a9bac3bf20de 100644
--- a/drivers/sh/pm_runtime.c
+++ b/drivers/sh/pm_runtime.c
@@ -34,7 +34,7 @@ static struct pm_clk_notifier_block platform_bus_notifier = {
static int __init sh_pm_runtime_init(void)
{
- if (IS_ENABLED(CONFIG_ARCH_SHMOBILE)) {
+ if (IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_ARCH_SHMOBILE)) {
if (!of_find_compatible_node(NULL, NULL,
"renesas,cpg-mstp-clocks"))
return 0;
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index aebad36391c9..8feac599e9ab 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -1571,6 +1571,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
as->use_cs_gpios = true;
if (atmel_spi_is_v2(as) &&
+ pdev->dev.of_node &&
!of_get_property(pdev->dev.of_node, "cs-gpios", NULL)) {
as->use_cs_gpios = false;
master->num_chipselect = 4;
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 1f8903d356e5..ed8283e7397a 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -1024,6 +1024,16 @@ static int omap2_mcspi_setup(struct spi_device *spi)
spi->controller_state = cs;
/* Link this to context save list */
list_add_tail(&cs->node, &ctx->cs);
+
+ if (gpio_is_valid(spi->cs_gpio)) {
+ ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev));
+ if (ret) {
+ dev_err(&spi->dev, "failed to request gpio\n");
+ return ret;
+ }
+ gpio_direction_output(spi->cs_gpio,
+ !(spi->mode & SPI_CS_HIGH));
+ }
}
if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) {
@@ -1032,15 +1042,6 @@ static int omap2_mcspi_setup(struct spi_device *spi)
return ret;
}
- if (gpio_is_valid(spi->cs_gpio)) {
- ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev));
- if (ret) {
- dev_err(&spi->dev, "failed to request gpio\n");
- return ret;
- }
- gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
- }
-
ret = pm_runtime_get_sync(mcspi->dev);
if (ret < 0)
return ret;
diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig
index 356e10969272..970cce136a58 100644
--- a/drivers/staging/android/ion/Kconfig
+++ b/drivers/staging/android/ion/Kconfig
@@ -27,6 +27,12 @@ config ION_DUMMY
one doesn't have access to hardware drivers that
use ION.
+config ION_HISI
+ tristate "Ion for Hisi"
+ depends on ARCH_HISI && ION
+ help
+ Choose this option if you wish to use ION on a Hisilicon platform.
+
config ION_TEGRA
tristate "Ion for Tegra"
depends on ARCH_TEGRA && ION
diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile
index b56fd2bf2b4f..4d57f46133bf 100644
--- a/drivers/staging/android/ion/Makefile
+++ b/drivers/staging/android/ion/Makefile
@@ -7,4 +7,4 @@ endif
obj-$(CONFIG_ION_DUMMY) += ion_dummy_driver.o
obj-$(CONFIG_ION_TEGRA) += tegra/
-
+obj-$(CONFIG_ION_HISI) += hisilicon/
diff --git a/drivers/staging/android/ion/hisilicon/Makefile b/drivers/staging/android/ion/hisilicon/Makefile
new file mode 100644
index 000000000000..f123a57ce456
--- /dev/null
+++ b/drivers/staging/android/ion/hisilicon/Makefile
@@ -0,0 +1,2 @@
+ccflags-y += -I$(srctree)/drivers/staging/android
+obj-y += hisi_ion.o
diff --git a/drivers/staging/android/ion/hisilicon/hisi_ion.c b/drivers/staging/android/ion/hisilicon/hisi_ion.c
new file mode 100644
index 000000000000..7c057f5951c1
--- /dev/null
+++ b/drivers/staging/android/ion/hisilicon/hisi_ion.c
@@ -0,0 +1,266 @@
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#define pr_fmt(fmt) "ion: " fmt
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/hisi_ion.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/dma-mapping.h>
+#include "../ion_priv.h"
+
+struct hisi_ion_name_id_table {
+ const char *name;
+ unsigned int id;
+};
+
+static struct hisi_ion_name_id_table name_id_table[] __initdata = {
+ {"fb", ION_FB_HEAP_ID},
+ {"vpu", ION_VPU_HEAP_ID},
+ {"jpu", ION_JPU_HEAP_ID},
+ {"gralloc-carveout", ION_GRALLOC_HEAP_ID},
+ {"overlay", ION_OVERLAY_HEAP_ID},
+ {"sys_user", ION_SYSTEM_HEAP_ID},
+ {"sys_contig", ION_SYSTEM_CONTIG_HEAP_ID},
+ {"cma", ION_HEAP_TYPE_DMA},
+};
+
+struct hisi_ion_type_id_table {
+ const char *name;
+ enum ion_heap_type type;
+};
+
+static struct hisi_ion_type_id_table type_id_table[] = {
+ {"ion_system_contig", ION_HEAP_TYPE_SYSTEM_CONTIG},
+ {"ion_system", ION_HEAP_TYPE_SYSTEM},
+ {"ion_carveout", ION_HEAP_TYPE_CARVEOUT},
+ {"ion_chunk", ION_HEAP_TYPE_CHUNK},
+ {"ion_dma", ION_HEAP_TYPE_DMA},
+ {"ion_custom", ION_HEAP_TYPE_CUSTOM},
+ {"ion_cma", ION_HEAP_TYPE_DMA},
+};
+
+#define HISI_ION_HEAP_NUM 16
+
+static struct ion_platform_data hisi_ion_platform_data = {0};
+static struct ion_platform_heap hisi_ion_platform_heap[HISI_ION_HEAP_NUM] = {{0} };
+
+static struct ion_device *hisi_ion_device;
+static struct ion_heap *hisi_ion_heap[HISI_ION_HEAP_NUM] = {NULL};
+
+int hisi_ion_get_heap_info(unsigned int id, struct ion_heap_info_data *data)
+{
+ int i;
+
+ BUG_ON(!data);
+
+ for (i = 0; i < hisi_ion_platform_data.nr; i++) {
+ if (hisi_ion_platform_heap[i].id == id) {
+ data->heap_phy = hisi_ion_platform_heap[i].base;
+ data->heap_size = hisi_ion_platform_heap[i].size;
+ strlcpy((void *)data->name, hisi_ion_platform_heap[i].name, HISI_ION_NAME_LEN);
+ pr_info("heap info: id %d name %s phy 0x%llx size %u\n",
+ id, data->name, data->heap_phy, data->heap_size);
+ return 0;
+ }
+ }
+ pr_err("in %s please check the id %d\n", __func__, id);
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(hisi_ion_get_heap_info);
+
+struct ion_device *get_ion_device(void)
+{
+ return hisi_ion_device;
+}
+EXPORT_SYMBOL(get_ion_device);
+
+static int __init get_id_by_name(const char *name, unsigned int *id)
+{
+ int i, n;
+
+ n = ARRAY_SIZE(name_id_table);
+ for (i = 0; i < n; i++) {
+ if (strncmp(name, name_id_table[i].name, HISI_ION_NAME_LEN))
+ continue;
+
+ *id = name_id_table[i].id;
+ return 0;
+ }
+ return -1;
+}
+
+static int __init get_type_by_name(const char *name, enum ion_heap_type *type)
+{
+ int i, n;
+
+ n = ARRAY_SIZE(type_id_table);
+ for (i = 0; i < n; i++) {
+ if (strncmp(name, type_id_table[i].name, HISI_ION_NAME_LEN))
+ continue;
+
+ *type = type_id_table[i].type;
+ return 0;
+ }
+
+ return -1;
+}
+
+static u64 hisi_dmamask = DMA_BIT_MASK(32);
+
+static struct platform_device ion_cma_device = {
+ .name = "ion-cma-device",
+ .id = -1,
+ .dev = {
+ .dma_mask = &hisi_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ }
+};
+
+static int __init hisi_ion_setup_platform_data(struct platform_device *dev)
+{
+ struct device_node *node, *np;
+ const char *heap_name;
+ const char *type_name;
+ unsigned int id;
+ unsigned int range[2] = {0, 0};
+ enum ion_heap_type type;
+ int ret;
+ int index = 0;
+
+ node = dev->dev.of_node;
+ for_each_child_of_node(node, np) {
+ ret = of_property_read_string(np, "heap-name", &heap_name);
+ if (ret < 0) {
+ pr_err("in node %s please check the name property of node %s\n", __func__, np->name);
+ continue;
+ }
+
+ ret = get_id_by_name(heap_name, &id);
+ if (ret < 0) {
+ pr_err("in node %s please check the name %s\n", __func__, heap_name);
+ continue;
+ }
+
+ ret = of_property_read_u32_array(np, "heap-range", range, ARRAY_SIZE(range));
+ if (ret < 0) {
+ pr_err("in node %s please check the range property of node %s\n", __func__, np->name);
+ continue;
+ }
+
+ ret = of_property_read_string(np, "heap-type", &type_name);
+ if (ret < 0) {
+ pr_err("in node %s please check the type property of node %s\n", __func__, np->name);
+ continue;
+ }
+
+ ret = get_type_by_name(type_name, &type);
+ if (ret < 0) {
+ pr_err("in node %s please check the type %s\n", __func__, type_name);
+ continue;
+ }
+
+ hisi_ion_platform_heap[index].name = heap_name;
+ hisi_ion_platform_heap[index].base = range[0];
+ hisi_ion_platform_heap[index].size = range[1];
+ hisi_ion_platform_heap[index].id = id;
+ hisi_ion_platform_heap[index].type = type;
+ if (type == ION_HEAP_TYPE_DMA) {
+ /* ion_cma_device.dev.archdata.dma_ops = swiotlb_dma_ops; */
+ hisi_ion_platform_heap[index].priv =
+ (void *)&ion_cma_device.dev;
+ }
+ index++;
+ }
+
+ hisi_ion_platform_data.nr = index;
+ hisi_ion_platform_data.heaps = hisi_ion_platform_heap;
+
+ return 0;
+}
+
+static int __init hisi_ion_probe(struct platform_device *pdev)
+{
+ int i, err;
+ struct ion_heap *heap;
+ struct ion_platform_heap *heap_data;
+
+ if (hisi_ion_setup_platform_data(pdev)) {
+ pr_err("hisi_ion_setup_platform_data is failed\n");
+ return -EINVAL;
+ }
+
+ hisi_ion_device = ion_device_create(NULL);
+ if (IS_ERR_OR_NULL(hisi_ion_device))
+ return hisi_ion_device ? PTR_ERR(hisi_ion_device) : -ENOMEM;
+ /*
+ * create the heaps as specified in the board file
+ */
+ for (i = 0; i < hisi_ion_platform_data.nr; i++) {
+ heap_data = &hisi_ion_platform_data.heaps[i];
+ heap = ion_heap_create(heap_data);
+ if (IS_ERR_OR_NULL(heap)) {
+ err = heap ? PTR_ERR(heap) : -ENOMEM;
+ goto out;
+ }
+
+ ion_device_add_heap(hisi_ion_device, heap);
+ hisi_ion_heap[i] = heap;
+ }
+ platform_set_drvdata(pdev, hisi_ion_device);
+
+ return 0;
+out:
+ for (i = 0; i < HISI_ION_HEAP_NUM; i++) {
+ if (!hisi_ion_heap[i])
+ continue;
+ ion_heap_destroy(hisi_ion_heap[i]);
+ hisi_ion_heap[i] = NULL;
+ }
+ return err;
+}
+
+static int hisi_ion_remove(struct platform_device *pdev)
+{
+ int i;
+
+ ion_device_destroy(hisi_ion_device);
+ for (i = 0; i < HISI_ION_HEAP_NUM; i++) {
+ if (!hisi_ion_heap[i])
+ continue;
+ ion_heap_destroy(hisi_ion_heap[i]);
+ hisi_ion_heap[i] = NULL;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id hisi_ion_match_table[] = {
+ {.compatible = "hisilicon,ion"},
+ {},
+};
+
+static struct platform_driver hisi_ion_driver = {
+ .probe = hisi_ion_probe,
+ .remove = hisi_ion_remove,
+ .driver = {
+ .name = "ion",
+ .of_match_table = hisi_ion_match_table,
+ },
+};
+
+module_platform_driver(hisi_ion_driver);
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index e237e9f3312d..df560216d702 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -251,8 +251,10 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
* memory coming from the heaps is ready for dma, ie if it has a
* cached mapping that mapping has been invalidated
*/
- for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
+ for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
sg_dma_address(sg) = sg_phys(sg);
+ sg_dma_len(sg) = sg->length;
+ }
mutex_lock(&dev->buffer_lock);
ion_buffer_add(dev, buffer);
mutex_unlock(&dev->buffer_lock);
diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c
index b8dcf5a26cc4..58d46893e5ff 100644
--- a/drivers/staging/android/ion/ion_test.c
+++ b/drivers/staging/android/ion/ion_test.c
@@ -285,8 +285,8 @@ static int __init ion_test_init(void)
{
ion_test_pdev = platform_device_register_simple("ion-test",
-1, NULL, 0);
- if (!ion_test_pdev)
- return -ENODEV;
+ if (IS_ERR(ion_test_pdev))
+ return PTR_ERR(ion_test_pdev);
return platform_driver_probe(&ion_test_platform_driver, ion_test_probe);
}
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 6cc304a4c59b..27fbf1a81097 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -246,24 +246,24 @@ static void ni_writel(struct comedi_device *dev, uint32_t data, int reg)
{
if (dev->mmio)
writel(data, dev->mmio + reg);
-
- outl(data, dev->iobase + reg);
+ else
+ outl(data, dev->iobase + reg);
}
static void ni_writew(struct comedi_device *dev, uint16_t data, int reg)
{
if (dev->mmio)
writew(data, dev->mmio + reg);
-
- outw(data, dev->iobase + reg);
+ else
+ outw(data, dev->iobase + reg);
}
static void ni_writeb(struct comedi_device *dev, uint8_t data, int reg)
{
if (dev->mmio)
writeb(data, dev->mmio + reg);
-
- outb(data, dev->iobase + reg);
+ else
+ outb(data, dev->iobase + reg);
}
static uint32_t ni_readl(struct comedi_device *dev, int reg)
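The ni_mio_common fix is a missing-else bug: with an MMIO mapping present, each register write previously also went out through the port-I/O path. A tiny stand-in showing the corrected dispatch:

#include <stdio.h>

static void mmio_write(unsigned int val) { printf("mmio <- %#x\n", val); }
static void port_write(unsigned int val) { printf("port <- %#x\n", val); }

static void ni_write(int have_mmio, unsigned int val)
{
        if (have_mmio)
                mmio_write(val);
        else                    /* previously missing: both writes ran */
                port_write(val);
}

int main(void)
{
        ni_write(1, 0x10);      /* exactly one write per call now */
        return 0;
}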
diff --git a/drivers/staging/comedi/drivers/ni_tiocmd.c b/drivers/staging/comedi/drivers/ni_tiocmd.c
index 437f723bb34d..823e47910004 100644
--- a/drivers/staging/comedi/drivers/ni_tiocmd.c
+++ b/drivers/staging/comedi/drivers/ni_tiocmd.c
@@ -92,7 +92,7 @@ static int ni_tio_input_inttrig(struct comedi_device *dev,
unsigned long flags;
int ret = 0;
- if (trig_num != cmd->start_src)
+ if (trig_num != cmd->start_arg)
return -EINVAL;
spin_lock_irqsave(&counter->lock, flags);
diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
index 79ac19246548..70b8f4fabfad 100644
--- a/drivers/staging/panel/panel.c
+++ b/drivers/staging/panel/panel.c
@@ -825,8 +825,7 @@ static void lcd_write_cmd_s(int cmd)
lcd_send_serial(0x1F); /* R/W=W, RS=0 */
lcd_send_serial(cmd & 0x0F);
lcd_send_serial((cmd >> 4) & 0x0F);
- /* the shortest command takes at least 40 us */
- usleep_range(40, 100);
+ udelay(40); /* the shortest command takes at least 40 us */
spin_unlock_irq(&pprt_lock);
}
@@ -837,8 +836,7 @@ static void lcd_write_data_s(int data)
lcd_send_serial(0x5F); /* R/W=W, RS=1 */
lcd_send_serial(data & 0x0F);
lcd_send_serial((data >> 4) & 0x0F);
- /* the shortest data takes at least 40 us */
- usleep_range(40, 100);
+ udelay(40); /* the shortest data takes at least 40 us */
spin_unlock_irq(&pprt_lock);
}
@@ -848,20 +846,19 @@ static void lcd_write_cmd_p8(int cmd)
spin_lock_irq(&pprt_lock);
/* present the data to the data port */
w_dtr(pprt, cmd);
- /* maintain the data during 20 us before the strobe */
- usleep_range(20, 100);
+ udelay(20); /* maintain the data during 20 us before the strobe */
bits.e = BIT_SET;
bits.rs = BIT_CLR;
bits.rw = BIT_CLR;
set_ctrl_bits();
- usleep_range(40, 100); /* maintain the strobe during 40 us */
+ udelay(40); /* maintain the strobe during 40 us */
bits.e = BIT_CLR;
set_ctrl_bits();
- usleep_range(120, 500); /* the shortest command takes at least 120 us */
+ udelay(120); /* the shortest command takes at least 120 us */
spin_unlock_irq(&pprt_lock);
}
@@ -871,20 +868,19 @@ static void lcd_write_data_p8(int data)
spin_lock_irq(&pprt_lock);
/* present the data to the data port */
w_dtr(pprt, data);
- /* maintain the data during 20 us before the strobe */
- usleep_range(20, 100);
+ udelay(20); /* maintain the data during 20 us before the strobe */
bits.e = BIT_SET;
bits.rs = BIT_SET;
bits.rw = BIT_CLR;
set_ctrl_bits();
- usleep_range(40, 100); /* maintain the strobe during 40 us */
+ udelay(40); /* maintain the strobe during 40 us */
bits.e = BIT_CLR;
set_ctrl_bits();
- usleep_range(45, 100); /* the shortest data takes at least 45 us */
+ udelay(45); /* the shortest data takes at least 45 us */
spin_unlock_irq(&pprt_lock);
}
@@ -894,7 +890,7 @@ static void lcd_write_cmd_tilcd(int cmd)
spin_lock_irq(&pprt_lock);
/* present the data to the control port */
w_ctr(pprt, cmd);
- usleep_range(60, 120);
+ udelay(60);
spin_unlock_irq(&pprt_lock);
}
@@ -904,7 +900,7 @@ static void lcd_write_data_tilcd(int data)
spin_lock_irq(&pprt_lock);
/* present the data to the data port */
w_dtr(pprt, data);
- usleep_range(60, 120);
+ udelay(60);
spin_unlock_irq(&pprt_lock);
}
@@ -947,7 +943,7 @@ static void lcd_clear_fast_s(void)
lcd_send_serial(0x5F); /* R/W=W, RS=1 */
lcd_send_serial(' ' & 0x0F);
lcd_send_serial((' ' >> 4) & 0x0F);
- usleep_range(40, 100); /* the shortest data takes at least 40 us */
+ udelay(40); /* the shortest data takes at least 40 us */
}
spin_unlock_irq(&pprt_lock);
@@ -971,7 +967,7 @@ static void lcd_clear_fast_p8(void)
w_dtr(pprt, ' ');
/* maintain the data during 20 us before the strobe */
- usleep_range(20, 100);
+ udelay(20);
bits.e = BIT_SET;
bits.rs = BIT_SET;
@@ -979,13 +975,13 @@ static void lcd_clear_fast_p8(void)
set_ctrl_bits();
/* maintain the strobe during 40 us */
- usleep_range(40, 100);
+ udelay(40);
bits.e = BIT_CLR;
set_ctrl_bits();
/* the shortest data takes at least 45 us */
- usleep_range(45, 100);
+ udelay(45);
}
spin_unlock_irq(&pprt_lock);
@@ -1007,7 +1003,7 @@ static void lcd_clear_fast_tilcd(void)
for (pos = 0; pos < lcd.height * lcd.hwidth; pos++) {
/* present the data to the data port */
w_dtr(pprt, ' ');
- usleep_range(60, 120);
+ udelay(60);
}
spin_unlock_irq(&pprt_lock);
diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c
index aa5ab6c80ed4..41ef099b7aa6 100644
--- a/drivers/staging/speakup/selection.c
+++ b/drivers/staging/speakup/selection.c
@@ -142,7 +142,9 @@ static void __speakup_paste_selection(struct work_struct *work)
struct tty_ldisc *ld;
DECLARE_WAITQUEUE(wait, current);
- ld = tty_ldisc_ref_wait(tty);
+ ld = tty_ldisc_ref(tty);
+ if (!ld)
+ goto tty_unref;
tty_buffer_lock_exclusive(&vc->port);
add_wait_queue(&vc->paste_wait, &wait);
@@ -162,6 +164,7 @@ static void __speakup_paste_selection(struct work_struct *work)
tty_buffer_unlock_exclusive(&vc->port);
tty_ldisc_deref(ld);
+tty_unref:
tty_kref_put(tty);
}
diff --git a/drivers/staging/speakup/serialio.c b/drivers/staging/speakup/serialio.c
index 3b5835b28128..a5bbb338f275 100644
--- a/drivers/staging/speakup/serialio.c
+++ b/drivers/staging/speakup/serialio.c
@@ -6,6 +6,11 @@
#include "spk_priv.h"
#include "serialio.h"
+#include <linux/serial_core.h>
+/* WARNING: Do not change this to <linux/serial.h> without testing that
+ * SERIAL_PORT_DFNS does get defined to the appropriate value. */
+#include <asm/serial.h>
+
#ifndef SERIAL_PORT_DFNS
#define SERIAL_PORT_DFNS
#endif
@@ -23,9 +28,15 @@ const struct old_serial_port *spk_serial_init(int index)
int baud = 9600, quot = 0;
unsigned int cval = 0;
int cflag = CREAD | HUPCL | CLOCAL | B9600 | CS8;
- const struct old_serial_port *ser = rs_table + index;
+ const struct old_serial_port *ser;
int err;
+ if (index >= ARRAY_SIZE(rs_table)) {
+ pr_info("no port info for ttyS%d\n", index);
+ return NULL;
+ }
+ ser = rs_table + index;
+
/* Divisor, bytesize and parity */
quot = ser->baud_base / baud;
cval = cflag & (CSIZE | CSTOPB);
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 255204cc43e6..b4bfd706ac94 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1593,7 +1593,8 @@ static int lio_tpg_check_prot_fabric_only(
}
/*
- * Called with spin_lock_bh(struct se_portal_group->session_lock) held..
+ * Called with spin_lock_irq(struct se_portal_group->session_lock) held
+ * or not held.
*
* Also, this function calls iscsit_inc_session_usage_count() on the
* struct iscsi_session in question.
@@ -1601,19 +1602,32 @@ static int lio_tpg_check_prot_fabric_only(
static int lio_tpg_shutdown_session(struct se_session *se_sess)
{
struct iscsi_session *sess = se_sess->fabric_sess_ptr;
+ struct se_portal_group *se_tpg = se_sess->se_tpg;
+ bool local_lock = false;
+
+ if (!spin_is_locked(&se_tpg->session_lock)) {
+ spin_lock_irq(&se_tpg->session_lock);
+ local_lock = true;
+ }
spin_lock(&sess->conn_lock);
if (atomic_read(&sess->session_fall_back_to_erl0) ||
atomic_read(&sess->session_logout) ||
(sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
spin_unlock(&sess->conn_lock);
+ if (local_lock)
+ spin_unlock_irq(&se_tpg->session_lock);
return 0;
}
atomic_set(&sess->session_reinstatement, 1);
spin_unlock(&sess->conn_lock);
iscsit_stop_time2retain_timer(sess);
+ spin_unlock_irq(&se_tpg->session_lock);
+
iscsit_stop_session(sess, 1, 1);
+ if (!local_lock)
+ spin_lock_irq(&se_tpg->session_lock);
return 1;
}
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 88ea4e4f124b..3436a83568ea 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -826,6 +826,49 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
return dev;
}
+/*
+ * Check if the underlying struct block_device request_queue supports
+ * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
+ * in ATA, in which case we need to set TPE=1.
+ */
+bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
+ struct request_queue *q, int block_size)
+{
+ if (!blk_queue_discard(q))
+ return false;
+
+ attrib->max_unmap_lba_count = (q->limits.max_discard_sectors << 9) /
+ block_size;
+ /*
+ * Currently hardcoded to 1 in Linux/SCSI code.
+ */
+ attrib->max_unmap_block_desc_count = 1;
+ attrib->unmap_granularity = q->limits.discard_granularity / block_size;
+ attrib->unmap_granularity_alignment = q->limits.discard_alignment /
+ block_size;
+ return true;
+}
+EXPORT_SYMBOL(target_configure_unmap_from_queue);
+
+/*
+ * Convert from blocksize advertised to the initiator to the 512 byte
+ * units unconditionally used by the Linux block layer.
+ */
+sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
+{
+ switch (dev->dev_attrib.block_size) {
+ case 4096:
+ return lb << 3;
+ case 2048:
+ return lb << 2;
+ case 1024:
+ return lb << 1;
+ default:
+ return lb;
+ }
+}
+EXPORT_SYMBOL(target_to_linux_sector);
+
int target_configure_device(struct se_device *dev)
{
struct se_hba *hba = dev->se_hba;
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index e3195700211a..75f0f08b2a34 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -160,25 +160,11 @@ static int fd_configure_device(struct se_device *dev)
" block_device blocks: %llu logical_block_size: %d\n",
dev_size, div_u64(dev_size, fd_dev->fd_block_size),
fd_dev->fd_block_size);
- /*
- * Check if the underlying struct block_device request_queue supports
- * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
- * in ATA and we need to set TPE=1
- */
- if (blk_queue_discard(q)) {
- dev->dev_attrib.max_unmap_lba_count =
- q->limits.max_discard_sectors;
- /*
- * Currently hardcoded to 1 in Linux/SCSI code..
- */
- dev->dev_attrib.max_unmap_block_desc_count = 1;
- dev->dev_attrib.unmap_granularity =
- q->limits.discard_granularity >> 9;
- dev->dev_attrib.unmap_granularity_alignment =
- q->limits.discard_alignment;
+
+ if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
+ fd_dev->fd_block_size))
pr_debug("IFILE: BLOCK Discard support available,"
- " disabled by default\n");
- }
+ " disabled by default\n");
/*
* Enable write same emulation for IBLOCK and use 0xFFFF as
* the smaller WRITE_SAME(10) only has a two-byte block count.
@@ -490,9 +476,12 @@ fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
if (S_ISBLK(inode->i_mode)) {
/* The backend is block device, use discard */
struct block_device *bdev = inode->i_bdev;
+ struct se_device *dev = cmd->se_dev;
- ret = blkdev_issue_discard(bdev, lba,
- nolb, GFP_KERNEL, 0);
+ ret = blkdev_issue_discard(bdev,
+ target_to_linux_sector(dev, lba),
+ target_to_linux_sector(dev, nolb),
+ GFP_KERNEL, 0);
if (ret < 0) {
pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n",
ret);
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index f29c69120054..2c53dcefff3e 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -121,27 +121,11 @@ static int iblock_configure_device(struct se_device *dev)
dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
dev->dev_attrib.hw_queue_depth = q->nr_requests;
- /*
- * Check if the underlying struct block_device request_queue supports
- * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
- * in ATA and we need to set TPE=1
- */
- if (blk_queue_discard(q)) {
- dev->dev_attrib.max_unmap_lba_count =
- q->limits.max_discard_sectors;
-
- /*
- * Currently hardcoded to 1 in Linux/SCSI code..
- */
- dev->dev_attrib.max_unmap_block_desc_count = 1;
- dev->dev_attrib.unmap_granularity =
- q->limits.discard_granularity >> 9;
- dev->dev_attrib.unmap_granularity_alignment =
- q->limits.discard_alignment;
-
+ if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
+ dev->dev_attrib.hw_block_size))
pr_debug("IBLOCK: BLOCK Discard support available,"
- " disabled by default\n");
- }
+ " disabled by default\n");
+
/*
* Enable write same emulation for IBLOCK and use 0xFFFF as
* the smaller WRITE_SAME(10) only has a two-byte block count.
@@ -413,9 +397,13 @@ static sense_reason_t
iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
{
struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
+ struct se_device *dev = cmd->se_dev;
int ret;
- ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0);
+ ret = blkdev_issue_discard(bdev,
+ target_to_linux_sector(dev, lba),
+ target_to_linux_sector(dev, nolb),
+ GFP_KERNEL, 0);
if (ret < 0) {
pr_err("blkdev_issue_discard() failed: %d\n", ret);
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -431,8 +419,10 @@ iblock_execute_write_same(struct se_cmd *cmd)
struct scatterlist *sg;
struct bio *bio;
struct bio_list list;
- sector_t block_lba = cmd->t_task_lba;
- sector_t sectors = sbc_get_write_same_sectors(cmd);
+ struct se_device *dev = cmd->se_dev;
+ sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
+ sector_t sectors = target_to_linux_sector(dev,
+ sbc_get_write_same_sectors(cmd));
if (cmd->prot_op) {
pr_err("WRITE_SAME: Protection information with IBLOCK"
@@ -646,12 +636,12 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
enum dma_data_direction data_direction)
{
struct se_device *dev = cmd->se_dev;
+ sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
struct iblock_req *ibr;
struct bio *bio, *bio_start;
struct bio_list list;
struct scatterlist *sg;
u32 sg_num = sgl_nents;
- sector_t block_lba;
unsigned bio_cnt;
int rw = 0;
int i;
@@ -677,24 +667,6 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
rw = READ;
}
- /*
- * Convert the blocksize advertised to the initiator to the 512 byte
- * units unconditionally used by the Linux block layer.
- */
- if (dev->dev_attrib.block_size == 4096)
- block_lba = (cmd->t_task_lba << 3);
- else if (dev->dev_attrib.block_size == 2048)
- block_lba = (cmd->t_task_lba << 2);
- else if (dev->dev_attrib.block_size == 1024)
- block_lba = (cmd->t_task_lba << 1);
- else if (dev->dev_attrib.block_size == 512)
- block_lba = cmd->t_task_lba;
- else {
- pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
- " %u\n", dev->dev_attrib.block_size);
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- }
-
ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
if (!ibr)
goto fail;
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 28fb3016370f..46b1991fbb50 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -68,23 +68,25 @@ void core_tmr_release_req(struct se_tmr_req *tmr)
if (dev) {
spin_lock_irqsave(&dev->se_tmr_lock, flags);
- list_del(&tmr->tmr_list);
+ list_del_init(&tmr->tmr_list);
spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
}
kfree(tmr);
}
-static void core_tmr_handle_tas_abort(
- struct se_node_acl *tmr_nacl,
- struct se_cmd *cmd,
- int tas)
+static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
{
- bool remove = true;
+ unsigned long flags;
+ bool remove = true, send_tas;
/*
* TASK ABORTED status (TAS) bit support
*/
- if ((tmr_nacl && (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) {
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ send_tas = (cmd->transport_state & CMD_T_TAS);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+ if (send_tas) {
remove = false;
transport_send_task_abort(cmd);
}
@@ -107,6 +109,46 @@ static int target_check_cdb_and_preempt(struct list_head *list,
return 1;
}
+static bool __target_check_io_state(struct se_cmd *se_cmd,
+ struct se_session *tmr_sess, int tas)
+{
+ struct se_session *sess = se_cmd->se_sess;
+
+ assert_spin_locked(&sess->sess_cmd_lock);
+ WARN_ON_ONCE(!irqs_disabled());
+ /*
+ * If command already reached CMD_T_COMPLETE state within
+ * target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown,
+ * this se_cmd has been passed to fabric driver and will
+ * not be aborted.
+ *
+ * Otherwise, obtain a local se_cmd->cmd_kref now for TMR
+ * ABORT_TASK + LUN_RESET for CMD_T_ABORTED processing as
+ * long as se_cmd->cmd_kref has not already reached zero.
+ */
+ spin_lock(&se_cmd->t_state_lock);
+ if (se_cmd->transport_state & (CMD_T_COMPLETE | CMD_T_FABRIC_STOP)) {
+ pr_debug("Attempted to abort io tag: %llu already complete or"
+ " fabric stop, skipping\n", se_cmd->tag);
+ spin_unlock(&se_cmd->t_state_lock);
+ return false;
+ }
+ if (sess->sess_tearing_down || se_cmd->cmd_wait_set) {
+ pr_debug("Attempted to abort io tag: %llu already shutdown,"
+ " skipping\n", se_cmd->tag);
+ spin_unlock(&se_cmd->t_state_lock);
+ return false;
+ }
+ se_cmd->transport_state |= CMD_T_ABORTED;
+
+ if ((tmr_sess != se_cmd->se_sess) && tas)
+ se_cmd->transport_state |= CMD_T_TAS;
+
+ spin_unlock(&se_cmd->t_state_lock);
+
+ return kref_get_unless_zero(&se_cmd->cmd_kref);
+}
+
void core_tmr_abort_task(
struct se_device *dev,
struct se_tmr_req *tmr,
@@ -130,34 +172,21 @@ void core_tmr_abort_task(
if (tmr->ref_task_tag != ref_tag)
continue;
- if (!kref_get_unless_zero(&se_cmd->cmd_kref))
- continue;
-
printk("ABORT_TASK: Found referenced %s task_tag: %llu\n",
se_cmd->se_tfo->get_fabric_name(), ref_tag);
- spin_lock(&se_cmd->t_state_lock);
- if (se_cmd->transport_state & CMD_T_COMPLETE) {
- printk("ABORT_TASK: ref_tag: %llu already complete,"
- " skipping\n", ref_tag);
- spin_unlock(&se_cmd->t_state_lock);
+ if (!__target_check_io_state(se_cmd, se_sess, 0)) {
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
-
- target_put_sess_cmd(se_cmd);
-
goto out;
}
- se_cmd->transport_state |= CMD_T_ABORTED;
- spin_unlock(&se_cmd->t_state_lock);
-
list_del_init(&se_cmd->se_cmd_list);
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
cancel_work_sync(&se_cmd->work);
transport_wait_for_tasks(se_cmd);
- target_put_sess_cmd(se_cmd);
transport_cmd_finish_abort(se_cmd, true);
+ target_put_sess_cmd(se_cmd);
printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
" ref_tag: %llu\n", ref_tag);
@@ -178,9 +207,11 @@ static void core_tmr_drain_tmr_list(
struct list_head *preempt_and_abort_list)
{
LIST_HEAD(drain_tmr_list);
+ struct se_session *sess;
struct se_tmr_req *tmr_p, *tmr_pp;
struct se_cmd *cmd;
unsigned long flags;
+ bool rc;
/*
* Release all pending and outgoing TMRs aside from the received
* LUN_RESET tmr..
@@ -206,17 +237,39 @@ static void core_tmr_drain_tmr_list(
if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
continue;
+ sess = cmd->se_sess;
+ if (WARN_ON_ONCE(!sess))
+ continue;
+
+ spin_lock(&sess->sess_cmd_lock);
spin_lock(&cmd->t_state_lock);
- if (!(cmd->transport_state & CMD_T_ACTIVE)) {
+ if (!(cmd->transport_state & CMD_T_ACTIVE) ||
+ (cmd->transport_state & CMD_T_FABRIC_STOP)) {
spin_unlock(&cmd->t_state_lock);
+ spin_unlock(&sess->sess_cmd_lock);
continue;
}
if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
spin_unlock(&cmd->t_state_lock);
+ spin_unlock(&sess->sess_cmd_lock);
continue;
}
+ if (sess->sess_tearing_down || cmd->cmd_wait_set) {
+ spin_unlock(&cmd->t_state_lock);
+ spin_unlock(&sess->sess_cmd_lock);
+ continue;
+ }
+ cmd->transport_state |= CMD_T_ABORTED;
spin_unlock(&cmd->t_state_lock);
+ rc = kref_get_unless_zero(&cmd->cmd_kref);
+ if (!rc) {
+ printk("LUN_RESET TMR: non-zero kref_get_unless_zero\n");
+ spin_unlock(&sess->sess_cmd_lock);
+ continue;
+ }
+ spin_unlock(&sess->sess_cmd_lock);
+
list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
}
spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
@@ -230,20 +283,26 @@ static void core_tmr_drain_tmr_list(
(preempt_and_abort_list) ? "Preempt" : "", tmr_p,
tmr_p->function, tmr_p->response, cmd->t_state);
+ cancel_work_sync(&cmd->work);
+ transport_wait_for_tasks(cmd);
+
transport_cmd_finish_abort(cmd, 1);
+ target_put_sess_cmd(cmd);
}
}
static void core_tmr_drain_state_list(
struct se_device *dev,
struct se_cmd *prout_cmd,
- struct se_node_acl *tmr_nacl,
+ struct se_session *tmr_sess,
int tas,
struct list_head *preempt_and_abort_list)
{
LIST_HEAD(drain_task_list);
+ struct se_session *sess;
struct se_cmd *cmd, *next;
unsigned long flags;
+ int rc;
/*
* Complete outstanding commands with TASK_ABORTED SAM status.
@@ -282,6 +341,16 @@ static void core_tmr_drain_state_list(
if (prout_cmd == cmd)
continue;
+ sess = cmd->se_sess;
+ if (WARN_ON_ONCE(!sess))
+ continue;
+
+ spin_lock(&sess->sess_cmd_lock);
+ rc = __target_check_io_state(cmd, tmr_sess, tas);
+ spin_unlock(&sess->sess_cmd_lock);
+ if (!rc)
+ continue;
+
list_move_tail(&cmd->state_list, &drain_task_list);
cmd->state_active = false;
}
@@ -289,7 +358,7 @@ static void core_tmr_drain_state_list(
while (!list_empty(&drain_task_list)) {
cmd = list_entry(drain_task_list.next, struct se_cmd, state_list);
- list_del(&cmd->state_list);
+ list_del_init(&cmd->state_list);
pr_debug("LUN_RESET: %s cmd: %p"
" ITT/CmdSN: 0x%08llx/0x%08x, i_state: %d, t_state: %d"
@@ -313,16 +382,11 @@ static void core_tmr_drain_state_list(
* loop above, but we do it down here given that
* cancel_work_sync may block.
*/
- if (cmd->t_state == TRANSPORT_COMPLETE)
- cancel_work_sync(&cmd->work);
-
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- target_stop_cmd(cmd, &flags);
-
- cmd->transport_state |= CMD_T_ABORTED;
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ cancel_work_sync(&cmd->work);
+ transport_wait_for_tasks(cmd);
- core_tmr_handle_tas_abort(tmr_nacl, cmd, tas);
+ core_tmr_handle_tas_abort(cmd, tas);
+ target_put_sess_cmd(cmd);
}
}
@@ -334,6 +398,7 @@ int core_tmr_lun_reset(
{
struct se_node_acl *tmr_nacl = NULL;
struct se_portal_group *tmr_tpg = NULL;
+ struct se_session *tmr_sess = NULL;
int tas;
/*
* TASK_ABORTED status bit, this is configurable via ConfigFS
@@ -352,8 +417,9 @@ int core_tmr_lun_reset(
* or struct se_device passthrough..
*/
if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
- tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
- tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
+ tmr_sess = tmr->task_cmd->se_sess;
+ tmr_nacl = tmr_sess->se_node_acl;
+ tmr_tpg = tmr_sess->se_tpg;
if (tmr_nacl && tmr_tpg) {
pr_debug("LUN_RESET: TMR caller fabric: %s"
" initiator port %s\n",
@@ -366,7 +432,7 @@ int core_tmr_lun_reset(
dev->transport->name, tas);
core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
- core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas,
+ core_tmr_drain_state_list(dev, prout_cmd, tmr_sess, tas,
preempt_and_abort_list);
/*
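The helper added above centers on one pattern: decide under the per-command lock whether the command can still be aborted, mark it, and only then try to pin it with kref_get_unless_zero(). A minimal standalone sketch of that shape, using pthread/C11 stand-ins (struct io_cmd and its flags are illustrative, not the kernel's types):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Illustrative stand-ins; not the kernel's se_cmd or kref types. */
#define CMD_COMPLETE (1u << 0)
#define CMD_ABORTED  (1u << 1)

struct io_cmd {
	pthread_mutex_t lock;
	unsigned int state;
	atomic_int refcount;	/* 0 means the command is being freed */
};

/* kref_get_unless_zero(): take a reference only while it is non-zero. */
static bool ref_get_unless_zero(atomic_int *ref)
{
	int old = atomic_load(ref);

	while (old != 0) {
		if (atomic_compare_exchange_weak(ref, &old, old + 1))
			return true;
	}
	return false;
}

/* Shape of __target_check_io_state(): bail out if the command already
 * completed, otherwise mark it aborted and try to pin it. */
static bool check_and_pin_for_abort(struct io_cmd *cmd)
{
	pthread_mutex_lock(&cmd->lock);
	if (cmd->state & CMD_COMPLETE) {
		pthread_mutex_unlock(&cmd->lock);
		return false;	/* already handed back to the fabric */
	}
	cmd->state |= CMD_ABORTED;
	pthread_mutex_unlock(&cmd->lock);

	return ref_get_unless_zero(&cmd->refcount);
}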
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 4fdcee2006d1..d151bc3d6971 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -528,9 +528,6 @@ void transport_deregister_session(struct se_session *se_sess)
}
EXPORT_SYMBOL(transport_deregister_session);
-/*
- * Called with cmd->t_state_lock held.
- */
static void target_remove_from_state_list(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
@@ -555,10 +552,6 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
{
unsigned long flags;
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (write_pending)
- cmd->t_state = TRANSPORT_WRITE_PENDING;
-
if (remove_from_lists) {
target_remove_from_state_list(cmd);
@@ -568,6 +561,10 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
cmd->se_lun = NULL;
}
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (write_pending)
+ cmd->t_state = TRANSPORT_WRITE_PENDING;
+
/*
* Determine if frontend context caller is requesting the stopping of
* this command for frontend exceptions.
@@ -621,6 +618,8 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
+ bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
+
if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
transport_lun_remove_cmd(cmd);
/*
@@ -632,7 +631,7 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
if (transport_cmd_check_stop_to_fabric(cmd))
return;
- if (remove)
+ if (remove && ack_kref)
transport_put_cmd(cmd);
}
@@ -700,7 +699,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
* Check for case where an explicit ABORT_TASK has been received
* and transport_wait_for_tasks() will be waiting for completion..
*/
- if (cmd->transport_state & CMD_T_ABORTED &&
+ if (cmd->transport_state & CMD_T_ABORTED ||
cmd->transport_state & CMD_T_STOP) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
complete_all(&cmd->t_transport_stop_comp);
@@ -1850,19 +1849,21 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
return true;
}
+static int __transport_check_aborted_status(struct se_cmd *, int);
+
void target_execute_cmd(struct se_cmd *cmd)
{
/*
- * If the received CDB has aleady been aborted stop processing it here.
- */
- if (transport_check_aborted_status(cmd, 1))
- return;
-
- /*
* Determine if frontend context caller is requesting the stopping of
* this command for frontend exceptions.
+ *
+ * If the received CDB has already been aborted, stop processing it here.
*/
spin_lock_irq(&cmd->t_state_lock);
+ if (__transport_check_aborted_status(cmd, 1)) {
+ spin_unlock_irq(&cmd->t_state_lock);
+ return;
+ }
if (cmd->transport_state & CMD_T_STOP) {
pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
__func__, __LINE__, cmd->tag);
@@ -2213,20 +2214,14 @@ static inline void transport_free_pages(struct se_cmd *cmd)
}
/**
- * transport_release_cmd - free a command
- * @cmd: command to free
+ * transport_put_cmd - release a reference to a command
+ * @cmd: command to release
*
- * This routine unconditionally frees a command, and reference counting
- * or list removal must be done in the caller.
+ * This routine releases our reference to the command and frees it if possible.
*/
-static int transport_release_cmd(struct se_cmd *cmd)
+static int transport_put_cmd(struct se_cmd *cmd)
{
BUG_ON(!cmd->se_tfo);
-
- if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
- core_tmr_release_req(cmd->se_tmr_req);
- if (cmd->t_task_cdb != cmd->__t_task_cdb)
- kfree(cmd->t_task_cdb);
/*
* If this cmd has been setup with target_get_sess_cmd(), drop
* the kref and call ->release_cmd() in kref callback.
@@ -2234,18 +2229,6 @@ static int transport_release_cmd(struct se_cmd *cmd)
return target_put_sess_cmd(cmd);
}
-/**
- * transport_put_cmd - release a reference to a command
- * @cmd: command to release
- *
- * This routine releases our reference to the command and frees it if possible.
- */
-static int transport_put_cmd(struct se_cmd *cmd)
-{
- transport_free_pages(cmd);
- return transport_release_cmd(cmd);
-}
-
void *transport_kmap_data_sg(struct se_cmd *cmd)
{
struct scatterlist *sg = cmd->t_data_sg;
@@ -2441,34 +2424,58 @@ static void transport_write_pending_qf(struct se_cmd *cmd)
}
}
-int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
+static bool
+__transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *,
+ unsigned long *flags);
+
+static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
{
unsigned long flags;
+
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ __transport_wait_for_tasks(cmd, true, aborted, tas, &flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+}
+
+int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
+{
int ret = 0;
+ bool aborted = false, tas = false;
if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
- transport_wait_for_tasks(cmd);
+ target_wait_free_cmd(cmd, &aborted, &tas);
- ret = transport_release_cmd(cmd);
+ if (!aborted || tas)
+ ret = transport_put_cmd(cmd);
} else {
if (wait_for_tasks)
- transport_wait_for_tasks(cmd);
+ target_wait_free_cmd(cmd, &aborted, &tas);
/*
* Handle WRITE failure case where transport_generic_new_cmd()
* has already added se_cmd to state_list, but fabric has
* failed command before I/O submission.
*/
- if (cmd->state_active) {
- spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (cmd->state_active)
target_remove_from_state_list(cmd);
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- }
if (cmd->se_lun)
transport_lun_remove_cmd(cmd);
- ret = transport_put_cmd(cmd);
+ if (!aborted || tas)
+ ret = transport_put_cmd(cmd);
+ }
+ /*
+ * If the task has been internally aborted due to TMR ABORT_TASK
+ * or LUN_RESET, target_core_tmr.c is responsible for performing
+ * the remaining calls to target_put_sess_cmd(), and not the
+ * callers of this function.
+ */
+ if (aborted) {
+ pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
+ wait_for_completion(&cmd->cmd_wait_comp);
+ cmd->se_tfo->release_cmd(cmd);
+ ret = 1;
}
return ret;
}
@@ -2508,26 +2515,46 @@ out:
}
EXPORT_SYMBOL(target_get_sess_cmd);
+static void target_free_cmd_mem(struct se_cmd *cmd)
+{
+ transport_free_pages(cmd);
+
+ if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
+ core_tmr_release_req(cmd->se_tmr_req);
+ if (cmd->t_task_cdb != cmd->__t_task_cdb)
+ kfree(cmd->t_task_cdb);
+}
+
static void target_release_cmd_kref(struct kref *kref)
{
struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
struct se_session *se_sess = se_cmd->se_sess;
unsigned long flags;
+ bool fabric_stop;
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
if (list_empty(&se_cmd->se_cmd_list)) {
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ target_free_cmd_mem(se_cmd);
se_cmd->se_tfo->release_cmd(se_cmd);
return;
}
- if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
+
+ spin_lock(&se_cmd->t_state_lock);
+ fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP);
+ spin_unlock(&se_cmd->t_state_lock);
+
+ if (se_cmd->cmd_wait_set || fabric_stop) {
+ list_del_init(&se_cmd->se_cmd_list);
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ target_free_cmd_mem(se_cmd);
complete(&se_cmd->cmd_wait_comp);
return;
}
- list_del(&se_cmd->se_cmd_list);
+ list_del_init(&se_cmd->se_cmd_list);
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ target_free_cmd_mem(se_cmd);
se_cmd->se_tfo->release_cmd(se_cmd);
}
@@ -2539,6 +2566,7 @@ int target_put_sess_cmd(struct se_cmd *se_cmd)
struct se_session *se_sess = se_cmd->se_sess;
if (!se_sess) {
+ target_free_cmd_mem(se_cmd);
se_cmd->se_tfo->release_cmd(se_cmd);
return 1;
}
@@ -2555,6 +2583,7 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
{
struct se_cmd *se_cmd;
unsigned long flags;
+ int rc;
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
if (se_sess->sess_tearing_down) {
@@ -2564,8 +2593,15 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
se_sess->sess_tearing_down = 1;
list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
- list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
- se_cmd->cmd_wait_set = 1;
+ list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) {
+ rc = kref_get_unless_zero(&se_cmd->cmd_kref);
+ if (rc) {
+ se_cmd->cmd_wait_set = 1;
+ spin_lock(&se_cmd->t_state_lock);
+ se_cmd->transport_state |= CMD_T_FABRIC_STOP;
+ spin_unlock(&se_cmd->t_state_lock);
+ }
+ }
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
@@ -2578,15 +2614,23 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
{
struct se_cmd *se_cmd, *tmp_cmd;
unsigned long flags;
+ bool tas;
list_for_each_entry_safe(se_cmd, tmp_cmd,
&se_sess->sess_wait_list, se_cmd_list) {
- list_del(&se_cmd->se_cmd_list);
-
pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
" %d\n", se_cmd, se_cmd->t_state,
se_cmd->se_tfo->get_cmd_state(se_cmd));
+ spin_lock_irqsave(&se_cmd->t_state_lock, flags);
+ tas = (se_cmd->transport_state & CMD_T_TAS);
+ spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+
+ if (!target_put_sess_cmd(se_cmd)) {
+ if (tas)
+ target_put_sess_cmd(se_cmd);
+ }
+
wait_for_completion(&se_cmd->cmd_wait_comp);
pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
" fabric state: %d\n", se_cmd, se_cmd->t_state,
@@ -2608,53 +2652,75 @@ void transport_clear_lun_ref(struct se_lun *lun)
wait_for_completion(&lun->lun_ref_comp);
}
-/**
- * transport_wait_for_tasks - wait for completion to occur
- * @cmd: command to wait
- *
- * Called from frontend fabric context to wait for storage engine
- * to pause and/or release frontend generated struct se_cmd.
- */
-bool transport_wait_for_tasks(struct se_cmd *cmd)
+static bool
+__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
+ bool *aborted, bool *tas, unsigned long *flags)
+ __releases(&cmd->t_state_lock)
+ __acquires(&cmd->t_state_lock)
{
- unsigned long flags;
- spin_lock_irqsave(&cmd->t_state_lock, flags);
+ assert_spin_locked(&cmd->t_state_lock);
+ WARN_ON_ONCE(!irqs_disabled());
+
+ if (fabric_stop)
+ cmd->transport_state |= CMD_T_FABRIC_STOP;
+
+ if (cmd->transport_state & CMD_T_ABORTED)
+ *aborted = true;
+
+ if (cmd->transport_state & CMD_T_TAS)
+ *tas = true;
+
if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
- !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
return false;
- }
if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
- !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
return false;
- }
- if (!(cmd->transport_state & CMD_T_ACTIVE)) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ if (!(cmd->transport_state & CMD_T_ACTIVE))
+ return false;
+
+ if (fabric_stop && *aborted)
return false;
- }
cmd->transport_state |= CMD_T_STOP;
- pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d, t_state: %d, CMD_T_STOP\n",
- cmd, cmd->tag, cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
+ pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d,"
+ " t_state: %d, CMD_T_STOP\n", cmd, cmd->tag,
+ cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
wait_for_completion(&cmd->t_transport_stop_comp);
- spin_lock_irqsave(&cmd->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, *flags);
cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
- pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->t_transport_stop_comp) for ITT: 0x%08llx\n",
- cmd->tag);
+ pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->"
+ "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag);
+
+ return true;
+}
+
+/**
+ * transport_wait_for_tasks - wait for completion to occur
+ * @cmd: command to wait
+ *
+ * Called from frontend fabric context to wait for storage engine
+ * to pause and/or release frontend generated struct se_cmd.
+ */
+bool transport_wait_for_tasks(struct se_cmd *cmd)
+{
+ unsigned long flags;
+ bool ret, aborted = false, tas = false;
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return true;
+ return ret;
}
EXPORT_SYMBOL(transport_wait_for_tasks);
@@ -2836,28 +2902,49 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);
-int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
+static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
+ __releases(&cmd->t_state_lock)
+ __acquires(&cmd->t_state_lock)
{
+ assert_spin_locked(&cmd->t_state_lock);
+ WARN_ON_ONCE(!irqs_disabled());
+
if (!(cmd->transport_state & CMD_T_ABORTED))
return 0;
-
/*
* If cmd has been aborted but either no status is to be sent or it has
* already been sent, just return
*/
- if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS))
+ if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) {
+ if (send_status)
+ cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
return 1;
+ }
- pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08llx\n",
- cmd->t_task_cdb[0], cmd->tag);
+ pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB:"
+ " 0x%02x ITT: 0x%08llx\n", cmd->t_task_cdb[0], cmd->tag);
cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
cmd->scsi_status = SAM_STAT_TASK_ABORTED;
trace_target_cmd_complete(cmd);
+
+ spin_unlock_irq(&cmd->t_state_lock);
cmd->se_tfo->queue_status(cmd);
+ spin_lock_irq(&cmd->t_state_lock);
return 1;
}
+
+int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
+{
+ int ret;
+
+ spin_lock_irq(&cmd->t_state_lock);
+ ret = __transport_check_aborted_status(cmd, send_status);
+ spin_unlock_irq(&cmd->t_state_lock);
+
+ return ret;
+}
EXPORT_SYMBOL(transport_check_aborted_status);
void transport_send_task_abort(struct se_cmd *cmd)
@@ -2879,11 +2966,17 @@ void transport_send_task_abort(struct se_cmd *cmd)
*/
if (cmd->data_direction == DMA_TO_DEVICE) {
if (cmd->se_tfo->write_pending_status(cmd) != 0) {
- cmd->transport_state |= CMD_T_ABORTED;
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ goto send_abort;
+ }
cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return;
}
}
+send_abort:
cmd->scsi_status = SAM_STAT_TASK_ABORTED;
transport_lun_remove_cmd(cmd);
@@ -2900,8 +2993,17 @@ static void target_tmr_work(struct work_struct *work)
struct se_cmd *cmd = container_of(work, struct se_cmd, work);
struct se_device *dev = cmd->se_dev;
struct se_tmr_req *tmr = cmd->se_tmr_req;
+ unsigned long flags;
int ret;
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (cmd->transport_state & CMD_T_ABORTED) {
+ tmr->response = TMR_FUNCTION_REJECTED;
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ goto check_stop;
+ }
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
switch (tmr->function) {
case TMR_ABORT_TASK:
core_tmr_abort_task(dev, tmr, cmd->se_sess);
@@ -2934,9 +3036,17 @@ static void target_tmr_work(struct work_struct *work)
break;
}
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (cmd->transport_state & CMD_T_ABORTED) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ goto check_stop;
+ }
cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
cmd->se_tfo->queue_tm_rsp(cmd);
+check_stop:
transport_cmd_check_stop_to_fabric(cmd);
}
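A recurring shape in the transport changes above: __transport_check_aborted_status() now runs with t_state_lock held and drops it only around the ->queue_status() callback, which may block. A hedged userspace sketch of that unlock-around-callback discipline (struct cmd and the names here are illustrative):

#include <pthread.h>
#include <stdio.h>

struct cmd {
	pthread_mutex_t state_lock;
	int status;
};

/* Stand-in for se_tfo->queue_status(); may block, so it must not be
 * called with the state lock held. */
static void queue_status(struct cmd *c)
{
	printf("queued status %d\n", c->status);
}

/* Shape of __transport_check_aborted_status(): entered with state_lock
 * held, drop it around the blocking callback, re-take it before
 * returning so the lock state on exit matches entry. */
static void send_status_locked(struct cmd *c)
{
	c->status = 1;			/* decided under the lock */
	pthread_mutex_unlock(&c->state_lock);
	queue_status(c);		/* lock not held here */
	pthread_mutex_lock(&c->state_lock);
}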
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index e3fbc5a5d88f..6ceac4f2d4b2 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -377,26 +377,28 @@ static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_device,
* get_load() - get load for a cpu since last updated
* @cpufreq_device: &struct cpufreq_cooling_device for this cpu
* @cpu: cpu number
+ * @cpu_idx: index of the cpu in cpufreq_device->allowed_cpus
*
* Return: The average load of cpu @cpu in percentage since this
* function was last called.
*/
-static u32 get_load(struct cpufreq_cooling_device *cpufreq_device, int cpu)
+static u32 get_load(struct cpufreq_cooling_device *cpufreq_device, int cpu,
+ int cpu_idx)
{
u32 load;
u64 now, now_idle, delta_time, delta_idle;
now_idle = get_cpu_idle_time(cpu, &now, 0);
- delta_idle = now_idle - cpufreq_device->time_in_idle[cpu];
- delta_time = now - cpufreq_device->time_in_idle_timestamp[cpu];
+ delta_idle = now_idle - cpufreq_device->time_in_idle[cpu_idx];
+ delta_time = now - cpufreq_device->time_in_idle_timestamp[cpu_idx];
if (delta_time <= delta_idle)
load = 0;
else
load = div64_u64(100 * (delta_time - delta_idle), delta_time);
- cpufreq_device->time_in_idle[cpu] = now_idle;
- cpufreq_device->time_in_idle_timestamp[cpu] = now;
+ cpufreq_device->time_in_idle[cpu_idx] = now_idle;
+ cpufreq_device->time_in_idle_timestamp[cpu_idx] = now;
return load;
}
@@ -598,7 +600,7 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
u32 load;
if (cpu_online(cpu))
- load = get_load(cpufreq_device, cpu);
+ load = get_load(cpufreq_device, cpu, i);
else
load = 0;
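The get_load() fix above is purely an indexing one (per-cpu bookkeeping is stored by position in allowed_cpus, not by raw cpu number); the load formula itself is unchanged. As a sanity check, here is the same busy-fraction computation as a standalone sketch with made-up sample values:

#include <stdint.h>
#include <stdio.h>

/* Busy share of elapsed wall time since the previous sample, as in
 * get_load(); the kernel obtains the inputs via get_cpu_idle_time(). */
static uint32_t load_percent(uint64_t now, uint64_t now_idle,
			     uint64_t last, uint64_t last_idle)
{
	uint64_t delta_time = now - last;
	uint64_t delta_idle = now_idle - last_idle;

	if (delta_time <= delta_idle)
		return 0;
	return (uint32_t)(100 * (delta_time - delta_idle) / delta_time);
}

int main(void)
{
	/* 100 ms elapsed, 25 ms of it idle -> 75% load */
	printf("%u%%\n", load_percent(100000, 25000, 0, 0));
	return 0;
}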
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index 36d07295f8e3..04136f58682d 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -68,12 +68,12 @@ static inline int _step_to_temp(int step)
* Every step equals (1 * 200) / 255 celsius, and finally
* need convert to millicelsius.
*/
- return (HISI_TEMP_BASE + (step * 200 / 255)) * 1000;
+ return (HISI_TEMP_BASE * 1000 + (step * 200000 / 255));
}
static inline long _temp_to_step(long temp)
{
- return ((temp / 1000 - HISI_TEMP_BASE) * 255 / 200);
+ return ((temp - HISI_TEMP_BASE * 1000) * 255) / 200000;
}
static long hisi_thermal_get_sensor_temp(struct hisi_thermal_data *data,
@@ -160,7 +160,7 @@ static int hisi_thermal_get_temp(void *_sensor, int *temp)
struct hisi_thermal_sensor *sensor = _sensor;
struct hisi_thermal_data *data = sensor->thermal;
- int sensor_id = 0, i;
+ int sensor_id = -1, i;
long max_temp = 0;
*temp = hisi_thermal_get_sensor_temp(data, sensor);
@@ -168,12 +168,19 @@ static int hisi_thermal_get_temp(void *_sensor, int *temp)
sensor->sensor_temp = *temp;
for (i = 0; i < HISI_MAX_SENSORS; i++) {
+ if (!data->sensors[i].tzd)
+ continue;
+
if (data->sensors[i].sensor_temp >= max_temp) {
max_temp = data->sensors[i].sensor_temp;
sensor_id = i;
}
}
+ /* If no sensor is enabled, skip enabling the irq */
+ if (sensor_id == -1)
+ return 0;
+
mutex_lock(&data->thermal_lock);
data->irq_bind_sensor = sensor_id;
mutex_unlock(&data->thermal_lock);
@@ -190,7 +197,7 @@ static int hisi_thermal_get_temp(void *_sensor, int *temp)
return 0;
}
- if (max_temp < sensor->thres_temp) {
+ if (!data->irq_enabled && max_temp < sensor->thres_temp) {
data->irq_enabled = true;
hisi_thermal_enable_bind_irq_sensor(data);
enable_irq(data->irq);
@@ -226,8 +233,12 @@ static irqreturn_t hisi_thermal_alarm_irq_thread(int irq, void *dev)
sensor->thres_temp / 1000);
mutex_unlock(&data->thermal_lock);
- for (i = 0; i < HISI_MAX_SENSORS; i++)
+ for (i = 0; i < HISI_MAX_SENSORS; i++) {
+ if (!data->sensors[i].tzd)
+ continue;
+
thermal_zone_device_update(data->sensors[i].tzd);
+ }
return IRQ_HANDLED;
}
@@ -247,6 +258,7 @@ static int hisi_thermal_register_sensor(struct platform_device *pdev,
sensor, &hisi_of_thermal_ops);
if (IS_ERR(sensor->tzd)) {
ret = PTR_ERR(sensor->tzd);
+ sensor->tzd = NULL;
dev_err(&pdev->dev, "failed to register sensor id %d: %d\n",
sensor->id, ret);
return ret;
@@ -331,28 +343,21 @@ static int hisi_thermal_probe(struct platform_device *pdev)
return ret;
}
+ hisi_thermal_enable_bind_irq_sensor(data);
+ irq_get_irqchip_state(data->irq, IRQCHIP_STATE_MASKED,
+ &data->irq_enabled);
+
for (i = 0; i < HISI_MAX_SENSORS; ++i) {
ret = hisi_thermal_register_sensor(pdev, data,
&data->sensors[i], i);
- if (ret) {
+ if (ret)
dev_err(&pdev->dev,
"failed to register thermal sensor: %d\n", ret);
- goto err_get_sensor_data;
- }
+ else
+ hisi_thermal_toggle_sensor(&data->sensors[i], true);
}
- hisi_thermal_enable_bind_irq_sensor(data);
- data->irq_enabled = true;
-
- for (i = 0; i < HISI_MAX_SENSORS; i++)
- hisi_thermal_toggle_sensor(&data->sensors[i], true);
-
return 0;
-
-err_get_sensor_data:
- clk_disable_unprepare(data->clk);
-
- return ret;
}
static int hisi_thermal_remove(struct platform_device *pdev)
@@ -363,6 +368,9 @@ static int hisi_thermal_remove(struct platform_device *pdev)
for (i = 0; i < HISI_MAX_SENSORS; i++) {
struct hisi_thermal_sensor *sensor = &data->sensors[i];
+ if (!sensor->tzd)
+ continue;
+
hisi_thermal_toggle_sensor(sensor, false);
thermal_zone_of_sensor_unregister(&pdev->dev, sensor->tzd);
}
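The _step_to_temp()/_temp_to_step() change keeps the arithmetic in millicelsius so the per-step granularity (200000/255 m°C) survives integer division. A standalone roundtrip sketch of the patched formulas; the -60 base offset here is an assumption for illustration:

#include <stdio.h>

#define HISI_TEMP_BASE (-60)	/* assumed base offset, in celsius */

/* The patched conversions: stay in millicelsius so the per-step
 * granularity (200000 / 255 m°C) is not truncated away. */
static int step_to_temp(int step)
{
	return HISI_TEMP_BASE * 1000 + step * 200000 / 255;
}

static long temp_to_step(long temp)
{
	return ((temp - HISI_TEMP_BASE * 1000) * 255) / 200000;
}

int main(void)
{
	int t = step_to_temp(128);

	/* 40392 mC; maps back to step 127 due to integer truncation */
	printf("step 128 -> %d mC -> step %ld\n", t, temp_to_step(t));
	return 0;
}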
diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
index 2f9f7086ac3d..ea9366ad3e6b 100644
--- a/drivers/thermal/step_wise.c
+++ b/drivers/thermal/step_wise.c
@@ -63,6 +63,19 @@ static unsigned long get_target_state(struct thermal_instance *instance,
next_target = instance->target;
dev_dbg(&cdev->device, "cur_state=%ld\n", cur_state);
+ if (!instance->initialized) {
+ if (throttle) {
+ next_target = (cur_state + 1) >= instance->upper ?
+ instance->upper :
+ ((cur_state + 1) < instance->lower ?
+ instance->lower : (cur_state + 1));
+ } else {
+ next_target = THERMAL_NO_TARGET;
+ }
+
+ return next_target;
+ }
+
switch (trend) {
case THERMAL_TREND_RAISING:
if (throttle) {
@@ -149,7 +162,7 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
dev_dbg(&instance->cdev->device, "old_target=%d, target=%d\n",
old_target, (int)instance->target);
- if (old_target == instance->target)
+ if (instance->initialized && old_target == instance->target)
continue;
/* Activate a passive thermal instance */
@@ -161,7 +174,7 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
instance->target == THERMAL_NO_TARGET)
update_passive_instance(tz, trip_type, -1);
-
+ instance->initialized = true;
instance->cdev->updated = false; /* cdev needs update */
}
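The new !instance->initialized branch above computes the first throttle target as cur_state + 1 clamped into [instance->lower, instance->upper]; the nested ternary encodes exactly this. A small equivalent sketch:

#include <stdio.h>

/* First-pass throttle target in get_target_state(): cur_state + 1,
 * clamped into the instance's [lower, upper] range. */
static unsigned long clamp_target(unsigned long cur_state,
				  unsigned long lower, unsigned long upper)
{
	unsigned long next = cur_state + 1;

	if (next >= upper)
		return upper;
	if (next < lower)
		return lower;
	return next;
}

int main(void)
{
	printf("%lu\n", clamp_target(0, 2, 5));	/* 2: pulled up to lower */
	printf("%lu\n", clamp_target(4, 2, 5));	/* 5: capped at upper */
	return 0;
}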
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index d9e525cc9c1c..3d5f8f432b5b 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -37,6 +37,7 @@
#include <linux/of.h>
#include <net/netlink.h>
#include <net/genetlink.h>
+#include <linux/suspend.h>
#define CREATE_TRACE_POINTS
#include <trace/events/thermal.h>
@@ -59,6 +60,8 @@ static LIST_HEAD(thermal_governor_list);
static DEFINE_MUTEX(thermal_list_lock);
static DEFINE_MUTEX(thermal_governor_lock);
+static atomic_t in_suspend;
+
static struct thermal_governor *def_governor;
static struct thermal_governor *__find_governor(const char *name)
@@ -451,6 +454,10 @@ static void handle_thermal_trip(struct thermal_zone_device *tz, int trip)
{
enum thermal_trip_type type;
+ /* Ignore disabled trip points */
+ if (test_bit(trip, &tz->trips_disabled))
+ return;
+
tz->ops->get_trip_type(tz, trip, &type);
if (type == THERMAL_TRIP_CRITICAL || type == THERMAL_TRIP_HOT)
@@ -532,14 +539,31 @@ static void update_temperature(struct thermal_zone_device *tz)
mutex_unlock(&tz->lock);
trace_thermal_temperature(tz);
- dev_dbg(&tz->device, "last_temperature=%d, current_temperature=%d\n",
- tz->last_temperature, tz->temperature);
+ if (tz->last_temperature == THERMAL_TEMP_INVALID)
+ dev_dbg(&tz->device, "last_temperature N/A, current_temperature=%d\n",
+ tz->temperature);
+ else
+ dev_dbg(&tz->device, "last_temperature=%d, current_temperature=%d\n",
+ tz->last_temperature, tz->temperature);
+}
+
+static void thermal_zone_device_reset(struct thermal_zone_device *tz)
+{
+ struct thermal_instance *pos;
+
+ tz->temperature = THERMAL_TEMP_INVALID;
+ tz->passive = 0;
+ list_for_each_entry(pos, &tz->thermal_instances, tz_node)
+ pos->initialized = false;
}
void thermal_zone_device_update(struct thermal_zone_device *tz)
{
int count;
+ if (atomic_read(&in_suspend))
+ return;
+
if (!tz->ops->get_temp)
return;
@@ -1321,6 +1345,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
if (!result) {
list_add_tail(&dev->tz_node, &tz->thermal_instances);
list_add_tail(&dev->cdev_node, &cdev->thermal_instances);
+ atomic_set(&tz->need_update, 1);
}
mutex_unlock(&cdev->lock);
mutex_unlock(&tz->lock);
@@ -1430,6 +1455,7 @@ __thermal_cooling_device_register(struct device_node *np,
const struct thermal_cooling_device_ops *ops)
{
struct thermal_cooling_device *cdev;
+ struct thermal_zone_device *pos = NULL;
int result;
if (type && strlen(type) >= THERMAL_NAME_LENGTH)
@@ -1474,6 +1500,12 @@ __thermal_cooling_device_register(struct device_node *np,
/* Update binding information for 'this' new cdev */
bind_cdev(cdev);
+ mutex_lock(&thermal_list_lock);
+ list_for_each_entry(pos, &thermal_tz_list, node)
+ if (atomic_cmpxchg(&pos->need_update, 1, 0))
+ thermal_zone_device_update(pos);
+ mutex_unlock(&thermal_list_lock);
+
return cdev;
}
@@ -1768,6 +1800,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
{
struct thermal_zone_device *tz;
enum thermal_trip_type trip_type;
+ int trip_temp;
int result;
int count;
int passive = 0;
@@ -1806,6 +1839,8 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
tz->trips = trips;
tz->passive_delay = passive_delay;
tz->polling_delay = polling_delay;
+ /* A new thermal zone needs to be updated anyway. */
+ atomic_set(&tz->need_update, 1);
dev_set_name(&tz->device, "thermal_zone%d", tz->id);
result = device_register(&tz->device);
@@ -1837,9 +1872,15 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
goto unregister;
for (count = 0; count < trips; count++) {
- tz->ops->get_trip_type(tz, count, &trip_type);
+ if (tz->ops->get_trip_type(tz, count, &trip_type))
+ set_bit(count, &tz->trips_disabled);
if (trip_type == THERMAL_TRIP_PASSIVE)
passive = 1;
+ if (tz->ops->get_trip_temp(tz, count, &trip_temp))
+ set_bit(count, &tz->trips_disabled);
+ /* Check for bogus trip points */
+ if (trip_temp == 0)
+ set_bit(count, &tz->trips_disabled);
}
if (!passive) {
@@ -1900,7 +1941,10 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
INIT_DELAYED_WORK(&(tz->poll_queue), thermal_zone_device_check);
- thermal_zone_device_update(tz);
+ thermal_zone_device_reset(tz);
+ /* Update the new thermal zone and mark it as already updated. */
+ if (atomic_cmpxchg(&tz->need_update, 1, 0))
+ thermal_zone_device_update(tz);
return tz;
@@ -2140,6 +2184,36 @@ static void thermal_unregister_governors(void)
thermal_gov_power_allocator_unregister();
}
+static int thermal_pm_notify(struct notifier_block *nb,
+ unsigned long mode, void *_unused)
+{
+ struct thermal_zone_device *tz;
+
+ switch (mode) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_RESTORE_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ atomic_set(&in_suspend, 1);
+ break;
+ case PM_POST_HIBERNATION:
+ case PM_POST_RESTORE:
+ case PM_POST_SUSPEND:
+ atomic_set(&in_suspend, 0);
+ list_for_each_entry(tz, &thermal_tz_list, node) {
+ thermal_zone_device_reset(tz);
+ thermal_zone_device_update(tz);
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static struct notifier_block thermal_pm_nb = {
+ .notifier_call = thermal_pm_notify,
+};
+
static int __init thermal_init(void)
{
int result;
@@ -2160,6 +2234,11 @@ static int __init thermal_init(void)
if (result)
goto exit_netlink;
+ result = register_pm_notifier(&thermal_pm_nb);
+ if (result)
+ pr_warn("Thermal: Can not register suspend notifier, return %d\n",
+ result);
+
return 0;
exit_netlink:
@@ -2179,6 +2258,7 @@ error:
static void __exit thermal_exit(void)
{
+ unregister_pm_notifier(&thermal_pm_nb);
of_thermal_destroy_zones();
genetlink_exit();
class_unregister(&thermal_class);
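The suspend handling added above is the standard PM-notifier pattern: flag the PM_*_PREPARE events, then reset and re-evaluate cached state on the PM_POST_* events. A minimal module-shaped sketch of that registration (the example_* names are placeholders, not anything in this patch):

#include <linux/module.h>
#include <linux/suspend.h>

static int example_pm_notify(struct notifier_block *nb,
			     unsigned long mode, void *unused)
{
	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		/* quiesce, e.g. set an in_suspend flag */
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		/* reset cached state and trigger a re-evaluation */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_pm_nb = {
	.notifier_call = example_pm_notify,
};

static int __init example_init(void)
{
	return register_pm_notifier(&example_pm_nb);
}

static void __exit example_exit(void)
{
	unregister_pm_notifier(&example_pm_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");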
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index d7ac1fccd659..749d41abfbab 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -41,6 +41,7 @@ struct thermal_instance {
struct thermal_zone_device *tz;
struct thermal_cooling_device *cdev;
int trip;
+ bool initialized;
unsigned long upper; /* Highest cooling state for this trip point */
unsigned long lower; /* Lowest cooling state for this trip point */
unsigned long target; /* expected cooling state */
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index e49c2bce551d..cf000b331eed 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -258,16 +258,13 @@ static void n_tty_check_throttle(struct tty_struct *tty)
static void n_tty_check_unthrottle(struct tty_struct *tty)
{
- if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
- tty->link->ldisc->ops->write_wakeup == n_tty_write_wakeup) {
+ if (tty->driver->type == TTY_DRIVER_TYPE_PTY) {
if (chars_in_buffer(tty) > TTY_THRESHOLD_UNTHROTTLE)
return;
if (!tty->count)
return;
n_tty_kick_worker(tty);
- n_tty_write_wakeup(tty->link);
- if (waitqueue_active(&tty->link->write_wait))
- wake_up_interruptible_poll(&tty->link->write_wait, POLLOUT);
+ tty_wakeup(tty->link);
return;
}
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index a45660f62db5..78e983677339 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -681,7 +681,14 @@ static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
/* this is called once with whichever end is closed last */
static void pty_unix98_shutdown(struct tty_struct *tty)
{
- devpts_kill_index(tty->driver_data, tty->index);
+ struct inode *ptmx_inode;
+
+ if (tty->driver->subtype == PTY_TYPE_MASTER)
+ ptmx_inode = tty->driver_data;
+ else
+ ptmx_inode = tty->link->driver_data;
+ devpts_kill_index(ptmx_inode, tty->index);
+ devpts_del_ref(ptmx_inode);
}
static const struct tty_operations ptm_unix98_ops = {
@@ -773,6 +780,18 @@ static int ptmx_open(struct inode *inode, struct file *filp)
set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
tty->driver_data = inode;
+ /*
+ * In the case where all references to the ptmx inode are dropped and we
+ * still have /dev/tty opened pointing to the master/slave pair (ptmx
+ * is closed/released before /dev/tty), we must make sure that the inode
+ * is still valid when we call the final pty_unix98_shutdown; thus we
+ * hold an additional reference to the ptmx inode. For the same /dev/tty
+ * last-close case, we also need to make sure the super_block isn't
+ * destroyed (devpts instance unmounted) before /dev/tty is closed and
+ * devpts_kill_index is called on its release.
+ */
+ devpts_add_ref(inode);
+
tty_add_file(tty, filp);
slave_inode = devpts_pty_new(inode,
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 4097f3f65b3b..7cd6f9a90542 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1379,6 +1379,9 @@ ce4100_serial_setup(struct serial_private *priv,
#define PCI_DEVICE_ID_INTEL_BSW_UART1 0x228a
#define PCI_DEVICE_ID_INTEL_BSW_UART2 0x228c
+#define PCI_DEVICE_ID_INTEL_BDW_UART1 0x9ce3
+#define PCI_DEVICE_ID_INTEL_BDW_UART2 0x9ce4
+
#define BYT_PRV_CLK 0x800
#define BYT_PRV_CLK_EN (1 << 0)
#define BYT_PRV_CLK_M_VAL_SHIFT 1
@@ -1461,11 +1464,13 @@ byt_serial_setup(struct serial_private *priv,
switch (pdev->device) {
case PCI_DEVICE_ID_INTEL_BYT_UART1:
case PCI_DEVICE_ID_INTEL_BSW_UART1:
+ case PCI_DEVICE_ID_INTEL_BDW_UART1:
rx_param->src_id = 3;
tx_param->dst_id = 2;
break;
case PCI_DEVICE_ID_INTEL_BYT_UART2:
case PCI_DEVICE_ID_INTEL_BSW_UART2:
+ case PCI_DEVICE_ID_INTEL_BDW_UART2:
rx_param->src_id = 5;
tx_param->dst_id = 4;
break;
@@ -1936,6 +1941,7 @@ pci_wch_ch38x_setup(struct serial_private *priv,
#define PCIE_VENDOR_ID_WCH 0x1c00
#define PCIE_DEVICE_ID_WCH_CH382_2S1P 0x3250
#define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470
+#define PCIE_DEVICE_ID_WCH_CH382_2S 0x3253
#define PCI_VENDOR_ID_PERICOM 0x12D8
#define PCI_DEVICE_ID_PERICOM_PI7C9X7951 0x7951
@@ -2062,6 +2068,20 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.subdevice = PCI_ANY_ID,
.setup = byt_serial_setup,
},
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BDW_UART1,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = byt_serial_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BDW_UART2,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = byt_serial_setup,
+ },
/*
* ITE
*/
@@ -2618,6 +2638,14 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.subdevice = PCI_ANY_ID,
.setup = pci_wch_ch353_setup,
},
+ /* WCH CH382 2S card (16850 clone) */
+ {
+ .vendor = PCIE_VENDOR_ID_WCH,
+ .device = PCIE_DEVICE_ID_WCH_CH382_2S,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_wch_ch38x_setup,
+ },
/* WCH CH382 2S1P card (16850 clone) */
{
.vendor = PCIE_VENDOR_ID_WCH,
@@ -2936,6 +2964,7 @@ enum pci_board_num_t {
pbn_fintek_4,
pbn_fintek_8,
pbn_fintek_12,
+ pbn_wch382_2,
pbn_wch384_4,
pbn_pericom_PI7C9X7951,
pbn_pericom_PI7C9X7952,
@@ -3756,6 +3785,13 @@ static struct pciserial_board pci_boards[] = {
.base_baud = 115200,
.first_offset = 0x40,
},
+ [pbn_wch382_2] = {
+ .flags = FL_BASE0,
+ .num_ports = 2,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ .first_offset = 0xC0,
+ },
[pbn_wch384_4] = {
.flags = FL_BASE0,
.num_ports = 4,
@@ -5506,6 +5542,16 @@ static struct pci_device_id serial_pci_tbl[] = {
PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
pbn_byt },
+ /* Intel Broadwell */
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_UART1,
+ PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
+ pbn_byt },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_UART2,
+ PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
+ pbn_byt },
+
/*
* Intel Quark x1000
*/
@@ -5545,6 +5591,10 @@ static struct pci_device_id serial_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID,
0, 0, pbn_b0_bt_2_115200 },
+ { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH382_2S,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0, pbn_wch382_2 },
+
{ PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S,
PCI_ANY_ID, PCI_ANY_ID,
0, 0, pbn_wch384_4 },
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 52d82d2ac726..56ccbcefdd85 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -713,22 +713,16 @@ static int size_fifo(struct uart_8250_port *up)
*/
static unsigned int autoconfig_read_divisor_id(struct uart_8250_port *p)
{
- unsigned char old_dll, old_dlm, old_lcr;
- unsigned int id;
+ unsigned char old_lcr;
+ unsigned int id, old_dl;
old_lcr = serial_in(p, UART_LCR);
serial_out(p, UART_LCR, UART_LCR_CONF_MODE_A);
+ old_dl = serial_dl_read(p);
+ serial_dl_write(p, 0);
+ id = serial_dl_read(p);
+ serial_dl_write(p, old_dl);
- old_dll = serial_in(p, UART_DLL);
- old_dlm = serial_in(p, UART_DLM);
-
- serial_out(p, UART_DLL, 0);
- serial_out(p, UART_DLM, 0);
-
- id = serial_in(p, UART_DLL) | serial_in(p, UART_DLM) << 8;
-
- serial_out(p, UART_DLL, old_dll);
- serial_out(p, UART_DLM, old_dlm);
serial_out(p, UART_LCR, old_lcr);
return id;
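The autoconfig change above swaps four raw DLL/DLM accesses for serial_dl_read()/serial_dl_write(), which treat the two 8-bit divisor latch registers as one 16-bit value. The underlying packing, sketched over a fake register pair (the real code goes through serial_in()/serial_out() with UART_LCR_CONF_MODE_A selected):

#include <stdint.h>
#include <stdio.h>

/* Fake divisor latch registers standing in for the hardware. */
static uint8_t dll, dlm;

static unsigned int dl_read(void)
{
	return dll | (dlm << 8);
}

static void dl_write(unsigned int dl)
{
	dll = dl & 0xff;
	dlm = (dl >> 8) & 0xff;
}

int main(void)
{
	dl_write(0x1c2);
	printf("divisor 0x%x\n", dl_read());	/* -> 0x1c2 */
	return 0;
}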
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 9d4c84f7485f..24280d9a05e9 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -1343,7 +1343,7 @@ static inline void serial_omap_add_console_port(struct uart_omap_port *up)
/* Enable or disable the rs485 support */
static int
-serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
+serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned int mode;
@@ -1356,8 +1356,12 @@ serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
up->ier = 0;
serial_out(up, UART_IER, 0);
+ /* Clamp the delays to [0, 100ms] */
+ rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U);
+ rs485->delay_rts_after_send = min(rs485->delay_rts_after_send, 100U);
+
/* store new config */
- port->rs485 = *rs485conf;
+ port->rs485 = *rs485;
/*
* Just as a precaution, only allow rs485
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index bcc8e1e8bb72..7cef54334b12 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1462,13 +1462,13 @@ static int tty_reopen(struct tty_struct *tty)
{
struct tty_driver *driver = tty->driver;
- if (!tty->count)
- return -EIO;
-
if (driver->type == TTY_DRIVER_TYPE_PTY &&
driver->subtype == PTY_TYPE_MASTER)
return -EIO;
+ if (!tty->count)
+ return -EAGAIN;
+
if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN))
return -EBUSY;
@@ -2069,7 +2069,12 @@ retry_open:
if (tty) {
mutex_unlock(&tty_mutex);
- tty_lock(tty);
+ retval = tty_lock_interruptible(tty);
+ if (retval) {
+ if (retval == -EINTR)
+ retval = -ERESTARTSYS;
+ goto err_unref;
+ }
/* safe to drop the kref from tty_driver_lookup_tty() */
tty_kref_put(tty);
retval = tty_reopen(tty);
@@ -2087,7 +2092,11 @@ retry_open:
if (IS_ERR(tty)) {
retval = PTR_ERR(tty);
- goto err_file;
+ if (retval != -EAGAIN || signal_pending(current))
+ goto err_file;
+ tty_free_file(filp);
+ schedule();
+ goto retry_open;
}
tty_add_file(tty, filp);
@@ -2156,6 +2165,7 @@ retry_open:
return 0;
err_unlock:
mutex_unlock(&tty_mutex);
+err_unref:
/* after locks to avoid deadlock */
if (!IS_ERR_OR_NULL(driver))
tty_driver_kref_put(driver);
@@ -2653,6 +2663,28 @@ static int tiocsetd(struct tty_struct *tty, int __user *p)
}
/**
+ * tiocgetd - get line discipline
+ * @tty: tty device
+ * @p: pointer to user data
+ *
+ * Retrieves the line discipline id directly from the ldisc.
+ *
+ * Locking: waits for ldisc reference (in case the line discipline
+ * is changing or the tty is being hung up)
+ */
+
+static int tiocgetd(struct tty_struct *tty, int __user *p)
+{
+ struct tty_ldisc *ld;
+ int ret;
+
+ ld = tty_ldisc_ref_wait(tty);
+ ret = put_user(ld->ops->num, p);
+ tty_ldisc_deref(ld);
+ return ret;
+}
+
+/**
* send_break - performed time break
* @tty: device to break on
* @duration: timeout in mS
@@ -2878,7 +2910,7 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case TIOCGSID:
return tiocgsid(tty, real_tty, p);
case TIOCGETD:
- return put_user(tty->ldisc->ops->num, (int __user *)p);
+ return tiocgetd(tty, p);
case TIOCSETD:
return tiocsetd(tty, p);
case TIOCVHANGUP:
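With tiocgetd() in place, TIOCGETD takes a proper ldisc reference instead of dereferencing tty->ldisc directly. From userspace the ioctl is unchanged; a short caller for reference (run against a real tty on stdin):

#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int ldisc;

	if (ioctl(STDIN_FILENO, TIOCGETD, &ldisc) < 0) {
		perror("TIOCGETD");
		return 1;
	}
	printf("line discipline: %d\n", ldisc);	/* N_TTY is 0 */
	return 0;
}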
diff --git a/drivers/tty/tty_mutex.c b/drivers/tty/tty_mutex.c
index 0efcf713b756..d09293bc0e04 100644
--- a/drivers/tty/tty_mutex.c
+++ b/drivers/tty/tty_mutex.c
@@ -22,6 +22,14 @@ void __lockfunc tty_lock(struct tty_struct *tty)
}
EXPORT_SYMBOL(tty_lock);
+int tty_lock_interruptible(struct tty_struct *tty)
+{
+ if (WARN(tty->magic != TTY_MAGIC, "L Bad %p\n", tty))
+ return -EIO;
+ tty_kref_get(tty);
+ return mutex_lock_interruptible(&tty->legacy_mutex);
+}
+
void __lockfunc tty_unlock(struct tty_struct *tty)
{
if (tty->magic != TTY_MAGIC) {
diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c
index 45f86da1d6d3..03b6743461d1 100644
--- a/drivers/usb/chipidea/otg.c
+++ b/drivers/usb/chipidea/otg.c
@@ -158,7 +158,7 @@ static void ci_otg_work(struct work_struct *work)
int ci_hdrc_otg_init(struct ci_hdrc *ci)
{
INIT_WORK(&ci->work, ci_otg_work);
- ci->wq = create_singlethread_workqueue("ci_otg");
+ ci->wq = create_freezable_workqueue("ci_otg");
if (!ci->wq) {
dev_err(ci->dev, "can't create workqueue\n");
return -ENODEV;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 26ca4f910cb0..d37fdcc3143c 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -428,7 +428,8 @@ static void acm_read_bulk_callback(struct urb *urb)
set_bit(rb->index, &acm->read_urbs_free);
dev_dbg(&acm->data->dev, "%s - non-zero urb status: %d\n",
__func__, status);
- return;
+ if ((status != -ENOENT) || (urb->actual_length == 0))
+ return;
}
usb_mark_last_busy(acm->dev);
@@ -1113,6 +1114,9 @@ static int acm_probe(struct usb_interface *intf,
if (quirks == NO_UNION_NORMAL) {
data_interface = usb_ifnum_to_if(usb_dev, 1);
control_interface = usb_ifnum_to_if(usb_dev, 0);
+ /* we would crash */
+ if (!data_interface || !control_interface)
+ return -ENODEV;
goto skip_normal_probe;
}
@@ -1404,6 +1408,8 @@ made_compressed_probe:
usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress),
NULL, acm->writesize, acm_write_bulk, snd);
snd->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ if (quirks & SEND_ZERO_PACKET)
+ snd->urb->transfer_flags |= URB_ZERO_PACKET;
snd->instance = acm;
}
@@ -1838,6 +1844,11 @@ static const struct usb_device_id acm_ids[] = {
},
#endif
+ /* Samsung phone in firmware update mode */
+ { USB_DEVICE(0x04e8, 0x685d),
+ .driver_info = IGNORE_DEVICE,
+ },
+
/* Exclude Infineon Flash Loader utility */
{ USB_DEVICE(0x058b, 0x0041),
.driver_info = IGNORE_DEVICE,
@@ -1861,6 +1872,10 @@ static const struct usb_device_id acm_ids[] = {
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
USB_CDC_ACM_PROTO_AT_CDMA) },
+ { USB_DEVICE(0x1519, 0x0452), /* Intel 7260 modem */
+ .driver_info = SEND_ZERO_PACKET,
+ },
+
{ }
};
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index dd9af38e7cda..ccfaba9ab4e4 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -134,3 +134,4 @@ struct acm {
#define IGNORE_DEVICE BIT(5)
#define QUIRK_CONTROL_LINE_STATE BIT(6)
#define CLEAR_HALT_CONDITIONS BIT(7)
+#define SEND_ZERO_PACKET BIT(8)
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 56593a9a8726..2057d91d8336 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -502,11 +502,15 @@ static int usb_unbind_interface(struct device *dev)
int usb_driver_claim_interface(struct usb_driver *driver,
struct usb_interface *iface, void *priv)
{
- struct device *dev = &iface->dev;
+ struct device *dev;
struct usb_device *udev;
int retval = 0;
int lpm_disable_error;
+ if (!iface)
+ return -ENODEV;
+
+ dev = &iface->dev;
if (dev->driver)
return -EBUSY;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index ddbf32d599cb..84df093639ac 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -3895,17 +3895,30 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
return;
}
- if (usb_set_lpm_timeout(udev, state, timeout))
+ if (usb_set_lpm_timeout(udev, state, timeout)) {
/* If we can't set the parent hub U1/U2 timeout,
* device-initiated LPM won't be allowed either, so let the xHCI
* host know that this link state won't be enabled.
*/
hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state);
+ } else {
+ /* Only a configured device will accept the Set Feature
+ * U1/U2_ENABLE
+ */
+ if (udev->actconfig)
+ usb_set_device_initiated_lpm(udev, state, true);
- /* Only a configured device will accept the Set Feature U1/U2_ENABLE */
- else if (udev->actconfig)
- usb_set_device_initiated_lpm(udev, state, true);
-
+ /* As soon as usb_set_lpm_timeout(timeout) returns 0,
+ * hub-initiated LPM is enabled. Thus, LPM is enabled no
+ * matter the result of usb_set_device_initiated_lpm().
+ * The only difference is whether the device is able to
+ * initiate LPM.
+ */
+ if (state == USB3_LPM_U1)
+ udev->usb3_lpm_u1_enabled = 1;
+ else if (state == USB3_LPM_U2)
+ udev->usb3_lpm_u2_enabled = 1;
+ }
}
/*
@@ -3945,6 +3958,18 @@ static int usb_disable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
dev_warn(&udev->dev, "Could not disable xHCI %s timeout, "
"bus schedule bandwidth may be impacted.\n",
usb3_lpm_names[state]);
+
+ /* As soon as usb_set_lpm_timeout(0) returns 0, hub-initiated LPM
+ * is disabled. The hub will disallow the link from entering U1/U2
+ * as well, even if the device initiates LPM. Hence LPM is disabled
+ * whenever the hub LPM timeout is set to 0, regardless of whether
+ * device-initiated LPM is disabled or not.
+ */
+ if (state == USB3_LPM_U1)
+ udev->usb3_lpm_u1_enabled = 0;
+ else if (state == USB3_LPM_U2)
+ udev->usb3_lpm_u2_enabled = 0;
+
return 0;
}
@@ -3979,8 +4004,6 @@ int usb_disable_lpm(struct usb_device *udev)
if (usb_disable_link_state(hcd, udev, USB3_LPM_U2))
goto enable_lpm;
- udev->usb3_lpm_enabled = 0;
-
return 0;
enable_lpm:
@@ -4038,8 +4061,6 @@ void usb_enable_lpm(struct usb_device *udev)
usb_enable_link_state(hcd, udev, USB3_LPM_U1);
usb_enable_link_state(hcd, udev, USB3_LPM_U2);
-
- udev->usb3_lpm_enabled = 1;
}
EXPORT_SYMBOL_GPL(usb_enable_lpm);
@@ -4256,7 +4277,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
{
struct usb_device *hdev = hub->hdev;
struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
- int i, j, retval;
+ int retries, operations, retval, i;
unsigned delay = HUB_SHORT_RESET_TIME;
enum usb_device_speed oldspeed = udev->speed;
const char *speed;
@@ -4358,7 +4379,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
* first 8 bytes of the device descriptor to get the ep0 maxpacket
* value.
*/
- for (i = 0; i < GET_DESCRIPTOR_TRIES; (++i, msleep(100))) {
+ for (retries = 0; retries < GET_DESCRIPTOR_TRIES; (++retries, msleep(100))) {
bool did_new_scheme = false;
if (use_new_scheme(udev, retry_counter)) {
@@ -4385,7 +4406,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
* 255 is for WUSB devices, we actually need to use
* 512 (WUSB1.0[4.8.1]).
*/
- for (j = 0; j < 3; ++j) {
+ for (operations = 0; operations < 3; ++operations) {
buf->bMaxPacketSize0 = 0;
r = usb_control_msg(udev, usb_rcvaddr0pipe(),
USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
@@ -4405,7 +4426,13 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
r = -EPROTO;
break;
}
- if (r == 0)
+ /*
+ * Some devices time out if they are powered on
+ * when already connected. They need a second
+ * reset. But only on the first attempt,
+ * lest we get into a timeout/reset loop.
+ */
+ if (r == 0 || (r == -ETIMEDOUT && retries == 0))
break;
}
udev->descriptor.bMaxPacketSize0 =
@@ -4437,7 +4464,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
* authorization will assign the final address.
*/
if (udev->wusb == 0) {
- for (j = 0; j < SET_ADDRESS_TRIES; ++j) {
+ for (operations = 0; operations < SET_ADDRESS_TRIES; ++operations) {
retval = hub_set_address(udev, devnum);
if (retval >= 0)
break;
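The hub_port_init() change renames the loop counters and, on the first reset cycle only, treats -ETIMEDOUT like success so the outer loop performs a second reset instead of spinning. The control flow, reduced to a standalone sketch (read_desc() stands in for usb_control_msg()):

#include <errno.h>
#include <stdio.h>

/* Stand-in for usb_control_msg(): times out on the first reset cycle. */
static int read_desc(int cycle)
{
	return cycle == 0 ? -ETIMEDOUT : 0;
}

int main(void)
{
	int retries, operations, r = -1;

	for (retries = 0; retries < 2 && r != 0; retries++) {
		/* (a port reset would happen here on each cycle) */
		for (operations = 0; operations < 3; operations++) {
			r = read_desc(retries);
			/* stop early on success, or on a first-cycle
			 * timeout so the outer loop resets and retries */
			if (r == 0 || (r == -ETIMEDOUT && retries == 0))
				break;
		}
	}
	printf("result %d after %d cycle(s)\n", r, retries);
	return 0;
}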
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index d9ec2de6c4cf..65b6e6b84043 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -531,7 +531,7 @@ static ssize_t usb2_lpm_besl_store(struct device *dev,
}
static DEVICE_ATTR_RW(usb2_lpm_besl);
-static ssize_t usb3_hardware_lpm_show(struct device *dev,
+static ssize_t usb3_hardware_lpm_u1_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_device *udev = to_usb_device(dev);
@@ -539,7 +539,7 @@ static ssize_t usb3_hardware_lpm_show(struct device *dev,
usb_lock_device(udev);
- if (udev->usb3_lpm_enabled)
+ if (udev->usb3_lpm_u1_enabled)
p = "enabled";
else
p = "disabled";
@@ -548,7 +548,26 @@ static ssize_t usb3_hardware_lpm_show(struct device *dev,
return sprintf(buf, "%s\n", p);
}
-static DEVICE_ATTR_RO(usb3_hardware_lpm);
+static DEVICE_ATTR_RO(usb3_hardware_lpm_u1);
+
+static ssize_t usb3_hardware_lpm_u2_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct usb_device *udev = to_usb_device(dev);
+ const char *p;
+
+ usb_lock_device(udev);
+
+ if (udev->usb3_lpm_u2_enabled)
+ p = "enabled";
+ else
+ p = "disabled";
+
+ usb_unlock_device(udev);
+
+ return sprintf(buf, "%s\n", p);
+}
+static DEVICE_ATTR_RO(usb3_hardware_lpm_u2);
static struct attribute *usb2_hardware_lpm_attr[] = {
&dev_attr_usb2_hardware_lpm.attr,
@@ -562,7 +581,8 @@ static struct attribute_group usb2_hardware_lpm_attr_group = {
};
static struct attribute *usb3_hardware_lpm_attr[] = {
- &dev_attr_usb3_hardware_lpm.attr,
+ &dev_attr_usb3_hardware_lpm_u1.attr,
+ &dev_attr_usb3_hardware_lpm_u2.attr,
NULL,
};
static struct attribute_group usb3_hardware_lpm_attr_group = {
@@ -592,7 +612,8 @@ static int add_power_attributes(struct device *dev)
if (udev->usb2_hw_lpm_capable == 1)
rc = sysfs_merge_group(&dev->kobj,
&usb2_hardware_lpm_attr_group);
- if (udev->lpm_capable == 1)
+ if (udev->speed == USB_SPEED_SUPER &&
+ udev->lpm_capable == 1)
rc = sysfs_merge_group(&dev->kobj,
&usb3_hardware_lpm_attr_group);
}
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index f8bbd0b6d9fe..a717ddc3eae3 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -66,7 +66,7 @@ int usb_disabled(void)
EXPORT_SYMBOL_GPL(usb_disabled);
#ifdef CONFIG_PM
-static int usb_autosuspend_delay = 2; /* Default delay value,
+static int usb_autosuspend_delay = -1; /* Default delay value,
* in seconds */
module_param_named(autosuspend, usb_autosuspend_delay, int, 0644);
MODULE_PARM_DESC(autosuspend, "default autosuspend delay");
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 39c1cbf0e75d..000104111a8d 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -54,6 +54,37 @@
static const char dwc2_driver_name[] = "dwc2";
+static const struct dwc2_core_params params_hi6220 = {
+ .otg_cap = 2, /* No HNP/SRP capable */
+ .otg_ver = 0, /* 1.3 */
+ .dma_enable = 1,
+ .dma_desc_enable = 0,
+ .speed = 1, /* High Speed */
+ .enable_dynamic_fifo = 1,
+ .en_multiple_tx_fifo = 1,
+ .host_rx_fifo_size = 512,
+ .host_nperio_tx_fifo_size = 512,
+ .host_perio_tx_fifo_size = 512,
+ .max_transfer_size = 65535,
+ .max_packet_count = 511,
+ .host_channels = 16,
+ .phy_type = 1, /* UTMI */
+ .phy_utmi_width = 8,
+ .phy_ulpi_ddr = 0, /* Single */
+ .phy_ulpi_ext_vbus = 0,
+ .i2c_enable = 0,
+ .ulpi_fs_ls = 0,
+ .host_support_fs_ls_low_power = 0,
+ .host_ls_low_power_phy_clk = 0, /* 48 MHz */
+ .ts_dline = 0,
+ .reload_ctl = 0,
+ .ahbcfg = GAHBCFG_HBSTLEN_INCR16 <<
+ GAHBCFG_HBSTLEN_SHIFT,
+ .uframe_sched = 0,
+ .external_id_pin_ctl = -1,
+ .hibernation = -1,
+};
+
static const struct dwc2_core_params params_bcm2835 = {
.otg_cap = 0, /* HNP/SRP capable */
.otg_ver = 0, /* 1.3 */
@@ -308,6 +339,7 @@ static int dwc2_driver_remove(struct platform_device *dev)
static const struct of_device_id dwc2_of_match_table[] = {
{ .compatible = "brcm,bcm2835-usb", .data = &params_bcm2835 },
+ { .compatible = "hisilicon,hi6220-usb", .data = &params_hi6220 },
{ .compatible = "rockchip,rk3066-usb", .data = &params_rk3066 },
{ .compatible = "snps,dwc2", .data = NULL },
{ .compatible = "samsung,s3c6400-hsotg", .data = NULL},
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 36f1cb74588c..78be201d81f4 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -853,7 +853,6 @@ struct dwc3 {
unsigned pullups_connected:1;
unsigned resize_fifos:1;
unsigned setup_packet_pending:1;
- unsigned start_config_issued:1;
unsigned three_stage_setup:1;
unsigned usb3_lpm_capable:1;
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 5320e939e090..b13912d5fa99 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -555,7 +555,6 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
int ret;
u32 reg;
- dwc->start_config_issued = false;
cfg = le16_to_cpu(ctrl->wValue);
switch (state) {
@@ -737,10 +736,6 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY");
ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
break;
- case USB_REQ_SET_INTERFACE:
- dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_INTERFACE");
- dwc->start_config_issued = false;
- /* Fall through */
default:
dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver");
ret = dwc3_ep0_delegate_req(dwc, ctrl);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index a58376fd65fe..69ffe6e8d77f 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -388,24 +388,66 @@ static void dwc3_free_trb_pool(struct dwc3_ep *dep)
dep->trb_pool_dma = 0;
}
+static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep);
+
+/**
+ * dwc3_gadget_start_config - Configure EP resources
+ * @dwc: pointer to our controller context structure
+ * @dep: endpoint that is being enabled
+ *
+ * The assignment of transfer resources cannot perfectly follow the
+ * databook, because the controller driver does not know the full
+ * configuration in advance. It is given this
+ * information piecemeal by the composite gadget framework after every
+ * SET_CONFIGURATION and SET_INTERFACE. Trying to follow the databook
+ * programming model in this scenario can cause errors. For two
+ * reasons:
+ *
+ * 1) The databook says to do DEPSTARTCFG for every SET_CONFIGURATION
+ * and SET_INTERFACE (8.1.5). This is incorrect in the scenario of
+ * multiple interfaces.
+ *
+ * 2) The databook does not mention doing more DEPXFERCFG for a new
+ * endpoint on an alt setting (8.1.6).
+ *
+ * The following simplified method is used instead:
+ *
+ * All hardware endpoints can be assigned a transfer resource and this
+ * setting will stay persistent until either a core reset or
+ * hibernation. So whenever we do a DEPSTARTCFG(0) we can go ahead and
+ * do DEPXFERCFG for every hardware endpoint as well. We are
+ * guaranteed that there are as many transfer resources as endpoints.
+ *
+ * This function is called for each endpoint when it is being enabled
+ * but is triggered only when called for EP0-out, which always happens
+ * first, and which should only happen in one of the above conditions.
+ */
static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
{
struct dwc3_gadget_ep_cmd_params params;
u32 cmd;
+ int i;
+ int ret;
+
+ if (dep->number)
+ return 0;
memset(&params, 0x00, sizeof(params));
+ cmd = DWC3_DEPCMD_DEPSTARTCFG;
- if (dep->number != 1) {
- cmd = DWC3_DEPCMD_DEPSTARTCFG;
- /* XferRscIdx == 0 for ep0 and 2 for the remaining */
- if (dep->number > 1) {
- if (dwc->start_config_issued)
- return 0;
- dwc->start_config_issued = true;
- cmd |= DWC3_DEPCMD_PARAM(2);
- }
+ ret = dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
+ if (ret)
+ return ret;
- return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
+ for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
+ struct dwc3_ep *dep = dwc->eps[i];
+
+ if (!dep)
+ continue;
+
+ ret = dwc3_gadget_set_xfer_resource(dwc, dep);
+ if (ret)
+ return ret;
}
return 0;
@@ -519,10 +561,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
struct dwc3_trb *trb_st_hw;
struct dwc3_trb *trb_link;
- ret = dwc3_gadget_set_xfer_resource(dwc, dep);
- if (ret)
- return ret;
-
dep->endpoint.desc = desc;
dep->comp_desc = comp_desc;
dep->type = usb_endpoint_type(desc);
@@ -1604,8 +1642,6 @@ static int dwc3_gadget_start(struct usb_gadget *g,
}
dwc3_writel(dwc->regs, DWC3_DCFG, reg);
- dwc->start_config_issued = false;
-
/* Start with SuperSpeed Default */
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
@@ -2202,7 +2238,6 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
dwc3_writel(dwc->regs, DWC3_DCTL, reg);
dwc3_disconnect_gadget(dwc);
- dwc->start_config_issued = false;
dwc->gadget.speed = USB_SPEED_UNKNOWN;
dwc->setup_packet_pending = false;
@@ -2253,7 +2288,6 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
dwc3_stop_active_transfers(dwc);
dwc3_clear_stall_all_ep(dwc);
- dwc->start_config_issued = false;
/* Reset device address to zero */
reg = dwc3_readl(dwc->regs, DWC3_DCFG);
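
The kernel-doc added above boils down to a simple sequence: issue DEPSTARTCFG once, when EP0-out is enabled, then hand a transfer resource to every hardware endpoint with DEPXFERCFG. A hedged sketch of that control flow follows; the command functions are stubs and the endpoint count is arbitrary, so this shows the ordering only, not the dwc3 register interface.

#include <stdio.h>

#define NUM_EPS 4

/* Stub commands; names mirror the databook, behavior is faked. */
static int depstartcfg(void)
{
	printf("DEPSTARTCFG(0)\n");
	return 0;
}

static int depxfercfg(int ep)
{
	printf("DEPXFERCFG for ep%d\n", ep);
	return 0;
}

static int start_config(int ep)
{
	int i, ret;

	if (ep != 0)
		return 0;	/* only EP0-out triggers configuration */

	ret = depstartcfg();
	if (ret)
		return ret;

	/* Assign a transfer resource to every endpoint up front. */
	for (i = 0; i < NUM_EPS; i++) {
		ret = depxfercfg(i);
		if (ret)
			return ret;
	}
	return 0;
}

int main(void)
{
	int ep;

	for (ep = 0; ep < NUM_EPS; ep++)
		start_config(ep);	/* only the ep == 0 call does work */
	return 0;
}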
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index c62109091d12..c2d65206ec6c 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -28,7 +28,9 @@
#include "xhci.h"
#include "xhci-trace.h"
-#define PORT2_SSIC_CONFIG_REG2 0x883c
+#define SSIC_PORT_NUM 2
+#define SSIC_PORT_CFG2 0x880c
+#define SSIC_PORT_CFG2_OFFSET 0x30
#define PROG_DONE (1 << 30)
#define SSIC_PORT_UNUSED (1 << 31)
@@ -45,6 +47,7 @@
#define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5
#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f
#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f
+#define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8
static const char hcd_name[] = "xhci_hcd";
@@ -152,7 +155,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
(pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
- pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI)) {
+ pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI)) {
xhci->quirks |= XHCI_PME_STUCK_QUIRK;
}
if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
@@ -322,28 +326,36 @@ static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend)
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
u32 val;
void __iomem *reg;
+ int i;
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
- reg = (void __iomem *) xhci->cap_regs + PORT2_SSIC_CONFIG_REG2;
-
- /* Notify SSIC that SSIC profile programming is not done */
- val = readl(reg) & ~PROG_DONE;
- writel(val, reg);
-
- /* Mark SSIC port as unused(suspend) or used(resume) */
- val = readl(reg);
- if (suspend)
- val |= SSIC_PORT_UNUSED;
- else
- val &= ~SSIC_PORT_UNUSED;
- writel(val, reg);
-
- /* Notify SSIC that SSIC profile programming is done */
- val = readl(reg) | PROG_DONE;
- writel(val, reg);
- readl(reg);
+ for (i = 0; i < SSIC_PORT_NUM; i++) {
+ reg = (void __iomem *) xhci->cap_regs +
+ SSIC_PORT_CFG2 +
+ i * SSIC_PORT_CFG2_OFFSET;
+
+ /*
+ * Notify SSIC that SSIC profile programming
+ * is not done.
+ */
+ val = readl(reg) & ~PROG_DONE;
+ writel(val, reg);
+
+ /* Mark SSIC port as unused (suspend) or used (resume) */
+ val = readl(reg);
+ if (suspend)
+ val |= SSIC_PORT_UNUSED;
+ else
+ val &= ~SSIC_PORT_UNUSED;
+ writel(val, reg);
+
+ /* Notify SSIC that SSIC profile programming is done */
+ val = readl(reg) | PROG_DONE;
+ writel(val, reg);
+ readl(reg);
+ }
}
reg = (void __iomem *) xhci->cap_regs + 0x80a4;
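
The quirk rework above replaces one hard-coded register with base-plus-stride addressing over all SSIC ports; note that port index 1 lands at 0x880c + 0x30 = 0x883c, the old PORT2_SSIC_CONFIG_REG2 value. A small sketch of the address math, with the MMIO access left out:

#include <stdio.h>

#define SSIC_PORT_NUM         2
#define SSIC_PORT_CFG2        0x880c
#define SSIC_PORT_CFG2_OFFSET 0x30

int main(void)
{
	int i;

	/* Each port's CFG2 register sits one fixed stride past the last. */
	for (i = 0; i < SSIC_PORT_NUM; i++) {
		unsigned int off = SSIC_PORT_CFG2 + i * SSIC_PORT_CFG2_OFFSET;
		printf("SSIC port %d: CFG2 at cap_regs + 0x%04x\n", i, off);
	}
	return 0;
}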
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index eeaa6c6bd540..db0f0831b94f 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -2192,10 +2192,6 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
}
/* Fast path - was this the last TRB in the TD for this URB? */
} else if (event_trb == td->last_trb) {
- if (td->urb_length_set && trb_comp_code == COMP_SHORT_TX)
- return finish_td(xhci, td, event_trb, event, ep,
- status, false);
-
if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
td->urb->actual_length =
td->urb->transfer_buffer_length -
@@ -2247,12 +2243,6 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
td->urb->actual_length +=
TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
-
- if (trb_comp_code == COMP_SHORT_TX) {
- xhci_dbg(xhci, "mid bulk/intr SP, wait for last TRB event\n");
- td->urb_length_set = true;
- return 0;
- }
}
return finish_td(xhci, td, event_trb, event, ep, status, false);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 3f912705dcef..776d59c32bc5 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1549,7 +1549,9 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"HW died, freeing TD.");
urb_priv = urb->hcpriv;
- for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
+ for (i = urb_priv->td_cnt;
+ i < urb_priv->length && xhci->devs[urb->dev->slot_id];
+ i++) {
td = urb_priv->td[i];
if (!list_empty(&td->td_list))
list_del_init(&td->td_list);
@@ -5059,6 +5061,10 @@ static int __init xhci_hcd_init(void)
BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
+
+ if (usb_disabled())
+ return -ENODEV;
+
return 0;
}
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index c6bfd13f6c92..1950e87b4219 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -787,6 +787,12 @@ static int iowarrior_probe(struct usb_interface *interface,
iface_desc = interface->cur_altsetting;
dev->product_id = le16_to_cpu(udev->descriptor.idProduct);
+ if (iface_desc->desc.bNumEndpoints < 1) {
+ dev_err(&interface->dev, "Invalid number of endpoints\n");
+ retval = -EINVAL;
+ goto error;
+ }
+
/* set up the endpoint information */
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
endpoint = &iface_desc->endpoint[i].desc;
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index 0d19a6d61a71..970a30e155cb 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -1599,6 +1599,8 @@ static int msm_otg_read_dt(struct platform_device *pdev, struct msm_otg *motg)
&motg->id.nb);
if (ret < 0) {
dev_err(&pdev->dev, "register ID notifier failed\n");
+ extcon_unregister_notifier(motg->vbus.extcon,
+ EXTCON_USB, &motg->vbus.nb);
return ret;
}
@@ -1660,15 +1662,6 @@ static int msm_otg_probe(struct platform_device *pdev)
if (!motg)
return -ENOMEM;
- pdata = dev_get_platdata(&pdev->dev);
- if (!pdata) {
- if (!np)
- return -ENXIO;
- ret = msm_otg_read_dt(pdev, motg);
- if (ret)
- return ret;
- }
-
motg->phy.otg = devm_kzalloc(&pdev->dev, sizeof(struct usb_otg),
GFP_KERNEL);
if (!motg->phy.otg)
@@ -1710,6 +1703,15 @@ static int msm_otg_probe(struct platform_device *pdev)
if (!motg->regs)
return -ENOMEM;
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata) {
+ if (!np)
+ return -ENXIO;
+ ret = msm_otg_read_dt(pdev, motg);
+ if (ret)
+ return ret;
+ }
+
/*
* NOTE: The PHYs can be multiplexed between the chipidea controller
* and the dwc3 controller, using a single bit. It is important that
@@ -1717,8 +1719,10 @@ static int msm_otg_probe(struct platform_device *pdev)
*/
if (motg->phy_number) {
phy_select = devm_ioremap_nocache(&pdev->dev, USB2_PHY_SEL, 4);
- if (!phy_select)
- return -ENOMEM;
+ if (!phy_select) {
+ ret = -ENOMEM;
+ goto unregister_extcon;
+ }
/* Enable second PHY with the OTG port */
writel(0x1, phy_select);
}
@@ -1728,7 +1732,8 @@ static int msm_otg_probe(struct platform_device *pdev)
motg->irq = platform_get_irq(pdev, 0);
if (motg->irq < 0) {
dev_err(&pdev->dev, "platform_get_irq failed\n");
- return motg->irq;
+ ret = motg->irq;
+ goto unregister_extcon;
}
regs[0].supply = "vddcx";
@@ -1737,7 +1742,7 @@ static int msm_otg_probe(struct platform_device *pdev)
ret = devm_regulator_bulk_get(motg->phy.dev, ARRAY_SIZE(regs), regs);
if (ret)
- return ret;
+ goto unregister_extcon;
motg->vddcx = regs[0].consumer;
motg->v3p3 = regs[1].consumer;
@@ -1834,6 +1839,12 @@ disable_clks:
clk_disable_unprepare(motg->clk);
if (!IS_ERR(motg->core_clk))
clk_disable_unprepare(motg->core_clk);
+unregister_extcon:
+ extcon_unregister_notifier(motg->id.extcon,
+ EXTCON_USB_HOST, &motg->id.nb);
+ extcon_unregister_notifier(motg->vbus.extcon,
+ EXTCON_USB, &motg->vbus.nb);
+
return ret;
}
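
The probe-path changes above all funnel early failures through a single unregister_extcon label, so the notifiers registered in msm_otg_read_dt() are torn down on every exit. A minimal sketch of that goto-unwind idiom, using hypothetical acquire/release helpers in place of the extcon calls:

#include <stdio.h>

/* Hypothetical resource helpers; err simulates the call's result. */
static int acquire(const char *what, int err)
{
	printf("acquire %s -> %d\n", what, err);
	return err;
}

static void release(const char *what)
{
	printf("release %s\n", what);
}

static int probe(void)
{
	int ret;

	ret = acquire("extcon notifiers", 0);
	if (ret)
		return ret;

	ret = acquire("irq", 0);		/* any later failure...   */
	if (ret)
		goto unregister_extcon;		/* ...must undo the above */

	ret = acquire("regulators", -12);	/* simulate -ENOMEM */
	if (ret)
		goto unregister_extcon;

	return 0;

unregister_extcon:
	release("extcon notifiers");
	return ret;
}

int main(void)
{
	printf("probe returned %d\n", probe());
	return 0;
}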
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index c0f5c652d272..f1893e08e51a 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -190,7 +190,8 @@ static int usbhsf_pkt_handler(struct usbhs_pipe *pipe, int type)
goto __usbhs_pkt_handler_end;
}
- ret = func(pkt, &is_done);
+ if (likely(func))
+ ret = func(pkt, &is_done);
if (is_done)
__usbhsf_pkt_del(pkt);
@@ -889,6 +890,7 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
pkt->trans = len;
+ usbhsf_tx_irq_ctrl(pipe, 0);
INIT_WORK(&pkt->work, xfer_work);
schedule_work(&pkt->work);
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 8f7a78e70975..fa14198daf77 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -158,10 +158,14 @@ static void usbhsg_queue_done(struct usbhs_priv *priv, struct usbhs_pkt *pkt)
struct usbhs_pipe *pipe = pkt->pipe;
struct usbhsg_uep *uep = usbhsg_pipe_to_uep(pipe);
struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt);
+ unsigned long flags;
ureq->req.actual = pkt->actual;
- usbhsg_queue_pop(uep, ureq, 0);
+ usbhs_lock(priv, flags);
+ if (uep)
+ __usbhsg_queue_pop(uep, ureq, 0);
+ usbhs_unlock(priv, flags);
}
static void usbhsg_queue_push(struct usbhsg_uep *uep,
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 7d4f51a32e66..bdc0f2f24f19 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -98,6 +98,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x81AC) }, /* MSD Dash Hawk */
{ USB_DEVICE(0x10C4, 0x81AD) }, /* INSYS USB Modem */
{ USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */
+ { USB_DEVICE(0x10C4, 0x81D7) }, /* IAI Corp. RCB-CV-USB USB to RS485 Adaptor */
{ USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */
{ USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */
{ USB_DEVICE(0x10C4, 0x81E8) }, /* Zephyr Bioharness */
@@ -160,6 +161,11 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
+ { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
+ { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
+ { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
+ { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
+ { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
{ USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index 01bf53392819..244acb1299a9 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -447,6 +447,11 @@ static int cypress_generic_port_probe(struct usb_serial_port *port)
struct usb_serial *serial = port->serial;
struct cypress_private *priv;
+ if (!port->interrupt_out_urb || !port->interrupt_in_urb) {
+ dev_err(&port->dev, "required endpoint is missing\n");
+ return -ENODEV;
+ }
+
priv = kzalloc(sizeof(struct cypress_private), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -606,12 +611,6 @@ static int cypress_open(struct tty_struct *tty, struct usb_serial_port *port)
cypress_set_termios(tty, port, &priv->tmp_termios);
/* setup the port and start reading from the device */
- if (!port->interrupt_in_urb) {
- dev_err(&port->dev, "%s - interrupt_in_urb is empty!\n",
- __func__);
- return -1;
- }
-
usb_fill_int_urb(port->interrupt_in_urb, serial->dev,
usb_rcvintpipe(serial->dev, port->interrupt_in_endpointAddress),
port->interrupt_in_urb->transfer_buffer,
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 12b0e67473ba..3df7b7ec178e 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -1251,8 +1251,27 @@ static int digi_port_init(struct usb_serial_port *port, unsigned port_num)
static int digi_startup(struct usb_serial *serial)
{
+ struct device *dev = &serial->interface->dev;
struct digi_serial *serial_priv;
int ret;
+ int i;
+
+ /* check whether the device has the expected number of endpoints */
+ if (serial->num_port_pointers < serial->type->num_ports + 1) {
+ dev_err(dev, "OOB endpoints missing\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < serial->type->num_ports + 1; i++) {
+ if (!serial->port[i]->read_urb) {
+ dev_err(dev, "bulk-in endpoint missing\n");
+ return -ENODEV;
+ }
+ if (!serial->port[i]->write_urb) {
+ dev_err(dev, "bulk-out endpoint missing\n");
+ return -ENODEV;
+ }
+ }
serial_priv = kzalloc(sizeof(*serial_priv), GFP_KERNEL);
if (!serial_priv)
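
digi_startup() now rejects malformed devices up front rather than dereferencing a missing URB later, the same defensive shape as the cypress_m8 and iowarrior checks above. A compact sketch of the check, with a toy port struct standing in for usb_serial (the real layout differs):

#include <stdio.h>
#include <errno.h>

struct urb  { int dummy; };
struct port { struct urb *read_urb, *write_urb; };

/* A device must expose num_ports ports plus one out-of-band port. */
static int check_endpoints(struct port **ports, int num_port_pointers,
			   int num_ports)
{
	int i;

	if (num_port_pointers < num_ports + 1)
		return -ENODEV;		/* OOB endpoints missing */

	for (i = 0; i < num_ports + 1; i++)
		if (!ports[i]->read_urb || !ports[i]->write_urb)
			return -ENODEV;	/* bulk endpoint missing */
	return 0;
}

int main(void)
{
	struct urb u;
	struct port good = { &u, &u }, bad = { &u, NULL };
	struct port *ports[] = { &good, &bad };

	/* bad OOB port: prints -19 (-ENODEV) on Linux */
	printf("%d\n", check_endpoints(ports, 2, 1));
	return 0;
}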
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index a5a0376bbd48..b61f12160d37 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -824,6 +824,7 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(FTDI_VID, FTDI_TURTELIZER_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) },
+ { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_SCU18) },
{ USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) },
/* Papouch devices based on FTDI chip */
@@ -1003,6 +1004,10 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) },
{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) },
{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) },
+ /* ICP DAS I-756xU devices */
+ { USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) },
+ { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
+ { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 67c6d4469730..c5d6c1e73e8e 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -615,6 +615,7 @@
*/
#define RATOC_VENDOR_ID 0x0584
#define RATOC_PRODUCT_ID_USB60F 0xb020
+#define RATOC_PRODUCT_ID_SCU18 0xb03a
/*
* Infineon Technologies
@@ -871,6 +872,14 @@
#define NOVITUS_BONO_E_PID 0x6010
/*
+ * ICPDAS I-756*U devices
+ */
+#define ICPDAS_VID 0x1b5c
+#define ICPDAS_I7560U_PID 0x0103
+#define ICPDAS_I7561U_PID 0x0104
+#define ICPDAS_I7563U_PID 0x0105
+
+/*
* RT Systems programming cables for various ham radios
*/
#define RTSYSTEMS_VID 0x2100 /* Vendor ID */
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index fd707d6a10e2..89726f702202 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -376,14 +376,21 @@ static void mct_u232_msr_to_state(struct usb_serial_port *port,
static int mct_u232_port_probe(struct usb_serial_port *port)
{
+ struct usb_serial *serial = port->serial;
struct mct_u232_private *priv;
+ /* check first to simplify error handling */
+ if (!serial->port[1] || !serial->port[1]->interrupt_in_urb) {
+ dev_err(&port->dev, "expected endpoint missing\n");
+ return -ENODEV;
+ }
+
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
/* Use second interrupt-in endpoint for reading. */
- priv->read_urb = port->serial->port[1]->interrupt_in_urb;
+ priv->read_urb = serial->port[1]->interrupt_in_urb;
priv->read_urb->context = port;
spin_lock_init(&priv->lock);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index f2280606b73c..c6f497f16526 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -268,6 +268,9 @@ static void option_instat_callback(struct urb *urb);
#define TELIT_PRODUCT_CC864_SINGLE 0x1006
#define TELIT_PRODUCT_DE910_DUAL 0x1010
#define TELIT_PRODUCT_UE910_V2 0x1012
+#define TELIT_PRODUCT_LE922_USBCFG0 0x1042
+#define TELIT_PRODUCT_LE922_USBCFG3 0x1043
+#define TELIT_PRODUCT_LE922_USBCFG5 0x1045
#define TELIT_PRODUCT_LE920 0x1200
#define TELIT_PRODUCT_LE910 0x1201
@@ -313,6 +316,7 @@ static void option_instat_callback(struct urb *urb);
#define TOSHIBA_PRODUCT_G450 0x0d45
#define ALINK_VENDOR_ID 0x1e0e
+#define SIMCOM_PRODUCT_SIM7100E 0x9001 /* Yes, ALINK_VENDOR_ID */
#define ALINK_PRODUCT_PH300 0x9100
#define ALINK_PRODUCT_3GU 0x9200
@@ -605,6 +609,10 @@ static const struct option_blacklist_info zte_1255_blacklist = {
.reserved = BIT(3) | BIT(4),
};
+static const struct option_blacklist_info simcom_sim7100e_blacklist = {
+ .reserved = BIT(5) | BIT(6),
+};
+
static const struct option_blacklist_info telit_le910_blacklist = {
.sendsetup = BIT(0),
.reserved = BIT(1) | BIT(2),
@@ -615,6 +623,16 @@ static const struct option_blacklist_info telit_le920_blacklist = {
.reserved = BIT(1) | BIT(5),
};
+static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = {
+ .sendsetup = BIT(2),
+ .reserved = BIT(0) | BIT(1) | BIT(3),
+};
+
+static const struct option_blacklist_info telit_le922_blacklist_usbcfg3 = {
+ .sendsetup = BIT(0),
+ .reserved = BIT(1) | BIT(2) | BIT(3),
+};
+
static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -1110,9 +1128,13 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
+ { USB_DEVICE_AND_INTERFACE_INFO(QUALCOMM_VENDOR_ID, 0x6001, 0xff, 0xff, 0xff), /* 4G LTE usb-modem U901 */
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9003), /* Quectel UC20 */
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
@@ -1160,6 +1182,12 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
+ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
+ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
+ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
@@ -1629,6 +1657,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(ALINK_VENDOR_ID, 0x9000) },
{ USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) },
{ USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
+ { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
+ .driver_info = (kernel_ulong_t)&simcom_sim7100e_blacklist },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
.driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
},
@@ -1679,7 +1709,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
- { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) },
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX, 0xff) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
@@ -1788,6 +1818,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 9919d2a9faf2..1bc6089b9008 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -157,14 +157,17 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x1199, 0x9056)}, /* Sierra Wireless Modem */
{DEVICE_SWI(0x1199, 0x9060)}, /* Sierra Wireless Modem */
{DEVICE_SWI(0x1199, 0x9061)}, /* Sierra Wireless Modem */
- {DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx/EM74xx */
- {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx/EM74xx */
+ {DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx */
+ {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */
+ {DEVICE_SWI(0x1199, 0x9078)}, /* Sierra Wireless EM74xx */
+ {DEVICE_SWI(0x1199, 0x9079)}, /* Sierra Wireless EM74xx */
{DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81b1)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
+ {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
/* Huawei devices */
{DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index 60afb39eb73c..337a0be89fcf 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -544,6 +544,11 @@ static int treo_attach(struct usb_serial *serial)
(serial->num_interrupt_in == 0))
return 0;
+ if (serial->num_bulk_in < 2 || serial->num_interrupt_in < 2) {
+ dev_err(&serial->interface->dev, "missing endpoints\n");
+ return -ENODEV;
+ }
+
/*
* It appears that Treos and Kyoceras want to use the
* 1st bulk in endpoint to communicate with the 2nd bulk out endpoint,
@@ -597,8 +602,10 @@ static int clie_5_attach(struct usb_serial *serial)
*/
/* some sanity check */
- if (serial->num_ports < 2)
- return -1;
+ if (serial->num_bulk_out < 2) {
+ dev_err(&serial->interface->dev, "missing bulk out endpoints\n");
+ return -ENODEV;
+ }
/* port 0 now uses the modified endpoint Address */
port = serial->port[0];
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 5c66d3f7a6d0..9baf081174ce 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -2,7 +2,7 @@
* USB Attached SCSI
* Note that this is not the same as the USB Mass Storage driver
*
- * Copyright Hans de Goede <hdegoede@redhat.com> for Red Hat, Inc. 2013 - 2014
+ * Copyright Hans de Goede <hdegoede@redhat.com> for Red Hat, Inc. 2013 - 2016
* Copyright Matthew Wilcox for Intel Corp, 2010
* Copyright Sarah Sharp for Intel Corp, 2010
*
@@ -757,6 +757,17 @@ static int uas_eh_bus_reset_handler(struct scsi_cmnd *cmnd)
return SUCCESS;
}
+static int uas_target_alloc(struct scsi_target *starget)
+{
+ struct uas_dev_info *devinfo = (struct uas_dev_info *)
+ dev_to_shost(starget->dev.parent)->hostdata;
+
+ if (devinfo->flags & US_FL_NO_REPORT_LUNS)
+ starget->no_report_luns = 1;
+
+ return 0;
+}
+
static int uas_slave_alloc(struct scsi_device *sdev)
{
struct uas_dev_info *devinfo =
@@ -800,7 +811,6 @@ static int uas_slave_configure(struct scsi_device *sdev)
if (devinfo->flags & US_FL_BROKEN_FUA)
sdev->broken_fua = 1;
- scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
return 0;
}
@@ -808,11 +818,12 @@ static struct scsi_host_template uas_host_template = {
.module = THIS_MODULE,
.name = "uas",
.queuecommand = uas_queuecommand,
+ .target_alloc = uas_target_alloc,
.slave_alloc = uas_slave_alloc,
.slave_configure = uas_slave_configure,
.eh_abort_handler = uas_eh_abort_handler,
.eh_bus_reset_handler = uas_eh_bus_reset_handler,
- .can_queue = 65536, /* Is there a limit on the _host_ ? */
+ .can_queue = MAX_CMNDS,
.this_id = -1,
.sg_tablesize = SG_NONE,
.skip_settle_delay = 1,
@@ -932,6 +943,12 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
if (result)
goto set_alt0;
+ /*
+ * 1 tag is reserved for untagged commands +
+ * 1 tag to avoid off-by-one errors in some bridge firmware
+ */
+ shost->can_queue = devinfo->qdepth - 2;
+
usb_set_intfdata(intf, shost);
result = scsi_add_host(shost, &intf->dev);
if (result)
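
Instead of the old fantasy can_queue of 65536, the host now advertises what the device reported, minus the two reserved tags the new comment explains. The arithmetic, with a hypothetical qdepth:

#include <stdio.h>

int main(void)
{
	int qdepth = 32;	/* hypothetical depth reported by the device */

	/*
	 * One tag stays reserved for untagged commands; one more absorbs
	 * off-by-one bugs seen in some bridge firmware.
	 */
	int can_queue = qdepth - 2;

	printf("device qdepth %d -> SCSI host can_queue %d\n",
	       qdepth, can_queue);
	return 0;
}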
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index ccc113e83d88..53341a77d89f 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -64,6 +64,13 @@ UNUSUAL_DEV(0x0bc2, 0x3312, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_ATA_1X),
+/* Reported-by: David Webb <djw@noc.ac.uk> */
+UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999,
+ "Seagate",
+ "Expansion Desk",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_REPORT_LUNS),
+
/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
UNUSUAL_DEV(0x0bc2, 0x3320, 0x0000, 0x9999,
"Seagate",
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 43576ed31ccd..9de988a0f856 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -482,7 +482,7 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 |
US_FL_INITIAL_READ10 | US_FL_WRITE_CACHE |
US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES |
- US_FL_MAX_SECTORS_240);
+ US_FL_MAX_SECTORS_240 | US_FL_NO_REPORT_LUNS);
p = quirks;
while (*p) {
@@ -532,6 +532,9 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
case 'i':
f |= US_FL_IGNORE_DEVICE;
break;
+ case 'j':
+ f |= US_FL_NO_REPORT_LUNS;
+ break;
case 'l':
f |= US_FL_NOT_LOCKABLE;
break;
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 56bf6dbb93db..9982cb176ce8 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -446,7 +446,8 @@ static long vfio_pci_ioctl(void *device_data,
info.num_regions = VFIO_PCI_NUM_REGIONS;
info.num_irqs = VFIO_PCI_NUM_IRQS;
- return copy_to_user((void __user *)arg, &info, minsz);
+ return copy_to_user((void __user *)arg, &info, minsz) ?
+ -EFAULT : 0;
} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
struct pci_dev *pdev = vdev->pdev;
@@ -520,7 +521,8 @@ static long vfio_pci_ioctl(void *device_data,
return -EINVAL;
}
- return copy_to_user((void __user *)arg, &info, minsz);
+ return copy_to_user((void __user *)arg, &info, minsz) ?
+ -EFAULT : 0;
} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
struct vfio_irq_info info;
@@ -555,7 +557,8 @@ static long vfio_pci_ioctl(void *device_data,
else
info.flags |= VFIO_IRQ_INFO_NORESIZE;
- return copy_to_user((void __user *)arg, &info, minsz);
+ return copy_to_user((void __user *)arg, &info, minsz) ?
+ -EFAULT : 0;
} else if (cmd == VFIO_DEVICE_SET_IRQS) {
struct vfio_irq_set hdr;
diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
index 418cdd9ba3f4..e65b142d3422 100644
--- a/drivers/vfio/platform/vfio_platform_common.c
+++ b/drivers/vfio/platform/vfio_platform_common.c
@@ -219,7 +219,8 @@ static long vfio_platform_ioctl(void *device_data,
info.num_regions = vdev->num_regions;
info.num_irqs = vdev->num_irqs;
- return copy_to_user((void __user *)arg, &info, minsz);
+ return copy_to_user((void __user *)arg, &info, minsz) ?
+ -EFAULT : 0;
} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
struct vfio_region_info info;
@@ -240,7 +241,8 @@ static long vfio_platform_ioctl(void *device_data,
info.size = vdev->regions[info.index].size;
info.flags = vdev->regions[info.index].flags;
- return copy_to_user((void __user *)arg, &info, minsz);
+ return copy_to_user((void __user *)arg, &info, minsz) ?
+ -EFAULT : 0;
} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
struct vfio_irq_info info;
@@ -259,7 +261,8 @@ static long vfio_platform_ioctl(void *device_data,
info.flags = vdev->irqs[info.index].flags;
info.count = vdev->irqs[info.index].count;
- return copy_to_user((void __user *)arg, &info, minsz);
+ return copy_to_user((void __user *)arg, &info, minsz) ?
+ -EFAULT : 0;
} else if (cmd == VFIO_DEVICE_SET_IRQS) {
struct vfio_irq_set hdr;
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 59d47cb638d5..ecb826eefe02 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -999,7 +999,8 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
info.iova_pgsizes = vfio_pgsize_bitmap(iommu);
- return copy_to_user((void __user *)arg, &info, minsz);
+ return copy_to_user((void __user *)arg, &info, minsz) ?
+ -EFAULT : 0;
} else if (cmd == VFIO_IOMMU_MAP_DMA) {
struct vfio_iommu_type1_dma_map map;
@@ -1032,7 +1033,8 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
if (ret)
return ret;
- return copy_to_user((void __user *)arg, &unmap, minsz);
+ return copy_to_user((void __user *)arg, &unmap, minsz) ?
+ -EFAULT : 0;
}
return -ENOTTY;
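
Every VFIO hunk above fixes the same bug: copy_to_user() returns the number of bytes it failed to copy, not an errno, so returning its result directly hands userspace a positive byte count where the ioctl contract expects 0 or a negative errno. A user-space sketch of the convention, with a fake copy routine so it runs standalone:

#include <stdio.h>
#include <errno.h>
#include <string.h>

/* Fake copy_to_user: returns bytes NOT copied, 0 on success. */
static unsigned long fake_copy_to_user(void *dst, const void *src,
				       unsigned long n, int fail)
{
	if (fail)
		return n;	/* nothing copied */
	memcpy(dst, src, n);
	return 0;
}

/* Translate "bytes left over" into the 0-or-negative-errno contract. */
static long ioctl_reply(void *dst, const void *src, unsigned long n, int fail)
{
	return fake_copy_to_user(dst, src, n, fail) ? -EFAULT : 0;
}

int main(void)
{
	char buf[8];

	printf("ok:   %ld\n", ioctl_reply(buf, "data", 4, 0)); /* 0 */
	printf("fail: %ld\n", ioctl_reply(buf, "data", 4, 1)); /* -EFAULT */
	return 0;
}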
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 35d239ebf912..385e4da0d890 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -22,6 +22,7 @@ source "drivers/gpu/vga/Kconfig"
source "drivers/gpu/host1x/Kconfig"
source "drivers/gpu/ipu-v3/Kconfig"
+source "drivers/gpu/arm/Kconfig"
source "drivers/gpu/drm/Kconfig"
menu "Frame buffer Devices"
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 92f394927f24..6e92917ba77a 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -709,6 +709,7 @@ static int con2fb_acquire_newinfo(struct vc_data *vc, struct fb_info *info,
}
if (!err) {
+ ops->cur_blink_jiffies = HZ / 5;
info->fbcon_par = ops;
if (vc)
@@ -956,6 +957,7 @@ static const char *fbcon_startup(void)
ops->currcon = -1;
ops->graphics = 1;
ops->cur_rotate = -1;
+ ops->cur_blink_jiffies = HZ / 5;
info->fbcon_par = ops;
p->con_rotate = initial_rotation;
set_blitting_type(vc, info);
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 7efc32945810..7d3e5d0e9aa4 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -209,8 +209,8 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
*/
if (vb->num_pfns != 0)
tell_host(vb, vb->deflate_vq);
- mutex_unlock(&vb->balloon_lock);
release_pages_balloon(vb);
+ mutex_unlock(&vb->balloon_lock);
return num_freed_pages;
}
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 78f804af6c20..2046a68ad0ba 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -545,6 +545,7 @@ err_enable_device:
static void virtio_pci_remove(struct pci_dev *pci_dev)
{
struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
+ struct device *dev = get_device(&vp_dev->vdev.dev);
unregister_virtio_device(&vp_dev->vdev);
@@ -554,6 +555,7 @@ static void virtio_pci_remove(struct pci_dev *pci_dev)
virtio_pci_modern_remove(vp_dev);
pci_disable_device(pci_dev);
+ put_device(dev);
}
static struct pci_driver virtio_pci_driver = {
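
virtio_pci_remove() now takes its own reference on the embedded device before unregistering, because unregister may drop the last reference and free vp_dev while the remove path still touches it afterwards. A toy refcount sketch of that hold-across-teardown pattern (hand-rolled counter, not the driver-core API):

#include <stdio.h>
#include <stdlib.h>

struct dev { int refs; };

static struct dev *get_device(struct dev *d)
{
	d->refs++;
	return d;
}

static void put_device(struct dev *d)
{
	if (--d->refs == 0) {
		printf("device freed\n");
		free(d);
	}
}

int main(void)
{
	struct dev *d = malloc(sizeof(*d));
	struct dev *held;

	d->refs = 1;
	held = get_device(d);	/* pin across the unregister step */
	put_device(d);		/* "unregister" drops what may be the last ref */
	printf("still safe to touch dev, refs=%d\n", held->refs);
	put_device(held);	/* now it is really freed */
	return 0;
}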
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 8e5cf194cc0b..4469202eaa8e 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -17,6 +17,7 @@
*
*/
+#include <linux/delay.h>
#define VIRTIO_PCI_NO_LEGACY
#include "virtio_pci_common.h"
@@ -271,9 +272,13 @@ static void vp_reset(struct virtio_device *vdev)
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
/* 0 status means a reset. */
vp_iowrite8(0, &vp_dev->common->device_status);
- /* Flush out the status write, and flush in device writes,
- * including MSI-X interrupts, if any. */
- vp_ioread8(&vp_dev->common->device_status);
+ /* After writing 0 to device_status, the driver MUST wait for a read of
+ * device_status to return 0 before reinitializing the device.
+ * This will flush out the status write, and flush in device writes,
+ * including MSI-X interrupts, if any.
+ */
+ while (vp_ioread8(&vp_dev->common->device_status))
+ msleep(1);
/* Flush pending VQ/configuration callbacks. */
vp_synchronize_vectors(vdev);
}
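
The loop above polls device_status unboundedly, exactly as the virtio 1.0 text requires. A driver written more defensively might bound the wait; the sketch below does that with a simulated register, and the retry limit is an assumption of the sketch, not something this patch adds:

#include <stdio.h>
#include <unistd.h>

static int status = 3;		/* simulated device_status register */

static int read_status(void)
{
	return status > 0 ? status-- : 0;	/* device drains to 0 */
}

int main(void)
{
	int tries;

	/* Poll until the status register reads back 0, with a cap. */
	for (tries = 0; tries < 100; tries++) {
		if (read_status() == 0) {
			printf("reset complete after %d polls\n", tries + 1);
			return 0;
		}
		usleep(1000);	/* analogous to msleep(1) */
	}
	fprintf(stderr, "device failed to reset\n");
	return 1;
}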
diff --git a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c
index 71e78ef4b736..3a75f3b53452 100644
--- a/drivers/watchdog/rc32434_wdt.c
+++ b/drivers/watchdog/rc32434_wdt.c
@@ -237,7 +237,7 @@ static long rc32434_wdt_ioctl(struct file *file, unsigned int cmd,
return -EINVAL;
/* Fall through */
case WDIOC_GETTIMEOUT:
- return copy_to_user(argp, &timeout, sizeof(int));
+ return copy_to_user(argp, &timeout, sizeof(int)) ? -EFAULT : 0;
default:
return -ENOTTY;
}
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 524c22146429..44367783f07a 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -484,9 +484,19 @@ static void eoi_pirq(struct irq_data *data)
struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
int rc = 0;
- irq_move_irq(data);
+ if (!VALID_EVTCHN(evtchn))
+ return;
- if (VALID_EVTCHN(evtchn))
+ if (unlikely(irqd_is_setaffinity_pending(data))) {
+ int masked = test_and_set_mask(evtchn);
+
+ clear_evtchn(evtchn);
+
+ irq_move_masked_irq(data);
+
+ if (!masked)
+ unmask_evtchn(evtchn);
+ } else
clear_evtchn(evtchn);
if (pirq_needs_eoi(data->irq)) {
@@ -1357,9 +1367,19 @@ static void ack_dynirq(struct irq_data *data)
{
int evtchn = evtchn_from_irq(data->irq);
- irq_move_irq(data);
+ if (!VALID_EVTCHN(evtchn))
+ return;
- if (VALID_EVTCHN(evtchn))
+ if (unlikely(irqd_is_setaffinity_pending(data))) {
+ int masked = test_and_set_mask(evtchn);
+
+ clear_evtchn(evtchn);
+
+ irq_move_masked_irq(data);
+
+ if (!masked)
+ unmask_evtchn(evtchn);
+ } else
clear_evtchn(evtchn);
}
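
Both event-channel hunks replace irq_move_irq() with an explicit sequence: mask the channel, ack the event, migrate the IRQ while it cannot fire, then unmask only if it was not already masked. A sketch of just that ordering, with the channel ops stubbed out:

#include <stdio.h>
#include <stdbool.h>

static bool masked;

static bool test_and_set_mask(void)
{
	bool was = masked;
	masked = true;
	return was;
}

static void unmask(void)          { masked = false; printf("unmasked\n"); }
static void clear_event(void)     { printf("event acked\n"); }
static void move_masked_irq(void) { printf("irq migrated while masked\n"); }

static void ack(bool affinity_pending)
{
	if (affinity_pending) {
		bool was_masked = test_and_set_mask(); /* block deliveries */

		clear_event();
		move_masked_irq();
		if (!was_masked)
			unmask();	/* restore only what we changed */
	} else {
		clear_event();
	}
}

int main(void)
{
	ack(true);
	return 0;
}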
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
index 73dafdc494aa..fb0221434f81 100644
--- a/drivers/xen/xen-pciback/pciback_ops.c
+++ b/drivers/xen/xen-pciback/pciback_ops.c
@@ -227,8 +227,9 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
/*
* PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able
* to access the BARs where the MSI-X entries reside.
+ * For VF devices, however, the PF must be checked instead.
*/
- pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ pci_read_config_word(pci_physfn(dev), PCI_COMMAND, &cmd);
if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY))
return -ENXIO;
@@ -332,6 +333,9 @@ void xen_pcibk_do_op(struct work_struct *data)
struct xen_pcibk_dev_data *dev_data = NULL;
struct xen_pci_op *op = &pdev->op;
int test_intx = 0;
+#ifdef CONFIG_PCI_MSI
+ unsigned int nr = 0;
+#endif
*op = pdev->sh_info->op;
barrier();
@@ -360,6 +364,7 @@ void xen_pcibk_do_op(struct work_struct *data)
op->err = xen_pcibk_disable_msi(pdev, dev, op);
break;
case XEN_PCI_OP_enable_msix:
+ nr = op->value;
op->err = xen_pcibk_enable_msix(pdev, dev, op);
break;
case XEN_PCI_OP_disable_msix:
@@ -382,7 +387,7 @@ void xen_pcibk_do_op(struct work_struct *data)
if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) {
unsigned int i;
- for (i = 0; i < op->value; i++)
+ for (i = 0; i < nr; i++)
pdev->sh_info->op.msix_entries[i].vector =
op->msix_entries[i].vector;
}
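
The new nr variable matters because op points into memory shared with the frontend: re-reading op->value for the copy-back loop would let a misbehaving guest raise it after validation and push the loop out of bounds, a classic time-of-check/time-of-use bug. A minimal sketch of the rule (snapshot an untrusted shared field once, then only use the local copy):

#include <stdio.h>

#define MAX_ENTRIES 4

/* Imagine this struct lives in a page shared with an untrusted peer. */
struct shared_op {
	volatile unsigned int value;
	int entries[MAX_ENTRIES];
};

static void copy_back(struct shared_op *op)
{
	unsigned int i, nr = op->value;	/* read once, then validate */

	if (nr > MAX_ENTRIES)
		nr = MAX_ENTRIES;

	/* Later writes to op->value by the peer cannot change the bound. */
	for (i = 0; i < nr; i++)
		printf("entry %u = %d\n", i, op->entries[i]);
}

int main(void)
{
	struct shared_op op = { .value = 2, .entries = { 10, 11, 12, 13 } };

	copy_back(&op);
	return 0;
}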
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index ad4eb1024d1f..51387d75c7bf 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -939,12 +939,12 @@ out:
spin_unlock_irqrestore(&info->v2p_lock, flags);
out_free:
- mutex_lock(&tpg->tv_tpg_mutex);
- tpg->tv_tpg_fe_count--;
- mutex_unlock(&tpg->tv_tpg_mutex);
-
- if (err)
+ if (err) {
+ mutex_lock(&tpg->tv_tpg_mutex);
+ tpg->tv_tpg_fe_count--;
+ mutex_unlock(&tpg->tv_tpg_mutex);
kfree(new);
+ }
return err;
}
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 3e36e4adc4a3..9aba42b78253 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -328,8 +328,8 @@ static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
list_add_tail(&work->ordered_list, &wq->ordered_list);
spin_unlock_irqrestore(&wq->list_lock, flags);
}
- queue_work(wq->normal_wq, &work->normal_work);
trace_btrfs_work_queued(work);
+ queue_work(wq->normal_wq, &work->normal_work);
}
void btrfs_queue_work(struct btrfs_workqueue *wq,
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index d453d62ab0c6..e2f659dc5745 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1417,7 +1417,8 @@ char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
read_extent_buffer(eb, dest + bytes_left,
name_off, name_len);
if (eb != eb_in) {
- btrfs_tree_read_unlock_blocking(eb);
+ if (!path->skip_locking)
+ btrfs_tree_read_unlock_blocking(eb);
free_extent_buffer(eb);
}
ret = btrfs_find_item(fs_root, path, parent, 0,
@@ -1437,9 +1438,10 @@ char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
eb = path->nodes[0];
/* make sure we can use eb after releasing the path */
if (eb != eb_in) {
- atomic_inc(&eb->refs);
- btrfs_tree_read_lock(eb);
- btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+ if (!path->skip_locking)
+ btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+ path->nodes[0] = NULL;
+ path->locks[0] = 0;
}
btrfs_release_path(path);
iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 35489e7129a7..385b449fd7ed 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1572,7 +1572,7 @@ struct btrfs_fs_info {
spinlock_t delayed_iput_lock;
struct list_head delayed_iputs;
- struct rw_semaphore delayed_iput_sem;
+ struct mutex cleaner_delayed_iput_mutex;
/* this protects tree_mod_seq_list */
spinlock_t tree_mod_seq_lock;
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index e0941fbb913c..02b934d0ee65 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -1694,7 +1694,7 @@ int btrfs_should_delete_dir_index(struct list_head *del_list,
*
*/
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
- struct list_head *ins_list)
+ struct list_head *ins_list, bool *emitted)
{
struct btrfs_dir_item *di;
struct btrfs_delayed_item *curr, *next;
@@ -1738,6 +1738,7 @@ int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
if (over)
return 1;
+ *emitted = true;
}
return 0;
}
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
index f70119f25421..0167853c84ae 100644
--- a/fs/btrfs/delayed-inode.h
+++ b/fs/btrfs/delayed-inode.h
@@ -144,7 +144,7 @@ void btrfs_put_delayed_items(struct list_head *ins_list,
int btrfs_should_delete_dir_index(struct list_head *del_list,
u64 index);
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
- struct list_head *ins_list);
+ struct list_head *ins_list, bool *emitted);
/* for init */
int __init btrfs_delayed_inode_init(void);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 974be09e7556..41fb43183406 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1582,8 +1582,23 @@ int btrfs_init_fs_root(struct btrfs_root *root)
ret = get_anon_bdev(&root->anon_dev);
if (ret)
goto free_writers;
+
+ mutex_lock(&root->objectid_mutex);
+ ret = btrfs_find_highest_objectid(root,
+ &root->highest_objectid);
+ if (ret) {
+ mutex_unlock(&root->objectid_mutex);
+ goto free_root_dev;
+ }
+
+ ASSERT(root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
+
+ mutex_unlock(&root->objectid_mutex);
+
return 0;
+free_root_dev:
+ free_anon_bdev(root->anon_dev);
free_writers:
btrfs_free_subvolume_writers(root->subv_writers);
fail:
@@ -1762,7 +1777,6 @@ static int cleaner_kthread(void *arg)
int again;
struct btrfs_trans_handle *trans;
- set_freezable();
do {
again = 0;
@@ -1782,7 +1796,10 @@ static int cleaner_kthread(void *arg)
goto sleep;
}
+ mutex_lock(&root->fs_info->cleaner_delayed_iput_mutex);
btrfs_run_delayed_iputs(root);
+ mutex_unlock(&root->fs_info->cleaner_delayed_iput_mutex);
+
again = btrfs_clean_one_deleted_snapshot(root);
mutex_unlock(&root->fs_info->cleaner_mutex);
@@ -2542,8 +2559,8 @@ int open_ctree(struct super_block *sb,
mutex_init(&fs_info->delete_unused_bgs_mutex);
mutex_init(&fs_info->reloc_mutex);
mutex_init(&fs_info->delalloc_root_mutex);
+ mutex_init(&fs_info->cleaner_delayed_iput_mutex);
seqlock_init(&fs_info->profiles_lock);
- init_rwsem(&fs_info->delayed_iput_sem);
INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
INIT_LIST_HEAD(&fs_info->space_info);
@@ -2668,6 +2685,7 @@ int open_ctree(struct super_block *sb,
if (btrfs_check_super_csum(bh->b_data)) {
printk(KERN_ERR "BTRFS: superblock checksum mismatch\n");
err = -EINVAL;
+ brelse(bh);
goto fail_alloc;
}
@@ -2900,6 +2918,18 @@ retry_root_backup:
tree_root->commit_root = btrfs_root_node(tree_root);
btrfs_set_root_refs(&tree_root->root_item, 1);
+ mutex_lock(&tree_root->objectid_mutex);
+ ret = btrfs_find_highest_objectid(tree_root,
+ &tree_root->highest_objectid);
+ if (ret) {
+ mutex_unlock(&tree_root->objectid_mutex);
+ goto recovery_tree_root;
+ }
+
+ ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
+
+ mutex_unlock(&tree_root->objectid_mutex);
+
ret = btrfs_read_roots(fs_info, tree_root);
if (ret)
goto recovery_tree_root;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index c4661db2b72a..2368cac1115a 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -4086,8 +4086,10 @@ commit_trans:
!atomic_read(&root->fs_info->open_ioctl_trans)) {
need_commit--;
- if (need_commit > 0)
+ if (need_commit > 0) {
+ btrfs_start_delalloc_roots(fs_info, 0, -1);
btrfs_wait_ordered_roots(fs_info, -1);
+ }
trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
@@ -4100,11 +4102,12 @@ commit_trans:
if (ret)
return ret;
/*
- * make sure that all running delayed iput are
- * done
+ * The cleaner kthread might still be doing iput
+ * operations. Wait for it to finish so that
+ * more space is released.
*/
- down_write(&root->fs_info->delayed_iput_sem);
- up_write(&root->fs_info->delayed_iput_sem);
+ mutex_lock(&root->fs_info->cleaner_delayed_iput_mutex);
+ mutex_unlock(&root->fs_info->cleaner_delayed_iput_mutex);
goto again;
} else {
btrfs_end_transaction(trans, root);
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 0f09526aa7d9..5e5db3687e34 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1885,7 +1885,7 @@ static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
*/
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
- struct dentry *dentry = file->f_path.dentry;
+ struct dentry *dentry = file_dentry(file);
struct inode *inode = d_inode(dentry);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index 767a6056ac45..07573dc1614a 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -515,7 +515,7 @@ out:
return ret;
}
-static int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
+int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
{
struct btrfs_path *path;
int ret;
@@ -555,13 +555,6 @@ int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid)
int ret;
mutex_lock(&root->objectid_mutex);
- if (unlikely(root->highest_objectid < BTRFS_FIRST_FREE_OBJECTID)) {
- ret = btrfs_find_highest_objectid(root,
- &root->highest_objectid);
- if (ret)
- goto out;
- }
-
if (unlikely(root->highest_objectid >= BTRFS_LAST_FREE_OBJECTID)) {
ret = -ENOSPC;
goto out;
diff --git a/fs/btrfs/inode-map.h b/fs/btrfs/inode-map.h
index ddb347bfee23..c8e864b2d530 100644
--- a/fs/btrfs/inode-map.h
+++ b/fs/btrfs/inode-map.h
@@ -9,5 +9,6 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
struct btrfs_trans_handle *trans);
int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid);
+int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid);
#endif
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index a70c5790f8f5..4bc9dbf29a73 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3142,8 +3142,6 @@ void btrfs_run_delayed_iputs(struct btrfs_root *root)
if (empty)
return;
- down_read(&fs_info->delayed_iput_sem);
-
spin_lock(&fs_info->delayed_iput_lock);
list_splice_init(&fs_info->delayed_iputs, &list);
spin_unlock(&fs_info->delayed_iput_lock);
@@ -3154,8 +3152,6 @@ void btrfs_run_delayed_iputs(struct btrfs_root *root)
iput(delayed->inode);
kfree(delayed);
}
-
- up_read(&root->fs_info->delayed_iput_sem);
}
/*
@@ -5741,6 +5737,7 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
char *name_ptr;
int name_len;
int is_curr = 0; /* ctx->pos points to the current index? */
+ bool emitted;
/* FIXME, use a real flag for deciding about the key type */
if (root->fs_info->tree_root == root)
@@ -5769,6 +5766,7 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
if (ret < 0)
goto err;
+ emitted = false;
while (1) {
leaf = path->nodes[0];
slot = path->slots[0];
@@ -5848,6 +5846,7 @@ skip:
if (over)
goto nopos;
+ emitted = true;
di_len = btrfs_dir_name_len(leaf, di) +
btrfs_dir_data_len(leaf, di) + sizeof(*di);
di_cur += di_len;
@@ -5860,11 +5859,20 @@ next:
if (key_type == BTRFS_DIR_INDEX_KEY) {
if (is_curr)
ctx->pos++;
- ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
+ ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list, &emitted);
if (ret)
goto nopos;
}
+ /*
+ * If we haven't emitted any dir entry, we must not touch ctx->pos as
+ * it was set to the termination value in a previous call. We assume
+ * that "." and ".." were emitted if we reach this point and set the
+ * termination value as well for an empty directory.
+ */
+ if (ctx->pos > 2 && !emitted)
+ goto nopos;
+
/* Reached end of directory/root. Bump pos past the last item. */
ctx->pos++;
@@ -6481,7 +6489,7 @@ out_unlock_inode:
static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *dentry)
{
- struct btrfs_trans_handle *trans;
+ struct btrfs_trans_handle *trans = NULL;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct inode *inode = d_inode(old_dentry);
u64 index;
@@ -6507,6 +6515,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
trans = btrfs_start_transaction(root, 5);
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
+ trans = NULL;
goto fail;
}
@@ -6540,9 +6549,10 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
btrfs_log_new_name(trans, inode, NULL, parent);
}
- btrfs_end_transaction(trans, root);
btrfs_balance_delayed_items(root);
fail:
+ if (trans)
+ btrfs_end_transaction(trans, root);
if (drop_inode) {
inode_dec_link_count(inode);
iput(inode);
@@ -7985,6 +7995,7 @@ static void btrfs_endio_direct_read(struct bio *bio)
kfree(dip);
+ dio_bio->bi_error = bio->bi_error;
dio_end_io(dio_bio, bio->bi_error);
if (io_bio->end_io)
@@ -8030,6 +8041,7 @@ out_test:
kfree(dip);
+ dio_bio->bi_error = bio->bi_error;
dio_end_io(dio_bio, bio->bi_error);
bio_put(bio);
}
@@ -8534,15 +8546,28 @@ int btrfs_readpage(struct file *file, struct page *page)
static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
{
struct extent_io_tree *tree;
-
+ struct inode *inode = page->mapping->host;
+ int ret;
if (current->flags & PF_MEMALLOC) {
redirty_page_for_writepage(wbc, page);
unlock_page(page);
return 0;
}
+
+ /*
+ * If we are under memory pressure we will call this directly from the
+ * VM; we need to make sure we hold a reference on the inode for the
+ * ordered extent. If we can't get one, just return as if we did nothing.
+ */
+ if (!igrab(inode)) {
+ redirty_page_for_writepage(wbc, page);
+ return AOP_WRITEPAGE_ACTIVATE;
+ }
tree = &BTRFS_I(page->mapping->host)->io_tree;
- return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
+ ret = extent_write_full_page(tree, page, btrfs_get_extent, wbc);
+ btrfs_add_delayed_iput(inode);
+ return ret;
}
static int btrfs_writepages(struct address_space *mapping,
@@ -9636,9 +9661,11 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
/*
* 2 items for inode item and ref
* 2 items for dir items
+ * 1 item for updating parent inode item
+ * 1 item for the inline extent item
* 1 item for xattr if selinux is on
*/
- trans = btrfs_start_transaction(root, 5);
+ trans = btrfs_start_transaction(root, 7);
if (IS_ERR(trans))
return PTR_ERR(trans);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index da94138eb85e..f07d01bc4875 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -568,6 +568,10 @@ static noinline int create_subvol(struct inode *dir,
goto fail;
}
+ mutex_lock(&new_root->objectid_mutex);
+ new_root->highest_objectid = new_dirid;
+ mutex_unlock(&new_root->objectid_mutex);
+
/*
* insert the directory item
*/
@@ -2782,24 +2786,29 @@ out:
static struct page *extent_same_get_page(struct inode *inode, pgoff_t index)
{
struct page *page;
- struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
page = grab_cache_page(inode->i_mapping, index);
if (!page)
- return NULL;
+ return ERR_PTR(-ENOMEM);
if (!PageUptodate(page)) {
- if (extent_read_full_page_nolock(tree, page, btrfs_get_extent,
- 0))
- return NULL;
+ int ret;
+
+ ret = btrfs_readpage(NULL, page);
+ if (ret)
+ return ERR_PTR(ret);
lock_page(page);
if (!PageUptodate(page)) {
unlock_page(page);
page_cache_release(page);
- return NULL;
+ return ERR_PTR(-EIO);
+ }
+ if (page->mapping != inode->i_mapping) {
+ unlock_page(page);
+ page_cache_release(page);
+ return ERR_PTR(-EAGAIN);
}
}
- unlock_page(page);
return page;
}
@@ -2811,17 +2820,31 @@ static int gather_extent_pages(struct inode *inode, struct page **pages,
pgoff_t index = off >> PAGE_CACHE_SHIFT;
for (i = 0; i < num_pages; i++) {
+again:
pages[i] = extent_same_get_page(inode, index + i);
- if (!pages[i])
- return -ENOMEM;
+ if (IS_ERR(pages[i])) {
+ int err = PTR_ERR(pages[i]);
+
+ if (err == -EAGAIN)
+ goto again;
+ pages[i] = NULL;
+ return err;
+ }
}
return 0;
}
-static inline void lock_extent_range(struct inode *inode, u64 off, u64 len)
+static int lock_extent_range(struct inode *inode, u64 off, u64 len,
+ bool retry_range_locking)
{
- /* do any pending delalloc/csum calc on src, one way or
- another, and lock file content */
+ /*
+ * Do any pending delalloc/csum calculations on inode, one way or
+ * another, and lock file content.
+ * The locking order is:
+ *
+ * 1) pages
+ * 2) range in the inode's io tree
+ */
while (1) {
struct btrfs_ordered_extent *ordered;
lock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1);
@@ -2839,8 +2862,11 @@ static inline void lock_extent_range(struct inode *inode, u64 off, u64 len)
unlock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1);
if (ordered)
btrfs_put_ordered_extent(ordered);
+ if (!retry_range_locking)
+ return -EAGAIN;
btrfs_wait_ordered_range(inode, off, len);
}
+ return 0;
}
static void btrfs_double_inode_unlock(struct inode *inode1, struct inode *inode2)
@@ -2865,15 +2891,24 @@ static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
}
-static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
- struct inode *inode2, u64 loff2, u64 len)
+static int btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
+ struct inode *inode2, u64 loff2, u64 len,
+ bool retry_range_locking)
{
+ int ret;
+
if (inode1 < inode2) {
swap(inode1, inode2);
swap(loff1, loff2);
}
- lock_extent_range(inode1, loff1, len);
- lock_extent_range(inode2, loff2, len);
+ ret = lock_extent_range(inode1, loff1, len, retry_range_locking);
+ if (ret)
+ return ret;
+ ret = lock_extent_range(inode2, loff2, len, retry_range_locking);
+ if (ret)
+ unlock_extent(&BTRFS_I(inode1)->io_tree, loff1,
+ loff1 + len - 1);
+ return ret;
}
struct cmp_pages {
@@ -2889,11 +2924,15 @@ static void btrfs_cmp_data_free(struct cmp_pages *cmp)
for (i = 0; i < cmp->num_pages; i++) {
pg = cmp->src_pages[i];
- if (pg)
+ if (pg) {
+ unlock_page(pg);
page_cache_release(pg);
+ }
pg = cmp->dst_pages[i];
- if (pg)
+ if (pg) {
+ unlock_page(pg);
page_cache_release(pg);
+ }
}
kfree(cmp->src_pages);
kfree(cmp->dst_pages);
@@ -2954,6 +2993,8 @@ static int btrfs_cmp_data(struct inode *src, u64 loff, struct inode *dst,
src_page = cmp->src_pages[i];
dst_page = cmp->dst_pages[i];
+ ASSERT(PageLocked(src_page));
+ ASSERT(PageLocked(dst_page));
addr = kmap_atomic(src_page);
dst_addr = kmap_atomic(dst_page);
@@ -3066,14 +3107,46 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
goto out_unlock;
}
+again:
ret = btrfs_cmp_data_prepare(src, loff, dst, dst_loff, olen, &cmp);
if (ret)
goto out_unlock;
if (same_inode)
- lock_extent_range(src, same_lock_start, same_lock_len);
+ ret = lock_extent_range(src, same_lock_start, same_lock_len,
+ false);
else
- btrfs_double_extent_lock(src, loff, dst, dst_loff, len);
+ ret = btrfs_double_extent_lock(src, loff, dst, dst_loff, len,
+ false);
+ /*
+ * If one of the inodes has dirty pages in the respective range or
+ * ordered extents, we need to flush delalloc and wait for all ordered
+ * extents in the range. We must unlock the pages and the ranges in the
+ * io trees to avoid deadlocks when flushing delalloc (requires locking
+ * pages) and when waiting for ordered extents to complete (they require
+ * range locking).
+ */
+ if (ret == -EAGAIN) {
+ /*
+ * Ranges in the io trees already unlocked. Now unlock all
+ * pages before waiting for all IO to complete.
+ */
+ btrfs_cmp_data_free(&cmp);
+ if (same_inode) {
+ btrfs_wait_ordered_range(src, same_lock_start,
+ same_lock_len);
+ } else {
+ btrfs_wait_ordered_range(src, loff, len);
+ btrfs_wait_ordered_range(dst, dst_loff, len);
+ }
+ goto again;
+ }
+ ASSERT(ret == 0);
+ if (WARN_ON(ret)) {
+ /* ranges in the io trees already unlocked */
+ btrfs_cmp_data_free(&cmp);
+ return ret;
+ }
/* pass original length for comparison so we stay within i_size */
ret = btrfs_cmp_data(src, loff, dst, dst_loff, olen, &cmp);
@@ -3895,9 +3968,15 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
u64 lock_start = min_t(u64, off, destoff);
u64 lock_len = max_t(u64, off, destoff) + len - lock_start;
- lock_extent_range(src, lock_start, lock_len);
+ ret = lock_extent_range(src, lock_start, lock_len, true);
} else {
- btrfs_double_extent_lock(src, off, inode, destoff, len);
+ ret = btrfs_double_extent_lock(src, off, inode, destoff, len,
+ true);
+ }
+ ASSERT(ret == 0);
+ if (WARN_ON(ret)) {
+ /* ranges in the io trees already unlocked */
+ goto out_unlock;
}
ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
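The two call sites above split on retry policy: dedupe passes retry_range_locking == false and handles -EAGAIN itself by dropping every page and range lock before waiting, while clone passes true and lets lock_extent_range() wait internally. A minimal userspace sketch of the same shape, assuming illustrative names (struct range_lock, lock_one() and double_lock() are not kernel API):

	#include <errno.h>
	#include <pthread.h>
	#include <stdbool.h>

	struct range_lock {
		pthread_mutex_t m;
	};

	static int lock_one(struct range_lock *l, bool can_wait)
	{
		if (can_wait) {
			pthread_mutex_lock(&l->m);
			return 0;
		}
		return pthread_mutex_trylock(&l->m) ? -EAGAIN : 0;
	}

	static int double_lock(struct range_lock *a, struct range_lock *b,
			       bool can_wait)
	{
		int ret;

		if (a < b) {	/* stable global order prevents ABBA deadlock */
			struct range_lock *t = a;

			a = b;
			b = t;
		}
		ret = lock_one(a, can_wait);
		if (ret)
			return ret;
		ret = lock_one(b, can_wait);
		if (ret)
			/* undo the first lock on failure, exactly as
			 * btrfs_double_extent_lock() unlocks inode1's range */
			pthread_mutex_unlock(&a->m);
		return ret;
	}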
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 7cf8509deda7..2c849b08a91b 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -310,8 +310,16 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);
err = btrfs_insert_fs_root(root->fs_info, root);
+ /*
+ * The root might have been inserted already, as before we look
+ * for orphan roots, log replay might have happened, which
+ * triggers a transaction commit and qgroup accounting, which
+ * in turn reads and inserts fs roots while doing backref
+ * walking.
+ */
+ if (err == -EEXIST)
+ err = 0;
if (err) {
- BUG_ON(err == -EEXIST);
btrfs_free_fs_root(root);
break;
}
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 355a458cba1a..63a6152be04b 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -1469,7 +1469,21 @@ static int read_symlink(struct btrfs_root *root,
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto out;
- BUG_ON(ret);
+ if (ret) {
+ /*
+ * An empty symlink inode. Can happen in rare error paths when
+ * creating a symlink (transaction committed before the inode
+ * eviction handler removed the symlink inode items and a crash
+ * happened in between or the subvol was snapshotted in between).
+ * Print an informative message to dmesg/syslog so that the user
+ * can delete the symlink.
+ */
+ btrfs_err(root->fs_info,
+ "Found empty symlink inode %llu at root %llu",
+ ino, root->root_key.objectid);
+ ret = -EIO;
+ goto out;
+ }
ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_file_extent_item);
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 24154e422945..fe609b81dd1b 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1956,6 +1956,8 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
* there are other factors that may change the result (like a new metadata
* chunk).
*
+ * If metadata is exhausted, f_bavail will be 0.
+ *
* FIXME: not accurate for mixed block groups, total and free/used are ok,
* available appears slightly larger.
*/
@@ -1967,11 +1969,13 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
struct btrfs_space_info *found;
u64 total_used = 0;
u64 total_free_data = 0;
+ u64 total_free_meta = 0;
int bits = dentry->d_sb->s_blocksize_bits;
__be32 *fsid = (__be32 *)fs_info->fsid;
unsigned factor = 1;
struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
int ret;
+ u64 thresh = 0;
/*
* holding chunk_mutex to avoid allocating new chunks, holding
@@ -1997,6 +2001,8 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
}
}
}
+ if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
+ total_free_meta += found->disk_total - found->disk_used;
total_used += found->disk_used;
}
@@ -2019,6 +2025,24 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_bavail += div_u64(total_free_data, factor);
buf->f_bavail = buf->f_bavail >> bits;
+ /*
+ * We calculate the remaining metadata space minus global reserve. If
+ * this is (supposedly) smaller than zero, there's no space. But this
+ * does not hold in practice; the exhausted state happens while there
+ * is still some positive delta. So we apply some guesswork and compare
+ * the delta to a 4M threshold. (Practically observed delta was ~2M.)
+ *
+ * We probably cannot calculate the exact threshold value because this
+ * depends on the internal reservations requested by various
+ * operations, so some operations that consume a small amount of
+ * metadata will succeed even if the available space is zero. But this
+ * is better than the other way around.
+ */
+ thresh = 4 * 1024 * 1024;
+
+ if (total_free_meta - thresh < block_rsv->size)
+ buf->f_bavail = 0;
+
buf->f_type = BTRFS_SUPER_MAGIC;
buf->f_bsize = dentry->d_sb->s_blocksize;
buf->f_namelen = BTRFS_NAME_LEN;
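To make the threshold concrete, here is a small standalone recomputation of the check above; the 4M value matches the patch, the sample sizes are only illustrative:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Mirrors: if (total_free_meta - thresh < block_rsv->size) f_bavail = 0;
	 * the u64 subtraction wraps if total_free_meta < thresh, just as in
	 * the kernel expression. */
	static bool metadata_exhausted(uint64_t total_free_meta, uint64_t global_rsv)
	{
		const uint64_t thresh = 4ULL << 20;	/* 4M */

		return total_free_meta - thresh < global_rsv;
	}

	int main(void)
	{
		/* 520M free vs a 512M reserve: space still reported */
		printf("%d\n", metadata_exhausted(520ULL << 20, 512ULL << 20)); /* 0 */
		/* 514M free vs a 512M reserve: f_bavail forced to 0 */
		printf("%d\n", metadata_exhausted(514ULL << 20, 512ULL << 20)); /* 1 */
		return 0;
	}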
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 323e12cc9d2f..0e044d7ee721 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -4406,6 +4406,127 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
return ret;
}
+/*
+ * When we are logging a new inode X, check if it doesn't have a reference that
+ * matches the reference from some other inode Y created in a past transaction
+ * and that was renamed in the current transaction. If we don't do this, then at
+ * log replay time we can lose inode Y (and all its files if it's a directory):
+ *
+ * mkdir /mnt/x
+ * echo "hello world" > /mnt/x/foobar
+ * sync
+ * mv /mnt/x /mnt/y
+ * mkdir /mnt/x # or touch /mnt/x
+ * xfs_io -c fsync /mnt/x
+ * <power fail>
+ * mount fs, trigger log replay
+ *
+ * After the log replay procedure, we would lose the first directory and all its
+ * files (file foobar).
+ * For the case where inode Y is not a directory we simply end up losing it:
+ *
+ * echo "123" > /mnt/foo
+ * sync
+ * mv /mnt/foo /mnt/bar
+ * echo "abc" > /mnt/foo
+ * xfs_io -c fsync /mnt/foo
+ * <power fail>
+ *
+ * We also need this for cases where a snapshot entry is replaced by some other
+ * entry (file or directory) otherwise we end up with an unreplayable log due to
+ * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as
+ * if it were a regular entry:
+ *
+ * mkdir /mnt/x
+ * btrfs subvolume snapshot /mnt /mnt/x/snap
+ * btrfs subvolume delete /mnt/x/snap
+ * rmdir /mnt/x
+ * mkdir /mnt/x
+ * fsync /mnt/x or fsync some new file inside it
+ * <power fail>
+ *
+ * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in
+ * the same transaction.
+ */
+static int btrfs_check_ref_name_override(struct extent_buffer *eb,
+ const int slot,
+ const struct btrfs_key *key,
+ struct inode *inode)
+{
+ int ret;
+ struct btrfs_path *search_path;
+ char *name = NULL;
+ u32 name_len = 0;
+ u32 item_size = btrfs_item_size_nr(eb, slot);
+ u32 cur_offset = 0;
+ unsigned long ptr = btrfs_item_ptr_offset(eb, slot);
+
+ search_path = btrfs_alloc_path();
+ if (!search_path)
+ return -ENOMEM;
+ search_path->search_commit_root = 1;
+ search_path->skip_locking = 1;
+
+ while (cur_offset < item_size) {
+ u64 parent;
+ u32 this_name_len;
+ u32 this_len;
+ unsigned long name_ptr;
+ struct btrfs_dir_item *di;
+
+ if (key->type == BTRFS_INODE_REF_KEY) {
+ struct btrfs_inode_ref *iref;
+
+ iref = (struct btrfs_inode_ref *)(ptr + cur_offset);
+ parent = key->offset;
+ this_name_len = btrfs_inode_ref_name_len(eb, iref);
+ name_ptr = (unsigned long)(iref + 1);
+ this_len = sizeof(*iref) + this_name_len;
+ } else {
+ struct btrfs_inode_extref *extref;
+
+ extref = (struct btrfs_inode_extref *)(ptr +
+ cur_offset);
+ parent = btrfs_inode_extref_parent(eb, extref);
+ this_name_len = btrfs_inode_extref_name_len(eb, extref);
+ name_ptr = (unsigned long)&extref->name;
+ this_len = sizeof(*extref) + this_name_len;
+ }
+
+ if (this_name_len > name_len) {
+ char *new_name;
+
+ new_name = krealloc(name, this_name_len, GFP_NOFS);
+ if (!new_name) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ name_len = this_name_len;
+ name = new_name;
+ }
+
+ read_extent_buffer(eb, name, name_ptr, this_name_len);
+ di = btrfs_lookup_dir_item(NULL, BTRFS_I(inode)->root,
+ search_path, parent,
+ name, this_name_len, 0);
+ if (di && !IS_ERR(di)) {
+ ret = 1;
+ goto out;
+ } else if (IS_ERR(di)) {
+ ret = PTR_ERR(di);
+ goto out;
+ }
+ btrfs_release_path(search_path);
+
+ cur_offset += this_len;
+ }
+ ret = 0;
+out:
+ btrfs_free_path(search_path);
+ kfree(name);
+ return ret;
+}
+
/* log a single inode in the tree log.
* At least one parent directory for this inode must exist in the tree
* or be logged already.
@@ -4578,6 +4699,22 @@ again:
if (min_key.type == BTRFS_INODE_ITEM_KEY)
need_log_inode_item = false;
+ if ((min_key.type == BTRFS_INODE_REF_KEY ||
+ min_key.type == BTRFS_INODE_EXTREF_KEY) &&
+ BTRFS_I(inode)->generation == trans->transid) {
+ ret = btrfs_check_ref_name_override(path->nodes[0],
+ path->slots[0],
+ &min_key, inode);
+ if (ret < 0) {
+ err = ret;
+ goto out_unlock;
+ } else if (ret > 0) {
+ err = 1;
+ btrfs_set_log_full_commit(root->fs_info, trans);
+ goto out_unlock;
+ }
+ }
+
/* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
if (min_key.type == BTRFS_XATTR_ITEM_KEY) {
if (ins_nr == 0)
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index a23399e8e3ab..9c62a6f9757a 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -232,6 +232,7 @@ static struct btrfs_device *__alloc_device(void)
spin_lock_init(&dev->reada_lock);
atomic_set(&dev->reada_in_flight, 0);
atomic_set(&dev->dev_stats_ccnt, 0);
+ btrfs_device_data_ordered_init(dev);
INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
@@ -1257,6 +1258,15 @@ int find_free_dev_extent_start(struct btrfs_transaction *transaction,
int ret;
int slot;
struct extent_buffer *l;
+ u64 min_search_start;
+
+ /*
+ * We don't want to overwrite the superblock on the drive nor any area
+ * used by the boot loader (grub for example), so we make sure to start
+ * at an offset of at least 1MB.
+ */
+ min_search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
+ search_start = max(search_start, min_search_start);
path = btrfs_alloc_path();
if (!path)
@@ -1397,18 +1407,9 @@ int find_free_dev_extent(struct btrfs_trans_handle *trans,
struct btrfs_device *device, u64 num_bytes,
u64 *start, u64 *len)
{
- struct btrfs_root *root = device->dev_root;
- u64 search_start;
-
/* FIXME use last free of some kind */
-
- /*
- * we don't want to overwrite the superblock on the drive,
- * so we make sure to start at an offset of at least 1MB
- */
- search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
return find_free_dev_extent_start(trans->transaction, device,
- num_bytes, search_start, start, len);
+ num_bytes, 0, start, len);
}
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
@@ -6512,6 +6513,14 @@ int btrfs_read_sys_array(struct btrfs_root *root)
goto out_short_read;
num_stripes = btrfs_chunk_num_stripes(sb, chunk);
+ if (!num_stripes) {
+ printk(KERN_ERR
+ "BTRFS: invalid number of stripes %u in sys_array at offset %u\n",
+ num_stripes, cur_offset);
+ ret = -EIO;
+ break;
+ }
+
len = btrfs_chunk_item_size(num_stripes);
if (cur_offset + len > array_size)
goto out_short_read;
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 7febcf2475c5..50b268483302 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -50,7 +50,7 @@ void cifs_vfs_err(const char *fmt, ...)
vaf.fmt = fmt;
vaf.va = &args;
- pr_err("CIFS VFS: %pV", &vaf);
+ pr_err_ratelimited("CIFS VFS: %pV", &vaf);
va_end(args);
}
diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h
index f40fbaca1b2a..66cf0f9fff89 100644
--- a/fs/cifs/cifs_debug.h
+++ b/fs/cifs/cifs_debug.h
@@ -51,14 +51,13 @@ __printf(1, 2) void cifs_vfs_err(const char *fmt, ...);
/* information message: e.g., configuration, major event */
#define cifs_dbg(type, fmt, ...) \
do { \
- if (type == FYI) { \
- if (cifsFYI & CIFS_INFO) { \
- pr_debug("%s: " fmt, __FILE__, ##__VA_ARGS__); \
- } \
+ if (type == FYI && cifsFYI & CIFS_INFO) { \
+ pr_debug_ratelimited("%s: " \
+ fmt, __FILE__, ##__VA_ARGS__); \
} else if (type == VFS) { \
cifs_vfs_err(fmt, ##__VA_ARGS__); \
} else if (type == NOISY && type != 0) { \
- pr_debug(fmt, ##__VA_ARGS__); \
+ pr_debug_ratelimited(fmt, ##__VA_ARGS__); \
} \
} while (0)
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index afa09fce8151..e682b36a210f 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -714,7 +714,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
ses->auth_key.response = kmalloc(baselen + tilen, GFP_KERNEL);
if (!ses->auth_key.response) {
- rc = ENOMEM;
+ rc = -ENOMEM;
ses->auth_key.len = 0;
goto setup_ntlmv2_rsp_ret;
}
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index c3cc1609025f..44b3d4280abb 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -31,19 +31,15 @@
* so that it will fit. We use hash_64 to convert the value to 31 bits, and
* then add 1, to ensure that we don't end up with a 0 as the value.
*/
-#if BITS_PER_LONG == 64
static inline ino_t
cifs_uniqueid_to_ino_t(u64 fileid)
{
+ if ((sizeof(ino_t)) < (sizeof(u64)))
+ return (ino_t)hash_64(fileid, (sizeof(ino_t) * 8) - 1) + 1;
+
return (ino_t)fileid;
+
}
-#else
-static inline ino_t
-cifs_uniqueid_to_ino_t(u64 fileid)
-{
- return (ino_t)hash_64(fileid, (sizeof(ino_t) * 8) - 1) + 1;
-}
-#endif
extern struct file_system_type cifs_fs_type;
extern const struct address_space_operations cifs_addr_ops;
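The folded helper now picks the hashing path whenever ino_t is narrower than u64, instead of via preprocessor conditionals. A userspace sketch of the same fold, with hash64() standing in for the kernel's hash_64(); the multiplier below is illustrative, the kernel constant differs across versions:

	#include <stdint.h>
	#include <sys/types.h>

	static uint64_t hash64(uint64_t val, unsigned int bits)
	{
		/* multiplicative hash, then keep the top 'bits' bits */
		return (val * 0x61C8864680B583EBull) >> (64 - bits);
	}

	static ino_t uniqueid_to_ino(uint64_t fileid)
	{
		/* fold to 31 bits and add 1 so the inode number is never 0 */
		if (sizeof(ino_t) < sizeof(uint64_t))
			return (ino_t)hash64(fileid, sizeof(ino_t) * 8 - 1) + 1;
		return (ino_t)fileid;
	}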
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 90b4f9f7de66..76fcb50295a3 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1396,11 +1396,10 @@ openRetry:
* current bigbuf.
*/
static int
-cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+discard_remaining_data(struct TCP_Server_Info *server)
{
unsigned int rfclen = get_rfc1002_length(server->smallbuf);
int remaining = rfclen + 4 - server->total_read;
- struct cifs_readdata *rdata = mid->callback_data;
while (remaining > 0) {
int length;
@@ -1414,10 +1413,20 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
remaining -= length;
}
- dequeue_mid(mid, rdata->result);
return 0;
}
+static int
+cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+{
+ int length;
+ struct cifs_readdata *rdata = mid->callback_data;
+
+ length = discard_remaining_data(server);
+ dequeue_mid(mid, rdata->result);
+ return length;
+}
+
int
cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
@@ -1446,6 +1455,12 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
return length;
server->total_read += length;
+ if (server->ops->is_status_pending &&
+ server->ops->is_status_pending(buf, server, 0)) {
+ discard_remaining_data(server);
+ return -1;
+ }
+
/* Was the SMB read successful? */
rdata->result = server->ops->map_error(buf, false);
if (rdata->result != 0) {
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index ecb0803bdb0e..3c194ff0d2f0 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -368,7 +368,6 @@ cifs_reconnect(struct TCP_Server_Info *server)
server->session_key.response = NULL;
server->session_key.len = 0;
server->lstrp = jiffies;
- mutex_unlock(&server->srv_mutex);
/* mark submitted MIDs for retry and issue callback */
INIT_LIST_HEAD(&retry_list);
@@ -381,6 +380,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
list_move(&mid_entry->qhead, &retry_list);
}
spin_unlock(&GlobalMid_Lock);
+ mutex_unlock(&server->srv_mutex);
cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__);
list_for_each_safe(tmp, tmp2, &retry_list) {
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 0557c45e9c33..b30a4a6d98a0 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -847,6 +847,7 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
* if buggy server returns . and .. late do we want to
* check for that here?
*/
+ *tmp_buf = 0;
rc = cifs_filldir(current_entry, file, ctx,
tmp_buf, max_len);
if (rc) {
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 767555518d40..373b5cd1c913 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -1109,21 +1109,25 @@ parse_lease_state(struct TCP_Server_Info *server, struct smb2_create_rsp *rsp,
{
char *data_offset;
struct create_context *cc;
- unsigned int next = 0;
+ unsigned int next;
+ unsigned int remaining;
char *name;
data_offset = (char *)rsp + 4 + le32_to_cpu(rsp->CreateContextsOffset);
+ remaining = le32_to_cpu(rsp->CreateContextsLength);
cc = (struct create_context *)data_offset;
- do {
- cc = (struct create_context *)((char *)cc + next);
+ while (remaining >= sizeof(struct create_context)) {
name = le16_to_cpu(cc->NameOffset) + (char *)cc;
- if (le16_to_cpu(cc->NameLength) != 4 ||
- strncmp(name, "RqLs", 4)) {
- next = le32_to_cpu(cc->Next);
- continue;
- }
- return server->ops->parse_lease_buf(cc, epoch);
- } while (next != 0);
+ if (le16_to_cpu(cc->NameLength) == 4 &&
+ strncmp(name, "RqLs", 4) == 0)
+ return server->ops->parse_lease_buf(cc, epoch);
+
+ next = le32_to_cpu(cc->Next);
+ if (!next)
+ break;
+ remaining -= next;
+ cc = (struct create_context *)((char *)cc + next);
+ }
return 0;
}
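The rewritten walk is bounded twice: by remaining, derived from CreateContextsLength, and by a Next offset of zero, so a malformed response can no longer loop forever or step past the reply buffer. The same shape for a generic chained-record buffer, with a made-up record layout and an extra name-bounds check a full parser would want:

	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	struct rec {
		uint32_t next;		/* byte offset of next record, 0 = last */
		uint16_t name_off;	/* offset of the name from the record */
		uint16_t name_len;
	};

	static const struct rec *find_rec(const uint8_t *buf, size_t total,
					  const char *tag, size_t tag_len)
	{
		size_t remaining = total;
		const uint8_t *p = buf;

		while (remaining >= sizeof(struct rec)) {
			const struct rec *r = (const struct rec *)p;

			if ((size_t)r->name_off + r->name_len > remaining)
				break;	/* corrupt name bounds */
			if (r->name_len == tag_len &&
			    !memcmp((const char *)r + r->name_off, tag, tag_len))
				return r;
			if (!r->next || r->next > remaining)
				break;	/* end of chain, or corrupt link */
			remaining -= r->next;
			p += r->next;
		}
		return NULL;
	}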
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 2a24c524fb9a..87abe8ed074c 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -576,14 +576,16 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
cifs_in_send_dec(server);
cifs_save_when_sent(mid);
- if (rc < 0)
+ if (rc < 0) {
server->sequence_number -= 2;
+ cifs_delete_mid(mid);
+ }
+
mutex_unlock(&server->srv_mutex);
if (rc == 0)
return 0;
- cifs_delete_mid(mid);
add_credits_and_wake_if(server, credits, optype);
return rc;
}
diff --git a/fs/coredump.c b/fs/coredump.c
index 1777331eee76..dfc87c5f5a54 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -32,6 +32,9 @@
#include <linux/pipe_fs_i.h>
#include <linux/oom.h>
#include <linux/compat.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/path.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
@@ -627,6 +630,8 @@ void do_coredump(const siginfo_t *siginfo)
}
} else {
struct inode *inode;
+ int open_flags = O_CREAT | O_RDWR | O_NOFOLLOW |
+ O_LARGEFILE | O_EXCL;
if (cprm.limit < binfmt->min_coredump)
goto fail_unlock;
@@ -665,10 +670,27 @@ void do_coredump(const siginfo_t *siginfo)
* what matters is that at least one of the two processes
* writes its coredump successfully, not which one.
*/
- cprm.file = filp_open(cn.corename,
- O_CREAT | 2 | O_NOFOLLOW |
- O_LARGEFILE | O_EXCL,
- 0600);
+ if (need_suid_safe) {
+ /*
+ * Using user namespaces, normal user tasks can change
+ * their current->fs->root to point to arbitrary
+ * directories. Since the intention of the "only dump
+ * with a fully qualified path" rule is to control where
+ * coredumps may be placed using root privileges,
+ * current->fs->root must not be used. Instead, use the
+ * root directory of init_task.
+ */
+ struct path root;
+
+ task_lock(&init_task);
+ get_fs_root(init_task.fs, &root);
+ task_unlock(&init_task);
+ cprm.file = file_open_root(root.dentry, root.mnt,
+ cn.corename, open_flags, 0600);
+ path_put(&root);
+ } else {
+ cprm.file = filp_open(cn.corename, open_flags, 0600);
+ }
if (IS_ERR(cprm.file))
goto fail_unlock;
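The suid-safe branch resolves cn.corename against init_task's root via file_open_root(), so a user-namespace chroot cannot redirect where root-owned dumps land. The same open-flag hygiene applies in userspace; a sketch using openat() to pin the directory a relative name resolves against (the helper name is illustrative):

	#include <fcntl.h>
	#include <unistd.h>

	int open_core_at(int trusted_dir_fd, const char *corename)
	{
		int flags = O_CREAT | O_RDWR | O_NOFOLLOW | O_EXCL;

		/* O_EXCL + O_NOFOLLOW: never reuse or follow an existing
		 * entry, matching the flags the patch collects above. */
		return openat(trusted_dir_fd, corename, flags, 0600);
	}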
diff --git a/fs/dcache.c b/fs/dcache.c
index 065da6950eb4..240935d77844 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -269,9 +269,6 @@ static inline int dname_external(const struct dentry *dentry)
return dentry->d_name.name != dentry->d_iname;
}
-/*
- * Make sure other CPUs see the inode attached before the type is set.
- */
static inline void __d_set_inode_and_type(struct dentry *dentry,
struct inode *inode,
unsigned type_flags)
@@ -279,28 +276,18 @@ static inline void __d_set_inode_and_type(struct dentry *dentry,
unsigned flags;
dentry->d_inode = inode;
- smp_wmb();
flags = READ_ONCE(dentry->d_flags);
flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
flags |= type_flags;
WRITE_ONCE(dentry->d_flags, flags);
}
-/*
- * Ideally, we want to make sure that other CPUs see the flags cleared before
- * the inode is detached, but this is really a violation of RCU principles
- * since the ordering suggests we should always set inode before flags.
- *
- * We should instead replace or discard the entire dentry - but that sucks
- * performancewise on mass deletion/rename.
- */
static inline void __d_clear_type_and_inode(struct dentry *dentry)
{
unsigned flags = READ_ONCE(dentry->d_flags);
flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
WRITE_ONCE(dentry->d_flags, flags);
- smp_wmb();
dentry->d_inode = NULL;
}
@@ -370,9 +357,11 @@ static void dentry_unlink_inode(struct dentry * dentry)
__releases(dentry->d_inode->i_lock)
{
struct inode *inode = dentry->d_inode;
+
+ raw_write_seqcount_begin(&dentry->d_seq);
__d_clear_type_and_inode(dentry);
hlist_del_init(&dentry->d_u.d_alias);
- dentry_rcuwalk_invalidate(dentry);
+ raw_write_seqcount_end(&dentry->d_seq);
spin_unlock(&dentry->d_lock);
spin_unlock(&inode->i_lock);
if (!inode->i_nlink)
@@ -1677,7 +1666,8 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
DCACHE_OP_REVALIDATE |
DCACHE_OP_WEAK_REVALIDATE |
DCACHE_OP_DELETE |
- DCACHE_OP_SELECT_INODE));
+ DCACHE_OP_SELECT_INODE |
+ DCACHE_OP_REAL));
dentry->d_op = op;
if (!op)
return;
@@ -1695,6 +1685,8 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
dentry->d_flags |= DCACHE_OP_PRUNE;
if (op->d_select_inode)
dentry->d_flags |= DCACHE_OP_SELECT_INODE;
+ if (op->d_real)
+ dentry->d_flags |= DCACHE_OP_REAL;
}
EXPORT_SYMBOL(d_set_d_op);
@@ -1757,8 +1749,9 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
spin_lock(&dentry->d_lock);
if (inode)
hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
+ raw_write_seqcount_begin(&dentry->d_seq);
__d_set_inode_and_type(dentry, inode, add_flags);
- dentry_rcuwalk_invalidate(dentry);
+ raw_write_seqcount_end(&dentry->d_seq);
spin_unlock(&dentry->d_lock);
fsnotify_d_instantiate(dentry, inode);
}
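With d_inode and d_flags now published inside a d_seq write section rather than around an smp_wmb(), a lockless RCU-walk reader retries until it observes a consistent pair. The matching read side, sketched with the standard seqcount primitives:

	unsigned int seq;
	struct inode *inode;
	unsigned int flags;

	do {
		seq = read_seqcount_begin(&dentry->d_seq);
		inode = dentry->d_inode;
		flags = READ_ONCE(dentry->d_flags);
	} while (read_seqcount_retry(&dentry->d_seq, seq));
	/* inode and flags form a consistent snapshot here */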
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index c35ffdc12bba..706de324f2a6 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -575,6 +575,26 @@ void devpts_kill_index(struct inode *ptmx_inode, int idx)
mutex_unlock(&allocated_ptys_lock);
}
+/*
+ * pty code needs to hold extra references in case of last /dev/tty close
+ */
+
+void devpts_add_ref(struct inode *ptmx_inode)
+{
+ struct super_block *sb = pts_sb_from_inode(ptmx_inode);
+
+ atomic_inc(&sb->s_active);
+ ihold(ptmx_inode);
+}
+
+void devpts_del_ref(struct inode *ptmx_inode)
+{
+ struct super_block *sb = pts_sb_from_inode(ptmx_inode);
+
+ iput(ptmx_inode);
+ deactivate_super(sb);
+}
+
/**
* devpts_pty_new -- create a new inode in /dev/pts/
* @ptmx_inode: inode of the master
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 602e8441bc0f..01171d8a6ee9 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -472,8 +472,8 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio)
dio->io_error = -EIO;
if (dio->is_async && dio->rw == READ && dio->should_dirty) {
- bio_check_pages_dirty(bio); /* transfers ownership */
err = bio->bi_error;
+ bio_check_pages_dirty(bio); /* transfers ownership */
} else {
bio_for_each_segment_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
diff --git a/fs/efivarfs/file.c b/fs/efivarfs/file.c
index 90001da9abfd..66842e55c48c 100644
--- a/fs/efivarfs/file.c
+++ b/fs/efivarfs/file.c
@@ -10,6 +10,7 @@
#include <linux/efi.h>
#include <linux/fs.h>
#include <linux/slab.h>
+#include <linux/mount.h>
#include "internal.h"
@@ -103,9 +104,78 @@ out_free:
return size;
}
+static int
+efivarfs_ioc_getxflags(struct file *file, void __user *arg)
+{
+ struct inode *inode = file->f_mapping->host;
+ unsigned int i_flags;
+ unsigned int flags = 0;
+
+ i_flags = inode->i_flags;
+ if (i_flags & S_IMMUTABLE)
+ flags |= FS_IMMUTABLE_FL;
+
+ if (copy_to_user(arg, &flags, sizeof(flags)))
+ return -EFAULT;
+ return 0;
+}
+
+static int
+efivarfs_ioc_setxflags(struct file *file, void __user *arg)
+{
+ struct inode *inode = file->f_mapping->host;
+ unsigned int flags;
+ unsigned int i_flags = 0;
+ int error;
+
+ if (!inode_owner_or_capable(inode))
+ return -EACCES;
+
+ if (copy_from_user(&flags, arg, sizeof(flags)))
+ return -EFAULT;
+
+ if (flags & ~FS_IMMUTABLE_FL)
+ return -EOPNOTSUPP;
+
+ if (!capable(CAP_LINUX_IMMUTABLE))
+ return -EPERM;
+
+ if (flags & FS_IMMUTABLE_FL)
+ i_flags |= S_IMMUTABLE;
+
+
+ error = mnt_want_write_file(file);
+ if (error)
+ return error;
+
+ mutex_lock(&inode->i_mutex);
+ inode_set_flags(inode, i_flags, S_IMMUTABLE);
+ mutex_unlock(&inode->i_mutex);
+
+ mnt_drop_write_file(file);
+
+ return 0;
+}
+
+long
+efivarfs_file_ioctl(struct file *file, unsigned int cmd, unsigned long p)
+{
+ void __user *arg = (void __user *)p;
+
+ switch (cmd) {
+ case FS_IOC_GETFLAGS:
+ return efivarfs_ioc_getxflags(file, arg);
+ case FS_IOC_SETFLAGS:
+ return efivarfs_ioc_setxflags(file, arg);
+ }
+
+ return -ENOTTY;
+}
+
const struct file_operations efivarfs_file_operations = {
.open = simple_open,
.read = efivarfs_file_read,
.write = efivarfs_file_write,
.llseek = no_llseek,
+ .unlocked_ioctl = efivarfs_file_ioctl,
};
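With FS_IOC_GETFLAGS/FS_IOC_SETFLAGS wired up, userspace can drop the immutable bit before writing or deleting a variable, as chattr -i does. A minimal caller, needing CAP_LINUX_IMMUTABLE; the helper name and path handling are illustrative:

	#include <fcntl.h>
	#include <linux/fs.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int make_var_mutable(const char *path)
	{
		unsigned int flags;
		int fd = open(path, O_RDONLY);

		if (fd < 0)
			return -1;
		if (ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0)
			goto err;
		flags &= ~FS_IMMUTABLE_FL;	/* clear the immutable bit */
		if (ioctl(fd, FS_IOC_SETFLAGS, &flags) < 0)
			goto err;
		close(fd);
		return 0;
	err:
		close(fd);
		return -1;
	}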
diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c
index 3381b9da9ee6..e2ab6d0497f2 100644
--- a/fs/efivarfs/inode.c
+++ b/fs/efivarfs/inode.c
@@ -15,7 +15,8 @@
#include "internal.h"
struct inode *efivarfs_get_inode(struct super_block *sb,
- const struct inode *dir, int mode, dev_t dev)
+ const struct inode *dir, int mode,
+ dev_t dev, bool is_removable)
{
struct inode *inode = new_inode(sb);
@@ -23,6 +24,7 @@ struct inode *efivarfs_get_inode(struct super_block *sb,
inode->i_ino = get_next_ino();
inode->i_mode = mode;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ inode->i_flags = is_removable ? 0 : S_IMMUTABLE;
switch (mode & S_IFMT) {
case S_IFREG:
inode->i_fop = &efivarfs_file_operations;
@@ -102,22 +104,17 @@ static void efivarfs_hex_to_guid(const char *str, efi_guid_t *guid)
static int efivarfs_create(struct inode *dir, struct dentry *dentry,
umode_t mode, bool excl)
{
- struct inode *inode;
+ struct inode *inode = NULL;
struct efivar_entry *var;
int namelen, i = 0, err = 0;
+ bool is_removable = false;
if (!efivarfs_valid_name(dentry->d_name.name, dentry->d_name.len))
return -EINVAL;
- inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0);
- if (!inode)
- return -ENOMEM;
-
var = kzalloc(sizeof(struct efivar_entry), GFP_KERNEL);
- if (!var) {
- err = -ENOMEM;
- goto out;
- }
+ if (!var)
+ return -ENOMEM;
/* length of the variable name itself: remove GUID and separator */
namelen = dentry->d_name.len - EFI_VARIABLE_GUID_LEN - 1;
@@ -125,6 +122,16 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
efivarfs_hex_to_guid(dentry->d_name.name + namelen + 1,
&var->var.VendorGuid);
+ if (efivar_variable_is_removable(var->var.VendorGuid,
+ dentry->d_name.name, namelen))
+ is_removable = true;
+
+ inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0, is_removable);
+ if (!inode) {
+ err = -ENOMEM;
+ goto out;
+ }
+
for (i = 0; i < namelen; i++)
var->var.VariableName[i] = dentry->d_name.name[i];
@@ -138,7 +145,8 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
out:
if (err) {
kfree(var);
- iput(inode);
+ if (inode)
+ iput(inode);
}
return err;
}
diff --git a/fs/efivarfs/internal.h b/fs/efivarfs/internal.h
index b5ff16addb7c..b4505188e799 100644
--- a/fs/efivarfs/internal.h
+++ b/fs/efivarfs/internal.h
@@ -15,7 +15,8 @@ extern const struct file_operations efivarfs_file_operations;
extern const struct inode_operations efivarfs_dir_inode_operations;
extern bool efivarfs_valid_name(const char *str, int len);
extern struct inode *efivarfs_get_inode(struct super_block *sb,
- const struct inode *dir, int mode, dev_t dev);
+ const struct inode *dir, int mode, dev_t dev,
+ bool is_removable);
extern struct list_head efivarfs_list;
diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
index 86a2121828c3..abb244b06024 100644
--- a/fs/efivarfs/super.c
+++ b/fs/efivarfs/super.c
@@ -118,8 +118,9 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
struct dentry *dentry, *root = sb->s_root;
unsigned long size = 0;
char *name;
- int len, i;
+ int len;
int err = -ENOMEM;
+ bool is_removable = false;
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
@@ -128,15 +129,17 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
memcpy(entry->var.VariableName, name16, name_size);
memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t));
- len = ucs2_strlen(entry->var.VariableName);
+ len = ucs2_utf8size(entry->var.VariableName);
/* name, plus '-', plus GUID, plus NUL*/
name = kmalloc(len + 1 + EFI_VARIABLE_GUID_LEN + 1, GFP_KERNEL);
if (!name)
goto fail;
- for (i = 0; i < len; i++)
- name[i] = entry->var.VariableName[i] & 0xFF;
+ ucs2_as_utf8(name, entry->var.VariableName, len);
+
+ if (efivar_variable_is_removable(entry->var.VendorGuid, name, len))
+ is_removable = true;
name[len] = '-';
@@ -144,7 +147,8 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
name[len + EFI_VARIABLE_GUID_LEN+1] = '\0';
- inode = efivarfs_get_inode(sb, d_inode(root), S_IFREG | 0644, 0);
+ inode = efivarfs_get_inode(sb, d_inode(root), S_IFREG | 0644, 0,
+ is_removable);
if (!inode)
goto fail_name;
@@ -200,7 +204,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_d_op = &efivarfs_d_ops;
sb->s_time_gran = 1;
- inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0);
+ inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0, true);
if (!inode)
return -ENOMEM;
inode->i_op = &efivarfs_dir_inode_operations;
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index ec0668a60678..fe1f50fe764f 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -191,7 +191,6 @@ static int ext4_init_block_bitmap(struct super_block *sb,
/* If checksum is bad mark all blocks used to prevent allocation
* essentially implementing a per-group read-only flag. */
if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
- ext4_error(sb, "Checksum bad for group %u", block_group);
grp = ext4_get_group_info(sb, block_group);
if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
percpu_counter_sub(&sbi->s_freeclusters_counter,
@@ -442,14 +441,16 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
}
ext4_lock_group(sb, block_group);
if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
-
err = ext4_init_block_bitmap(sb, bh, block_group, desc);
set_bitmap_uptodate(bh);
set_buffer_uptodate(bh);
ext4_unlock_group(sb, block_group);
unlock_buffer(bh);
- if (err)
+ if (err) {
+ ext4_error(sb, "Failed to init block bitmap for group "
+ "%u: %d", block_group, err);
goto out;
+ }
goto verify;
}
ext4_unlock_group(sb, block_group);
diff --git a/fs/ext4/crypto_key.c b/fs/ext4/crypto_key.c
index c5882b36e558..9a16d1e75a49 100644
--- a/fs/ext4/crypto_key.c
+++ b/fs/ext4/crypto_key.c
@@ -213,9 +213,11 @@ retry:
res = -ENOKEY;
goto out;
}
+ down_read(&keyring_key->sem);
ukp = user_key_payload(keyring_key);
if (ukp->datalen != sizeof(struct ext4_encryption_key)) {
res = -EINVAL;
+ up_read(&keyring_key->sem);
goto out;
}
master_key = (struct ext4_encryption_key *)ukp->data;
@@ -226,10 +228,12 @@ retry:
"ext4: key size incorrect: %d\n",
master_key->size);
res = -ENOKEY;
+ up_read(&keyring_key->sem);
goto out;
}
res = ext4_derive_key_aes(ctx.nonce, master_key->raw,
raw_key);
+ up_read(&keyring_key->sem);
if (res)
goto out;
got_key:
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 5cf6d8be48dd..786cb51cab56 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -850,6 +850,29 @@ do { \
#include "extents_status.h"
/*
+ * Lock subclasses for i_data_sem in the ext4_inode_info structure.
+ *
+ * These are needed to avoid lockdep false positives when we need to
+ * allocate blocks to the quota inode during ext4_map_blocks(), while
+ * holding i_data_sem for a normal (non-quota) inode. Since we don't
+ * do quota tracking for the quota inode, this avoids deadlock (as
+ * well as infinite recursion, since it isn't turtles all the way
+ * down...)
+ *
+ * I_DATA_SEM_NORMAL - Used for most inodes
+ * I_DATA_SEM_OTHER - Used by move_inode.c for the second normal inode
+ * where the second inode has larger inode number
+ * than the first
+ * I_DATA_SEM_QUOTA - Used for quota inodes only
+ */
+enum {
+ I_DATA_SEM_NORMAL = 0,
+ I_DATA_SEM_OTHER,
+ I_DATA_SEM_QUOTA,
+};
+
+
+/*
* fourth extended file system inode data in memory
*/
struct ext4_inode_info {
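The subclasses matter because block allocation for the quota file nests i_data_sem inside i_data_sem. An illustrative call chain (not literal code) and the single call the rest of this patch wires up:

	/*
	 * ext4_map_blocks(normal inode)      down_write(&ei->i_data_sem)
	 *   dquot_alloc_block()              quota file needs a new block
	 *     ext4_map_blocks(quota inode)   down_write(&qi->i_data_sem)
	 *
	 * With one lock class this looks like a self-deadlock to lockdep;
	 * tagging the quota inode's rwsem gives it a distinct class:
	 */
	lockdep_set_subclass(&EXT4_I(quota_inode)->i_data_sem, I_DATA_SEM_QUOTA);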
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 1b8024d26f65..53f2b98a69f3 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -76,7 +76,6 @@ static int ext4_init_inode_bitmap(struct super_block *sb,
/* If checksum is bad mark all blocks and inodes use to prevent
* allocation, essentially implementing a per-group read-only flag. */
if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
- ext4_error(sb, "Checksum bad for group %u", block_group);
grp = ext4_get_group_info(sb, block_group);
if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
percpu_counter_sub(&sbi->s_freeclusters_counter,
@@ -191,8 +190,11 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
set_buffer_verified(bh);
ext4_unlock_group(sb, block_group);
unlock_buffer(bh);
- if (err)
+ if (err) {
+ ext4_error(sb, "Failed to init inode bitmap for group "
+ "%u: %d", block_group, err);
goto out;
+ }
return bh;
}
ext4_unlock_group(sb, block_group);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index ea433a7f4bca..06bda0361e7c 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -657,6 +657,34 @@ has_zeroout:
return retval;
}
+/*
+ * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
+ * we have to be careful as someone else may be manipulating b_state as well.
+ */
+static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
+{
+ unsigned long old_state;
+ unsigned long new_state;
+
+ flags &= EXT4_MAP_FLAGS;
+
+ /* Dummy buffer_head? Set non-atomically. */
+ if (!bh->b_page) {
+ bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
+ return;
+ }
+ /*
+ * Someone else may be modifying b_state. Be careful! This is ugly but
+ * once we get rid of using bh as a container for mapping information
+ * to pass to / from get_block functions, this can go away.
+ */
+ do {
+ old_state = READ_ONCE(bh->b_state);
+ new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
+ } while (unlikely(
+ cmpxchg(&bh->b_state, old_state, new_state) != old_state));
+}
+
/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096
@@ -693,7 +721,7 @@ static int _ext4_get_block(struct inode *inode, sector_t iblock,
ext4_io_end_t *io_end = ext4_inode_aio(inode);
map_bh(bh, inode->i_sb, map.m_pblk);
- bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
+ ext4_update_bh_state(bh, map.m_flags);
if (IS_DAX(inode) && buffer_unwritten(bh)) {
/*
* dgc: I suspect unwritten conversion on ext4+DAX is
@@ -1669,7 +1697,7 @@ int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
return ret;
map_bh(bh, inode->i_sb, map.m_pblk);
- bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
+ ext4_update_bh_state(bh, map.m_flags);
if (buffer_unwritten(bh)) {
/* A delayed write to unwritten bh should be marked
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index fb6f11709ae6..9bdbf98240a0 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -60,10 +60,10 @@ ext4_double_down_write_data_sem(struct inode *first, struct inode *second)
{
if (first < second) {
down_write(&EXT4_I(first)->i_data_sem);
- down_write_nested(&EXT4_I(second)->i_data_sem, SINGLE_DEPTH_NESTING);
+ down_write_nested(&EXT4_I(second)->i_data_sem, I_DATA_SEM_OTHER);
} else {
down_write(&EXT4_I(second)->i_data_sem);
- down_write_nested(&EXT4_I(first)->i_data_sem, SINGLE_DEPTH_NESTING);
+ down_write_nested(&EXT4_I(first)->i_data_sem, I_DATA_SEM_OTHER);
}
}
@@ -265,11 +265,12 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
ext4_lblk_t orig_blk_offset, donor_blk_offset;
unsigned long blocksize = orig_inode->i_sb->s_blocksize;
unsigned int tmp_data_size, data_size, replaced_size;
- int err2, jblocks, retries = 0;
+ int i, err2, jblocks, retries = 0;
int replaced_count = 0;
int from = data_offset_in_page << orig_inode->i_blkbits;
int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
struct super_block *sb = orig_inode->i_sb;
+ struct buffer_head *bh = NULL;
/*
* It needs twice the amount of ordinary journal buffers because
@@ -380,8 +381,16 @@ data_copy:
}
/* Perform all necessary steps similar to write_begin()/write_end()
* but keeping in mind that i_size will not change */
- *err = __block_write_begin(pagep[0], from, replaced_size,
- ext4_get_block);
+ if (!page_has_buffers(pagep[0]))
+ create_empty_buffers(pagep[0], 1 << orig_inode->i_blkbits, 0);
+ bh = page_buffers(pagep[0]);
+ for (i = 0; i < data_offset_in_page; i++)
+ bh = bh->b_this_page;
+ for (i = 0; i < block_len_in_page; i++) {
+ *err = ext4_get_block(orig_inode, orig_blk_offset + i, bh, 0);
+ if (*err < 0)
+ break;
+ }
if (!*err)
*err = block_commit_write(pagep[0], from, from + replaced_size);
@@ -474,6 +483,13 @@ mext_check_arguments(struct inode *orig_inode,
return -EBUSY;
}
+ if (IS_NOQUOTA(orig_inode) || IS_NOQUOTA(donor_inode)) {
+ ext4_debug("ext4 move extent: The argument files should "
+ "not be quota files [ino:orig %lu, donor %lu]\n",
+ orig_inode->i_ino, donor_inode->i_ino);
+ return -EBUSY;
+ }
+
/* Ext4 move extent supports only extent based file */
if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) {
ext4_debug("ext4 move extent: orig file is not extents "
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index ad62d7acc315..34038e3598d5 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -198,7 +198,7 @@ static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size)
if (flex_gd == NULL)
goto out3;
- if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_flex_group_data))
+ if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_group_data))
goto out2;
flex_gd->count = flexbg_size;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index c9ab67da6e5a..ba1cf0bf2f81 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1292,9 +1292,9 @@ static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
return -1;
}
if (ext4_has_feature_quota(sb)) {
- ext4_msg(sb, KERN_ERR, "Cannot set journaled quota options "
- "when QUOTA feature is enabled");
- return -1;
+ ext4_msg(sb, KERN_INFO, "Journaled quota options "
+ "ignored when QUOTA feature is enabled");
+ return 1;
}
qname = match_strdup(args);
if (!qname) {
@@ -1657,10 +1657,10 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
return -1;
}
if (ext4_has_feature_quota(sb)) {
- ext4_msg(sb, KERN_ERR,
- "Cannot set journaled quota options "
+ ext4_msg(sb, KERN_INFO,
+ "Quota format mount options ignored "
"when QUOTA feature is enabled");
- return -1;
+ return 1;
}
sbi->s_jquota_fmt = m->mount_opt;
#endif
@@ -1721,11 +1721,11 @@ static int parse_options(char *options, struct super_block *sb,
#ifdef CONFIG_QUOTA
if (ext4_has_feature_quota(sb) &&
(test_opt(sb, USRQUOTA) || test_opt(sb, GRPQUOTA))) {
- ext4_msg(sb, KERN_ERR, "Cannot set quota options when QUOTA "
- "feature is enabled");
- return 0;
- }
- if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
+ ext4_msg(sb, KERN_INFO, "Quota feature enabled, usrquota and grpquota "
+ "mount options ignored.");
+ clear_opt(sb, USRQUOTA);
+ clear_opt(sb, GRPQUOTA);
+ } else if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
clear_opt(sb, USRQUOTA);
@@ -4936,6 +4936,20 @@ static int ext4_quota_on_mount(struct super_block *sb, int type)
EXT4_SB(sb)->s_jquota_fmt, type);
}
+static void lockdep_set_quota_inode(struct inode *inode, int subclass)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+
+ /* The first argument of lockdep_set_subclass has to be
+ * *exactly* the same as the argument to init_rwsem() --- in
+ * this case, in init_once() --- or lockdep gets unhappy
+ * because the name of the lock is set using the
+ * stringification of the argument to init_rwsem().
+ */
+ (void) ei; /* shut up clang warning if !CONFIG_LOCKDEP */
+ lockdep_set_subclass(&ei->i_data_sem, subclass);
+}
+
/*
* Standard function to be called on quota_on
*/
@@ -4975,8 +4989,12 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
if (err)
return err;
}
-
- return dquot_quota_on(sb, type, format_id, path);
+ lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA);
+ err = dquot_quota_on(sb, type, format_id, path);
+ if (err)
+ lockdep_set_quota_inode(path->dentry->d_inode,
+ I_DATA_SEM_NORMAL);
+ return err;
}
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
@@ -5002,8 +5020,11 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
/* Don't account quota for quota files to avoid recursion */
qf_inode->i_flags |= S_NOQUOTA;
+ lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
err = dquot_enable(qf_inode, type, format_id, flags);
iput(qf_inode);
+ if (err)
+ lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
return err;
}
diff --git a/fs/fhandle.c b/fs/fhandle.c
index d59712dfa3e7..ca3c3dd01789 100644
--- a/fs/fhandle.c
+++ b/fs/fhandle.c
@@ -228,7 +228,7 @@ long do_handle_open(int mountdirfd,
path_put(&path);
return fd;
}
- file = file_open_root(path.dentry, path.mnt, "", open_flag);
+ file = file_open_root(path.dentry, path.mnt, "", open_flag, 0);
if (IS_ERR(file)) {
put_unused_fd(fd);
retval = PTR_ERR(file);
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 28cd6508f4aa..de11206dda63 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -223,6 +223,9 @@ static void wb_wait_for_completion(struct backing_dev_info *bdi,
#define WB_FRN_HIST_MAX_SLOTS (WB_FRN_HIST_THR_SLOTS / 2 + 1)
/* one round can affect upto 5 slots */
+static atomic_t isw_nr_in_flight = ATOMIC_INIT(0);
+static struct workqueue_struct *isw_wq;
+
void __inode_attach_wb(struct inode *inode, struct page *page)
{
struct backing_dev_info *bdi = inode_to_bdi(inode);
@@ -278,13 +281,15 @@ locked_inode_to_wb_and_lock_list(struct inode *inode)
wb_get(wb);
spin_unlock(&inode->i_lock);
spin_lock(&wb->list_lock);
- wb_put(wb); /* not gonna deref it anymore */
/* i_wb may have changed in between, can't use inode_to_wb() */
- if (likely(wb == inode->i_wb))
- return wb; /* @inode already has ref */
+ if (likely(wb == inode->i_wb)) {
+ wb_put(wb); /* @inode already has ref */
+ return wb;
+ }
spin_unlock(&wb->list_lock);
+ wb_put(wb);
cpu_relax();
spin_lock(&inode->i_lock);
}
@@ -424,6 +429,8 @@ skip_switch:
iput(inode);
kfree(isw);
+
+ atomic_dec(&isw_nr_in_flight);
}
static void inode_switch_wbs_rcu_fn(struct rcu_head *rcu_head)
@@ -433,7 +440,7 @@ static void inode_switch_wbs_rcu_fn(struct rcu_head *rcu_head)
/* needs to grab bh-unsafe locks, bounce to work item */
INIT_WORK(&isw->work, inode_switch_wbs_work_fn);
- schedule_work(&isw->work);
+ queue_work(isw_wq, &isw->work);
}
/**
@@ -469,7 +476,8 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
/* while holding I_WB_SWITCH, no one else can update the association */
spin_lock(&inode->i_lock);
- if (inode->i_state & (I_WB_SWITCH | I_FREEING) ||
+ if (!(inode->i_sb->s_flags & MS_ACTIVE) ||
+ inode->i_state & (I_WB_SWITCH | I_FREEING) ||
inode_to_wb(inode) == isw->new_wb) {
spin_unlock(&inode->i_lock);
goto out_free;
@@ -480,6 +488,8 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
ihold(inode);
isw->inode = inode;
+ atomic_inc(&isw_nr_in_flight);
+
/*
* In addition to synchronizing among switchers, I_WB_SWITCH tells
* the RCU protected stat update paths to grab the mapping's
@@ -842,6 +852,33 @@ restart:
wb_put(last_wb);
}
+/**
+ * cgroup_writeback_umount - flush inode wb switches for umount
+ *
+ * This function is called when a super_block is about to be destroyed and
+ * flushes in-flight inode wb switches. An inode wb switch goes through
+ * RCU and then workqueue, so the two need to be flushed in order to ensure
+ * that all previously scheduled switches are finished. As wb switches are
+ * rare occurrences and synchronize_rcu() can take a while, perform
+ * flushing iff wb switches are in flight.
+ */
+void cgroup_writeback_umount(void)
+{
+ if (atomic_read(&isw_nr_in_flight)) {
+ synchronize_rcu();
+ flush_workqueue(isw_wq);
+ }
+}
+
+static int __init cgroup_writeback_init(void)
+{
+ isw_wq = alloc_workqueue("inode_switch_wbs", 0, 0);
+ if (!isw_wq)
+ return -ENOMEM;
+ return 0;
+}
+fs_initcall(cgroup_writeback_init);
+
#else /* CONFIG_CGROUP_WRITEBACK */
static struct bdi_writeback *
@@ -1304,10 +1341,10 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
* we go e.g. from filesystem. Flusher thread uses __writeback_single_inode()
* and does more profound writeback list handling in writeback_sb_inodes().
*/
-static int
-writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
- struct writeback_control *wbc)
+static int writeback_single_inode(struct inode *inode,
+ struct writeback_control *wbc)
{
+ struct bdi_writeback *wb;
int ret = 0;
spin_lock(&inode->i_lock);
@@ -1345,7 +1382,8 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
ret = __writeback_single_inode(inode, wbc);
wbc_detach_inode(wbc);
- spin_lock(&wb->list_lock);
+
+ wb = inode_to_wb_and_lock_list(inode);
spin_lock(&inode->i_lock);
/*
* If inode is clean, remove it from writeback lists. Otherwise don't
@@ -1420,6 +1458,7 @@ static long writeback_sb_inodes(struct super_block *sb,
while (!list_empty(&wb->b_io)) {
struct inode *inode = wb_inode(wb->b_io.prev);
+ struct bdi_writeback *tmp_wb;
if (inode->i_sb != sb) {
if (work->sb) {
@@ -1510,15 +1549,23 @@ static long writeback_sb_inodes(struct super_block *sb,
cond_resched();
}
-
- spin_lock(&wb->list_lock);
+ /*
+ * Requeue @inode if still dirty. Be careful as @inode may
+ * have been switched to another wb in the meantime.
+ */
+ tmp_wb = inode_to_wb_and_lock_list(inode);
spin_lock(&inode->i_lock);
if (!(inode->i_state & I_DIRTY_ALL))
wrote++;
- requeue_inode(inode, wb, &wbc);
+ requeue_inode(inode, tmp_wb, &wbc);
inode_sync_complete(inode);
spin_unlock(&inode->i_lock);
+ if (unlikely(tmp_wb != wb)) {
+ spin_unlock(&tmp_wb->list_lock);
+ spin_lock(&wb->list_lock);
+ }
+
/*
* bail out to wb_writeback() often enough to check
* background threshold and other termination conditions.
@@ -2305,7 +2352,6 @@ EXPORT_SYMBOL(sync_inodes_sb);
*/
int write_inode_now(struct inode *inode, int sync)
{
- struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
struct writeback_control wbc = {
.nr_to_write = LONG_MAX,
.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
@@ -2317,7 +2363,7 @@ int write_inode_now(struct inode *inode, int sync)
wbc.nr_to_write = 0;
might_sleep();
- return writeback_single_inode(inode, wb, &wbc);
+ return writeback_single_inode(inode, &wbc);
}
EXPORT_SYMBOL(write_inode_now);
@@ -2334,7 +2380,7 @@ EXPORT_SYMBOL(write_inode_now);
*/
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
- return writeback_single_inode(inode, &inode_to_bdi(inode)->wb, wbc);
+ return writeback_single_inode(inode, wbc);
}
EXPORT_SYMBOL(sync_inode);
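The umount flush relies on a simple discipline: bump isw_nr_in_flight before the switch is handed to RCU, drop it as the work function's last action, and on teardown wait out both stages in order. The generic shape, with nr_in_flight and wq as illustrative names for a producer that queues through call_rcu() into a workqueue:

	/* producer, before call_rcu(): */
	atomic_inc(&nr_in_flight);

	/* work function, as its last action: */
	atomic_dec(&nr_in_flight);

	/* teardown: only pay for the flush when something is pending */
	if (atomic_read(&nr_in_flight)) {
		synchronize_rcu();	/* drain the RCU stage first... */
		flush_workqueue(wq);	/* ...then the queued work items */
	}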
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index 8e3ee1936c7e..c5b6b7165489 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -90,7 +90,7 @@ static struct list_head *cuse_conntbl_head(dev_t devt)
static ssize_t cuse_read_iter(struct kiocb *kiocb, struct iov_iter *to)
{
- struct fuse_io_priv io = { .async = 0, .file = kiocb->ki_filp };
+ struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(kiocb->ki_filp);
loff_t pos = 0;
return fuse_direct_io(&io, to, &pos, FUSE_DIO_CUSE);
@@ -98,7 +98,7 @@ static ssize_t cuse_read_iter(struct kiocb *kiocb, struct iov_iter *to)
static ssize_t cuse_write_iter(struct kiocb *kiocb, struct iov_iter *from)
{
- struct fuse_io_priv io = { .async = 0, .file = kiocb->ki_filp };
+ struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(kiocb->ki_filp);
loff_t pos = 0;
/*
* No locking or generic_write_checks(), the server is
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 570ca4053c80..c2e340d6ec6e 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -528,6 +528,11 @@ static void fuse_release_user_pages(struct fuse_req *req, int write)
}
}
+static void fuse_io_release(struct kref *kref)
+{
+ kfree(container_of(kref, struct fuse_io_priv, refcnt));
+}
+
static ssize_t fuse_get_res_by_io(struct fuse_io_priv *io)
{
if (io->err)
@@ -585,8 +590,9 @@ static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
}
io->iocb->ki_complete(io->iocb, res, 0);
- kfree(io);
}
+
+ kref_put(&io->refcnt, fuse_io_release);
}
static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_req *req)
@@ -613,6 +619,7 @@ static size_t fuse_async_req_send(struct fuse_conn *fc, struct fuse_req *req,
size_t num_bytes, struct fuse_io_priv *io)
{
spin_lock(&io->lock);
+ kref_get(&io->refcnt);
io->size += num_bytes;
io->reqs++;
spin_unlock(&io->lock);
@@ -691,7 +698,7 @@ static void fuse_short_read(struct fuse_req *req, struct inode *inode,
static int fuse_do_readpage(struct file *file, struct page *page)
{
- struct fuse_io_priv io = { .async = 0, .file = file };
+ struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);
struct inode *inode = page->mapping->host;
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_req *req;
@@ -984,7 +991,7 @@ static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
size_t res;
unsigned offset;
unsigned i;
- struct fuse_io_priv io = { .async = 0, .file = file };
+ struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);
for (i = 0; i < req->num_pages; i++)
fuse_wait_on_page_writeback(inode, req->pages[i]->index);
@@ -1398,7 +1405,7 @@ static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
- struct fuse_io_priv io = { .async = 0, .file = iocb->ki_filp };
+ struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb->ki_filp);
return __fuse_direct_read(&io, to, &iocb->ki_pos);
}
@@ -1406,7 +1413,7 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
- struct fuse_io_priv io = { .async = 0, .file = file };
+ struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);
ssize_t res;
if (is_bad_inode(inode))
@@ -2786,6 +2793,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
loff_t i_size;
size_t count = iov_iter_count(iter);
struct fuse_io_priv *io;
+ bool is_sync = is_sync_kiocb(iocb);
pos = offset;
inode = file->f_mapping->host;
@@ -2806,6 +2814,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
if (!io)
return -ENOMEM;
spin_lock_init(&io->lock);
+ kref_init(&io->refcnt);
io->reqs = 1;
io->bytes = -1;
io->size = 0;
@@ -2825,12 +2834,18 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
* to wait on real async I/O requests, so we must submit this request
* synchronously.
*/
- if (!is_sync_kiocb(iocb) && (offset + count > i_size) &&
+ if (!is_sync && (offset + count > i_size) &&
iov_iter_rw(iter) == WRITE)
io->async = false;
- if (io->async && is_sync_kiocb(iocb))
+ if (io->async && is_sync) {
+ /*
+ * Additional reference to keep io around after
+ * calling fuse_aio_complete()
+ */
+ kref_get(&io->refcnt);
io->done = &wait;
+ }
if (iov_iter_rw(iter) == WRITE) {
ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE);
@@ -2843,14 +2858,14 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
/* we have a non-extending, async request, so return */
- if (!is_sync_kiocb(iocb))
+ if (!is_sync)
return -EIOCBQUEUED;
wait_for_completion(&wait);
ret = fuse_get_res_by_io(io);
}
- kfree(io);
+ kref_put(&io->refcnt, fuse_io_release);
if (iov_iter_rw(iter) == WRITE) {
if (ret > 0)
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 6b4f8dd8e9c4..644687ae04bd 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -22,6 +22,7 @@
#include <linux/rbtree.h>
#include <linux/poll.h>
#include <linux/workqueue.h>
+#include <linux/kref.h>
/** Max number of pages that can be used in a single read request */
#define FUSE_MAX_PAGES_PER_REQ 32
@@ -243,6 +244,7 @@ struct fuse_args {
/** The request IO state (for asynchronous processing) */
struct fuse_io_priv {
+ struct kref refcnt;
int async;
spinlock_t lock;
unsigned reqs;
@@ -256,6 +258,13 @@ struct fuse_io_priv {
struct completion *done;
};
+#define FUSE_IO_PRIV_SYNC(f) \
+{ \
+ .refcnt = { ATOMIC_INIT(1) }, \
+ .async = 0, \
+ .file = f, \
+}
+
/**
* Request flags
*
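FUSE_IO_PRIV_SYNC starts the embedded kref at 1, so on-stack sync requests and kmalloc'd async ones share one lifetime rule: each submitted request takes a reference, each completion drops one, and fuse_io_release() frees the heap case. The general shape of that scheme, with illustrative names rather than fuse API:

	#include <linux/kref.h>
	#include <linux/slab.h>

	struct io_ctx {
		struct kref refcnt;
		/* ... request bookkeeping ... */
	};

	/* static initializer usable for on-stack contexts, refcount = 1 */
	#define IO_CTX_INIT { .refcnt = { ATOMIC_INIT(1) } }

	static void io_ctx_release(struct kref *kref)
	{
		/* only heap contexts may ever reach zero; an on-stack
		 * context must keep its initial reference so this kfree()
		 * never sees stack memory */
		kfree(container_of(kref, struct io_ctx, refcnt));
	}

	/* per submitted request:  kref_get(&ctx->refcnt);
	 * per completed request:  kref_put(&ctx->refcnt, io_ctx_release); */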
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 2ac99db3750e..5a7b3229b956 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -730,15 +730,13 @@ static int hostfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
init_special_inode(inode, mode, dev);
err = do_mknod(name, mode, MAJOR(dev), MINOR(dev));
- if (!err)
+ if (err)
goto out_free;
err = read_name(inode, name);
__putname(name);
if (err)
goto out_put;
- if (err)
- goto out_put;
d_instantiate(dentry, inode);
return 0;
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
index ae4d5a1fa4c9..bffb908acbd4 100644
--- a/fs/hpfs/namei.c
+++ b/fs/hpfs/namei.c
@@ -375,12 +375,11 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry)
struct inode *inode = d_inode(dentry);
dnode_secno dno;
int r;
- int rep = 0;
int err;
hpfs_lock(dir->i_sb);
hpfs_adjust_length(name, &len);
-again:
+
err = -ENOENT;
de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh);
if (!de)
@@ -400,33 +399,9 @@ again:
hpfs_error(dir->i_sb, "there was error when removing dirent");
err = -EFSERROR;
break;
- case 2: /* no space for deleting, try to truncate file */
-
+ case 2: /* no space for deleting */
err = -ENOSPC;
- if (rep++)
- break;
-
- dentry_unhash(dentry);
- if (!d_unhashed(dentry)) {
- hpfs_unlock(dir->i_sb);
- return -ENOSPC;
- }
- if (generic_permission(inode, MAY_WRITE) ||
- !S_ISREG(inode->i_mode) ||
- get_write_access(inode)) {
- d_rehash(dentry);
- } else {
- struct iattr newattrs;
- /*pr_info("truncating file before delete.\n");*/
- newattrs.ia_size = 0;
- newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
- err = notify_change(dentry, &newattrs, NULL);
- put_write_access(inode);
- if (!err)
- goto again;
- }
- hpfs_unlock(dir->i_sb);
- return -ENOSPC;
+ break;
default:
drop_nlink(inode);
err = 0;
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index de4bdfac0cec..595ebdb41846 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -463,6 +463,7 @@ hugetlb_vmdelete_list(struct rb_root *root, pgoff_t start, pgoff_t end)
*/
vma_interval_tree_foreach(vma, root, start, end ? end : ULONG_MAX) {
unsigned long v_offset;
+ unsigned long v_end;
/*
* Can the expression below overflow on 32-bit arches?
@@ -475,15 +476,17 @@ hugetlb_vmdelete_list(struct rb_root *root, pgoff_t start, pgoff_t end)
else
v_offset = 0;
- if (end) {
- end = ((end - start) << PAGE_SHIFT) +
- vma->vm_start + v_offset;
- if (end > vma->vm_end)
- end = vma->vm_end;
- } else
- end = vma->vm_end;
+ if (!end)
+ v_end = vma->vm_end;
+ else {
+ v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT)
+ + vma->vm_start;
+ if (v_end > vma->vm_end)
+ v_end = vma->vm_end;
+ }
- unmap_hugepage_range(vma, vma->vm_start + v_offset, end, NULL);
+ unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
+ NULL);
}
}
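A worked instance of the corrected arithmetic, with made-up numbers and PAGE_SHIFT == 12: a vma mapping file pages starting at vm_pgoff = 10, placed at vm_start = 0x40000, truncated up to file page end = 14:

	/* v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT) + vma->vm_start */
	v_end = ((14UL - 10UL) << 12) + 0x40000UL;	/* == 0x44000 */
	if (v_end > vma->vm_end)			/* clamp to the vma */
		v_end = vma->vm_end;
	/* the old code computed ((end - start) << PAGE_SHIFT) + vm_start +
	 * v_offset, which drifts when the vma does not begin exactly at the
	 * truncation start */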
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 81e622681c82..624a57a9c4aa 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1408,11 +1408,12 @@ out:
/**
* jbd2_mark_journal_empty() - Mark on disk journal as empty.
* @journal: The journal to update.
+ * @write_op: With which operation should we write the journal sb
*
* Update a journal's dynamic superblock fields to show that journal is empty.
* Write updated superblock to disk waiting for IO to complete.
*/
-static void jbd2_mark_journal_empty(journal_t *journal)
+static void jbd2_mark_journal_empty(journal_t *journal, int write_op)
{
journal_superblock_t *sb = journal->j_superblock;
@@ -1430,7 +1431,7 @@ static void jbd2_mark_journal_empty(journal_t *journal)
sb->s_start = cpu_to_be32(0);
read_unlock(&journal->j_state_lock);
- jbd2_write_superblock(journal, WRITE_FUA);
+ jbd2_write_superblock(journal, write_op);
/* Log is no longer empty */
write_lock(&journal->j_state_lock);
@@ -1716,7 +1717,13 @@ int jbd2_journal_destroy(journal_t *journal)
if (journal->j_sb_buffer) {
if (!is_journal_aborted(journal)) {
mutex_lock(&journal->j_checkpoint_mutex);
- jbd2_mark_journal_empty(journal);
+
+ write_lock(&journal->j_state_lock);
+ journal->j_tail_sequence =
+ ++journal->j_transaction_sequence;
+ write_unlock(&journal->j_state_lock);
+
+ jbd2_mark_journal_empty(journal, WRITE_FLUSH_FUA);
mutex_unlock(&journal->j_checkpoint_mutex);
} else
err = -EIO;
@@ -1975,7 +1982,7 @@ int jbd2_journal_flush(journal_t *journal)
* the magic code for a fully-recovered superblock. Any future
* commits of data to the journal will restore the current
* s_start value. */
- jbd2_mark_journal_empty(journal);
+ jbd2_mark_journal_empty(journal, WRITE_FUA);
mutex_unlock(&journal->j_checkpoint_mutex);
write_lock(&journal->j_state_lock);
J_ASSERT(!journal->j_running_transaction);
@@ -2021,7 +2028,7 @@ int jbd2_journal_wipe(journal_t *journal, int write)
if (write) {
/* Lock to make assertions happy... */
mutex_lock(&journal->j_checkpoint_mutex);
- jbd2_mark_journal_empty(journal);
+ jbd2_mark_journal_empty(journal, WRITE_FUA);
mutex_unlock(&journal->j_checkpoint_mutex);
}
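
With the new write_op parameter, each jbd2_mark_journal_empty() caller picks the flush semantics it needs. The apparent rationale, condensed from the hunks above: the umount path has just checkpointed buffers that may still sit in the drive's volatile cache, so it needs a full cache flush in front of the FUA superblock write, while jbd2_journal_flush() and jbd2_journal_wipe() run after their own flushing, so FUA alone suffices.

	/* jbd2_journal_destroy(): checkpointed data may still be cached */
	jbd2_mark_journal_empty(journal, WRITE_FLUSH_FUA);

	/* jbd2_journal_flush() / jbd2_journal_wipe(): prior IO already
	 * flushed, so a forced-unit-access write of the sb is enough */
	jbd2_mark_journal_empty(journal, WRITE_FUA);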
diff --git a/fs/jffs2/README.Locking b/fs/jffs2/README.Locking
index 3ea36554107f..8918ac905a3b 100644
--- a/fs/jffs2/README.Locking
+++ b/fs/jffs2/README.Locking
@@ -2,10 +2,6 @@
JFFS2 LOCKING DOCUMENTATION
---------------------------
-At least theoretically, JFFS2 does not require the Big Kernel Lock
-(BKL), which was always helpfully obtained for it by Linux 2.4 VFS
-code. It has its own locking, as described below.
-
This document attempts to describe the existing locking rules for
JFFS2. It is not expected to remain perfectly up to date, but ought to
be fairly close.
@@ -69,6 +65,7 @@ Ordering constraints:
any f->sem held.
2. Never attempt to lock two file mutexes in one thread.
No ordering rules have been made for doing so.
+ 3. Never lock a page cache page with f->sem held.
erase_completion_lock spinlock
diff --git a/fs/jffs2/build.c b/fs/jffs2/build.c
index a3750f902adc..c1f04947d7dc 100644
--- a/fs/jffs2/build.c
+++ b/fs/jffs2/build.c
@@ -49,7 +49,8 @@ next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c)
static void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
- struct jffs2_inode_cache *ic)
+ struct jffs2_inode_cache *ic,
+ int *dir_hardlinks)
{
struct jffs2_full_dirent *fd;
@@ -68,19 +69,21 @@ static void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
dbg_fsbuild("child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n",
fd->name, fd->ino, ic->ino);
jffs2_mark_node_obsolete(c, fd->raw);
+ /* Clear the ic/raw union so it doesn't cause problems later. */
+ fd->ic = NULL;
continue;
}
+ /* From this point, fd->raw is no longer used so we can set fd->ic */
+ fd->ic = child_ic;
+ child_ic->pino_nlink++;
+ /* If we appear (at this stage) to have hard-linked directories,
+ * set a flag to trigger a scan later */
if (fd->type == DT_DIR) {
- if (child_ic->pino_nlink) {
- JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n",
- fd->name, fd->ino, ic->ino);
- /* TODO: What do we do about it? */
- } else {
- child_ic->pino_nlink = ic->ino;
- }
- } else
- child_ic->pino_nlink++;
+ child_ic->flags |= INO_FLAGS_IS_DIR;
+ if (child_ic->pino_nlink > 1)
+ *dir_hardlinks = 1;
+ }
dbg_fsbuild("increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino);
/* Can't free scan_dents so far. We might need them in pass 2 */
@@ -94,8 +97,7 @@ static void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
*/
static int jffs2_build_filesystem(struct jffs2_sb_info *c)
{
- int ret;
- int i;
+ int ret, i, dir_hardlinks = 0;
struct jffs2_inode_cache *ic;
struct jffs2_full_dirent *fd;
struct jffs2_full_dirent *dead_fds = NULL;
@@ -119,7 +121,7 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
/* Now scan the directory tree, increasing nlink according to every dirent found. */
for_each_inode(i, c, ic) {
if (ic->scan_dents) {
- jffs2_build_inode_pass1(c, ic);
+ jffs2_build_inode_pass1(c, ic, &dir_hardlinks);
cond_resched();
}
}
@@ -155,6 +157,20 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
}
dbg_fsbuild("pass 2a complete\n");
+
+ if (dir_hardlinks) {
+ /* If we detected directory hardlinks earlier, *hopefully*
+ * they are gone now because some of the links were from
+ * dead directories which still had some old dirents lying
+ * around and not yet garbage-collected, but which have
+ * been discarded above. So clear the pino_nlink field
+ * in each directory, so that the final scan below can
+ * print appropriate warnings. */
+ for_each_inode(i, c, ic) {
+ if (ic->flags & INO_FLAGS_IS_DIR)
+ ic->pino_nlink = 0;
+ }
+ }
dbg_fsbuild("freeing temporary data structures\n");
/* Finally, we can scan again and free the dirent structs */
@@ -162,6 +178,33 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
while(ic->scan_dents) {
fd = ic->scan_dents;
ic->scan_dents = fd->next;
+ /* We do use the pino_nlink field to count nlink of
+ * directories during fs build, so set it to the
+ * parent ino# now that there's hopefully only
+ * one. */
+ if (fd->type == DT_DIR) {
+ if (!fd->ic) {
+ /* We'll have complained about it and marked the corresponding
+ raw node obsolete already. Just skip it. */
+ continue;
+ }
+
+ /* We *have* to have set this in jffs2_build_inode_pass1() */
+ BUG_ON(!(fd->ic->flags & INO_FLAGS_IS_DIR));
+
+ /* We clear ic->pino_nlink ∀ directories' ic *only* if dir_hardlinks
+ * is set. Otherwise, we know this should never trigger anyway, so
+ * we don't do the check. And ic->pino_nlink still contains the nlink
+ * value (which is 1). */
+ if (dir_hardlinks && fd->ic->pino_nlink) {
+ JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u is also hard linked from dir ino #%u\n",
+ fd->name, fd->ino, ic->ino, fd->ic->pino_nlink);
+ /* Should we unlink it from its previous parent? */
+ }
+
+ /* For directories, ic->pino_nlink holds the parent inode # */
+ fd->ic->pino_nlink = ic->ino;
+ }
jffs2_free_full_dirent(fd);
}
ic->scan_dents = NULL;
@@ -240,11 +283,7 @@ static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c,
/* Reduce nlink of the child. If it's now zero, stick it on the
dead_fds list to be cleaned up later. Else just free the fd */
-
- if (fd->type == DT_DIR)
- child_ic->pino_nlink = 0;
- else
- child_ic->pino_nlink--;
+ child_ic->pino_nlink--;
if (!child_ic->pino_nlink) {
dbg_fsbuild("inode #%u (\"%s\") now has no links; adding to dead_fds list.\n",
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index d211b8e18566..30c4c9ebb693 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -843,9 +843,14 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
pr_notice("%s(): Link succeeded, unlink failed (err %d). You now have a hard link\n",
__func__, ret);
- /* Might as well let the VFS know */
- d_instantiate(new_dentry, d_inode(old_dentry));
- ihold(d_inode(old_dentry));
+ /*
+ * We can't keep the target in dcache after that.
+ * For one thing, we can't afford dentry aliases for directories.
+ * For another, if there was a victim, we _can't_ set new inode
+ * for that sucker and we have to trigger mount eviction - the
+ * caller won't do it on its own since we are returning an error.
+ */
+ d_invalidate(new_dentry);
new_dir_i->i_mtime = new_dir_i->i_ctime = ITIME(now);
return ret;
}
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index f509f62e12f6..3361979d728c 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -137,39 +137,33 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
struct page *pg;
struct inode *inode = mapping->host;
struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
- struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
- struct jffs2_raw_inode ri;
- uint32_t alloc_len = 0;
pgoff_t index = pos >> PAGE_CACHE_SHIFT;
uint32_t pageofs = index << PAGE_CACHE_SHIFT;
int ret = 0;
- jffs2_dbg(1, "%s()\n", __func__);
-
- if (pageofs > inode->i_size) {
- ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
- ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
- if (ret)
- return ret;
- }
-
- mutex_lock(&f->sem);
pg = grab_cache_page_write_begin(mapping, index, flags);
- if (!pg) {
- if (alloc_len)
- jffs2_complete_reservation(c);
- mutex_unlock(&f->sem);
+ if (!pg)
return -ENOMEM;
- }
*pagep = pg;
- if (alloc_len) {
+ jffs2_dbg(1, "%s()\n", __func__);
+
+ if (pageofs > inode->i_size) {
/* Make new hole frag from old EOF to new page */
+ struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
+ struct jffs2_raw_inode ri;
struct jffs2_full_dnode *fn;
+ uint32_t alloc_len;
jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
(unsigned int)inode->i_size, pageofs);
+ ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
+ ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
+ if (ret)
+ goto out_page;
+
+ mutex_lock(&f->sem);
memset(&ri, 0, sizeof(ri));
ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
@@ -196,6 +190,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
if (IS_ERR(fn)) {
ret = PTR_ERR(fn);
jffs2_complete_reservation(c);
+ mutex_unlock(&f->sem);
goto out_page;
}
ret = jffs2_add_full_dnode_to_inode(c, f, fn);
@@ -210,10 +205,12 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
jffs2_mark_node_obsolete(c, fn->raw);
jffs2_free_full_dnode(fn);
jffs2_complete_reservation(c);
+ mutex_unlock(&f->sem);
goto out_page;
}
jffs2_complete_reservation(c);
inode->i_size = pageofs;
+ mutex_unlock(&f->sem);
}
/*
@@ -222,18 +219,18 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
* case of a short-copy.
*/
if (!PageUptodate(pg)) {
+ mutex_lock(&f->sem);
ret = jffs2_do_readpage_nolock(inode, pg);
+ mutex_unlock(&f->sem);
if (ret)
goto out_page;
}
- mutex_unlock(&f->sem);
jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags);
return ret;
out_page:
unlock_page(pg);
page_cache_release(pg);
- mutex_unlock(&f->sem);
return ret;
}
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
index 5a2dec2b064c..95d5880a63ee 100644
--- a/fs/jffs2/gc.c
+++ b/fs/jffs2/gc.c
@@ -1296,14 +1296,17 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
BUG_ON(start > orig_start);
}
- /* First, use readpage() to read the appropriate page into the page cache */
- /* Q: What happens if we actually try to GC the _same_ page for which commit_write()
- * triggered garbage collection in the first place?
- * A: I _think_ it's OK. read_cache_page shouldn't deadlock, we'll write out the
- * page OK. We'll actually write it out again in commit_write, which is a little
- * suboptimal, but at least we're correct.
- */
+ /* The rules state that we must obtain the page lock *before* f->sem, so
+ * drop f->sem temporarily. Since we also hold c->alloc_sem, nothing's
+ * actually going to *change* so we're safe; we only allow reading.
+ *
+ * It is important to note that jffs2_write_begin() will ensure that its
+ * page is marked Uptodate before allocating space. That means that if we
+ * end up here trying to GC the *same* page that jffs2_write_begin() is
+ * trying to write out, read_cache_page() will not deadlock. */
+ mutex_unlock(&f->sem);
pg_ptr = jffs2_gc_fetch_page(c, f, start, &pg);
+ mutex_lock(&f->sem);
if (IS_ERR(pg_ptr)) {
pr_warn("read_cache_page() returned error: %ld\n",
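
Taken together, the jffs2 file.c and gc.c hunks enforce a single rule - the page cache page is locked before f->sem, as the README.Locking addition above now states - and the GC path drops f->sem around its page fetch to respect it. A userspace sketch of that discipline, with pthread mutexes standing in for the two locks (the argument that c->alloc_sem keeps the file stable while f->sem is dropped is not modeled here):

	#include <pthread.h>

	static pthread_mutex_t page_mu = PTHREAD_MUTEX_INITIALIZER; /* page lock */
	static pthread_mutex_t fsem    = PTHREAD_MUTEX_INITIALIZER; /* f->sem */

	static void write_begin_path(void)
	{
		pthread_mutex_lock(&page_mu);	/* outer lock first */
		pthread_mutex_lock(&fsem);	/* inner lock second */
		pthread_mutex_unlock(&fsem);
		pthread_mutex_unlock(&page_mu);
	}

	static void gc_path(void)
	{
		pthread_mutex_lock(&fsem);
		/* need a page now: drop f->sem so the ordering holds */
		pthread_mutex_unlock(&fsem);
		pthread_mutex_lock(&page_mu);
		pthread_mutex_lock(&fsem);	/* re-take in the correct order */
		pthread_mutex_unlock(&fsem);
		pthread_mutex_unlock(&page_mu);
	}

	int main(void)
	{
		write_begin_path();
		gc_path();
		return 0;
	}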
diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h
index fa35ff79ab35..0637271f3770 100644
--- a/fs/jffs2/nodelist.h
+++ b/fs/jffs2/nodelist.h
@@ -194,6 +194,7 @@ struct jffs2_inode_cache {
#define INO_STATE_CLEARING 6 /* In clear_inode() */
#define INO_FLAGS_XATTR_CHECKED 0x01 /* has no duplicate xattr_ref */
+#define INO_FLAGS_IS_DIR 0x02 /* is a directory */
#define RAWNODE_CLASS_INODE_CACHE 0
#define RAWNODE_CLASS_XATTR_DATUM 1
@@ -249,7 +250,10 @@ struct jffs2_readinode_info
struct jffs2_full_dirent
{
- struct jffs2_raw_node_ref *raw;
+ union {
+ struct jffs2_raw_node_ref *raw;
+ struct jffs2_inode_cache *ic; /* Just during part of build */
+ };
struct jffs2_full_dirent *next;
uint32_t version;
uint32_t ino; /* == zero for unlink */
diff --git a/fs/locks.c b/fs/locks.c
index 0d2b3267e2a3..6333263b7bc8 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -2182,7 +2182,6 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
goto out;
}
-again:
error = flock_to_posix_lock(filp, file_lock, &flock);
if (error)
goto out;
@@ -2224,19 +2223,22 @@ again:
* Attempt to detect a close/fcntl race and recover by
* releasing the lock that was just acquired.
*/
- /*
- * we need that spin_lock here - it prevents reordering between
- * update of i_flctx->flc_posix and check for it done in close().
- * rcu_read_lock() wouldn't do.
- */
- spin_lock(&current->files->file_lock);
- f = fcheck(fd);
- spin_unlock(&current->files->file_lock);
- if (!error && f != filp && flock.l_type != F_UNLCK) {
- flock.l_type = F_UNLCK;
- goto again;
+ if (!error && file_lock->fl_type != F_UNLCK) {
+ /*
+ * We need that spin_lock here - it prevents reordering between
+ * update of i_flctx->flc_posix and check for it done in
+ * close(). rcu_read_lock() wouldn't do.
+ */
+ spin_lock(&current->files->file_lock);
+ f = fcheck(fd);
+ spin_unlock(&current->files->file_lock);
+ if (f != filp) {
+ file_lock->fl_type = F_UNLCK;
+ error = do_lock_file_wait(filp, cmd, file_lock);
+ WARN_ON_ONCE(error);
+ error = -EBADF;
+ }
}
-
out:
locks_free_lock(file_lock);
return error;
@@ -2322,7 +2324,6 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
goto out;
}
-again:
error = flock64_to_posix_lock(filp, file_lock, &flock);
if (error)
goto out;
@@ -2364,14 +2365,22 @@ again:
* Attempt to detect a close/fcntl race and recover by
* releasing the lock that was just acquired.
*/
- spin_lock(&current->files->file_lock);
- f = fcheck(fd);
- spin_unlock(&current->files->file_lock);
- if (!error && f != filp && flock.l_type != F_UNLCK) {
- flock.l_type = F_UNLCK;
- goto again;
+ if (!error && file_lock->fl_type != F_UNLCK) {
+ /*
+ * We need that spin_lock here - it prevents reordering between
+ * update of i_flctx->flc_posix and check for it done in
+ * close(). rcu_read_lock() wouldn't do.
+ */
+ spin_lock(&current->files->file_lock);
+ f = fcheck(fd);
+ spin_unlock(&current->files->file_lock);
+ if (f != filp) {
+ file_lock->fl_type = F_UNLCK;
+ error = do_lock_file_wait(filp, cmd, file_lock);
+ WARN_ON_ONCE(error);
+ error = -EBADF;
+ }
}
-
out:
locks_free_lock(file_lock);
return error;
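
Net effect of the fcntl_setlk()/fcntl_setlk64() change with the diff noise stripped (identifiers as in the hunks; not a standalone program): instead of silently retrying with F_UNLCK via the removed 'goto again', a detected close/fcntl race now tears down the just-acquired lock and reports -EBADF, since the descriptor no longer refers to the file that was locked.

	if (!error && file_lock->fl_type != F_UNLCK) {
		spin_lock(&current->files->file_lock);
		f = fcheck(fd);			/* what does fd map to now? */
		spin_unlock(&current->files->file_lock);
		if (f != filp) {		/* fd was closed, maybe reused */
			file_lock->fl_type = F_UNLCK;
			error = do_lock_file_wait(filp, cmd, file_lock);
			WARN_ON_ONCE(error);	/* the unlock should not fail */
			error = -EBADF;
		}
	}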
diff --git a/fs/namei.c b/fs/namei.c
index 0c3974cd3ecd..d8ee4da93650 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1711,6 +1711,11 @@ static inline int should_follow_link(struct nameidata *nd, struct path *link,
return 0;
if (!follow)
return 0;
+ /* make sure that d_is_symlink above matches inode */
+ if (nd->flags & LOOKUP_RCU) {
+ if (read_seqcount_retry(&link->dentry->d_seq, seq))
+ return -ECHILD;
+ }
return pick_link(nd, link, inode, seq);
}
@@ -1742,11 +1747,11 @@ static int walk_component(struct nameidata *nd, int flags)
if (err < 0)
return err;
- inode = d_backing_inode(path.dentry);
seq = 0; /* we are already out of RCU mode */
err = -ENOENT;
if (d_is_negative(path.dentry))
goto out_path_put;
+ inode = d_backing_inode(path.dentry);
}
if (flags & WALK_PUT)
@@ -3130,12 +3135,12 @@ retry_lookup:
return error;
BUG_ON(nd->flags & LOOKUP_RCU);
- inode = d_backing_inode(path.dentry);
seq = 0; /* out of RCU mode, so the value doesn't matter */
if (unlikely(d_is_negative(path.dentry))) {
path_to_nameidata(&path, nd);
return -ENOENT;
}
+ inode = d_backing_inode(path.dentry);
finish_lookup:
if (nd->depth)
put_link(nd);
@@ -3144,11 +3149,6 @@ finish_lookup:
if (unlikely(error))
return error;
- if (unlikely(d_is_symlink(path.dentry)) && !(open_flag & O_PATH)) {
- path_to_nameidata(&path, nd);
- return -ELOOP;
- }
-
if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path.mnt) {
path_to_nameidata(&path, nd);
} else {
@@ -3167,6 +3167,10 @@ finish_open:
return error;
}
audit_inode(nd->name, nd->path.dentry, 0);
+ if (unlikely(d_is_symlink(nd->path.dentry)) && !(open_flag & O_PATH)) {
+ error = -ELOOP;
+ goto out;
+ }
error = -EISDIR;
if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
goto out;
@@ -3210,6 +3214,10 @@ opened:
goto exit_fput;
}
out:
+ if (unlikely(error > 0)) {
+ WARN_ON(1);
+ error = -EINVAL;
+ }
if (got_write)
mnt_drop_write(nd->path.mnt);
path_put(&save_parent);
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index f0e3e9e747dd..03446c5a3ec1 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -633,7 +633,7 @@ ncp_fill_cache(struct file *file, struct dir_context *ctx,
d_rehash(newdent);
} else {
spin_lock(&dentry->d_lock);
- NCP_FINFO(inode)->flags &= ~NCPI_DIR_CACHE;
+ NCP_FINFO(dir)->flags &= ~NCPI_DIR_CACHE;
spin_unlock(&dentry->d_lock);
}
} else {
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index ce5a21861074..5fc2162afb67 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -377,7 +377,7 @@ int nfs_readdir_xdr_filler(struct page **pages, nfs_readdir_descriptor_t *desc,
again:
timestamp = jiffies;
gencount = nfs_inc_attr_generation_counter();
- error = NFS_PROTO(inode)->readdir(file->f_path.dentry, cred, entry->cookie, pages,
+ error = NFS_PROTO(inode)->readdir(file_dentry(file), cred, entry->cookie, pages,
NFS_SERVER(inode)->dtsize, desc->plus);
if (error < 0) {
/* We requested READDIRPLUS, but the server doesn't grok it */
@@ -560,7 +560,7 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
count++;
if (desc->plus != 0)
- nfs_prime_dcache(desc->file->f_path.dentry, entry);
+ nfs_prime_dcache(file_dentry(desc->file), entry);
status = nfs_readdir_add_to_array(entry, page);
if (status != 0)
@@ -864,7 +864,7 @@ static bool nfs_dir_mapping_need_revalidate(struct inode *dir)
*/
static int nfs_readdir(struct file *file, struct dir_context *ctx)
{
- struct dentry *dentry = file->f_path.dentry;
+ struct dentry *dentry = file_dentry(file);
struct inode *inode = d_inode(dentry);
nfs_readdir_descriptor_t my_desc,
*desc = &my_desc;
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index 03516c80855a..2a2e2d8ddee5 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -145,7 +145,7 @@ static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
return false;
for (i = 0; i < m1->fh_versions_cnt; i++) {
bool found_fh = false;
- for (j = 0; j < m2->fh_versions_cnt; i++) {
+ for (j = 0; j < m2->fh_versions_cnt; j++) {
if (nfs_compare_fh(&m1->fh_versions[i],
&m2->fh_versions[j]) == 0) {
found_fh = true;
@@ -1859,11 +1859,9 @@ ff_layout_encode_layoutreturn(struct pnfs_layout_hdr *lo,
start = xdr_reserve_space(xdr, 4);
BUG_ON(!start);
- if (ff_layout_encode_ioerr(flo, xdr, args))
- goto out;
-
+ ff_layout_encode_ioerr(flo, xdr, args);
ff_layout_encode_iostats(flo, xdr, args);
-out:
+
*start = cpu_to_be32((xdr->p - start - 1) * 4);
dprintk("%s: Return\n", __func__);
}
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index c7e8b87da5b2..f714b98cfd74 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -927,7 +927,7 @@ int nfs_open(struct inode *inode, struct file *filp)
{
struct nfs_open_context *ctx;
- ctx = alloc_nfs_open_context(filp->f_path.dentry, filp->f_mode);
+ ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
nfs_file_set_open_context(filp, ctx);
@@ -1641,6 +1641,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
unsigned long invalid = 0;
unsigned long now = jiffies;
unsigned long save_cache_validity;
+ bool cache_revalidated = true;
dfprintk(VFS, "NFS: %s(%s/%lu fh_crc=0x%08x ct=%d info=0x%x)\n",
__func__, inode->i_sb->s_id, inode->i_ino,
@@ -1702,22 +1703,28 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
nfs_force_lookup_revalidate(inode);
inode->i_version = fattr->change_attr;
}
- } else
+ } else {
nfsi->cache_validity |= save_cache_validity;
+ cache_revalidated = false;
+ }
if (fattr->valid & NFS_ATTR_FATTR_MTIME) {
memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
- } else if (server->caps & NFS_CAP_MTIME)
+ } else if (server->caps & NFS_CAP_MTIME) {
nfsi->cache_validity |= save_cache_validity &
(NFS_INO_INVALID_ATTR
| NFS_INO_REVAL_FORCED);
+ cache_revalidated = false;
+ }
if (fattr->valid & NFS_ATTR_FATTR_CTIME) {
memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
- } else if (server->caps & NFS_CAP_CTIME)
+ } else if (server->caps & NFS_CAP_CTIME) {
nfsi->cache_validity |= save_cache_validity &
(NFS_INO_INVALID_ATTR
| NFS_INO_REVAL_FORCED);
+ cache_revalidated = false;
+ }
/* Check if our cached file size is stale */
if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
@@ -1737,19 +1744,23 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
(long long)cur_isize,
(long long)new_isize);
}
- } else
+ } else {
nfsi->cache_validity |= save_cache_validity &
(NFS_INO_INVALID_ATTR
| NFS_INO_REVAL_PAGECACHE
| NFS_INO_REVAL_FORCED);
+ cache_revalidated = false;
+ }
if (fattr->valid & NFS_ATTR_FATTR_ATIME)
memcpy(&inode->i_atime, &fattr->atime, sizeof(inode->i_atime));
- else if (server->caps & NFS_CAP_ATIME)
+ else if (server->caps & NFS_CAP_ATIME) {
nfsi->cache_validity |= save_cache_validity &
(NFS_INO_INVALID_ATIME
| NFS_INO_REVAL_FORCED);
+ cache_revalidated = false;
+ }
if (fattr->valid & NFS_ATTR_FATTR_MODE) {
if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)) {
@@ -1758,36 +1769,42 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
inode->i_mode = newmode;
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
}
- } else if (server->caps & NFS_CAP_MODE)
+ } else if (server->caps & NFS_CAP_MODE) {
nfsi->cache_validity |= save_cache_validity &
(NFS_INO_INVALID_ATTR
| NFS_INO_INVALID_ACCESS
| NFS_INO_INVALID_ACL
| NFS_INO_REVAL_FORCED);
+ cache_revalidated = false;
+ }
if (fattr->valid & NFS_ATTR_FATTR_OWNER) {
if (!uid_eq(inode->i_uid, fattr->uid)) {
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
inode->i_uid = fattr->uid;
}
- } else if (server->caps & NFS_CAP_OWNER)
+ } else if (server->caps & NFS_CAP_OWNER) {
nfsi->cache_validity |= save_cache_validity &
(NFS_INO_INVALID_ATTR
| NFS_INO_INVALID_ACCESS
| NFS_INO_INVALID_ACL
| NFS_INO_REVAL_FORCED);
+ cache_revalidated = false;
+ }
if (fattr->valid & NFS_ATTR_FATTR_GROUP) {
if (!gid_eq(inode->i_gid, fattr->gid)) {
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
inode->i_gid = fattr->gid;
}
- } else if (server->caps & NFS_CAP_OWNER_GROUP)
+ } else if (server->caps & NFS_CAP_OWNER_GROUP) {
nfsi->cache_validity |= save_cache_validity &
(NFS_INO_INVALID_ATTR
| NFS_INO_INVALID_ACCESS
| NFS_INO_INVALID_ACL
| NFS_INO_REVAL_FORCED);
+ cache_revalidated = false;
+ }
if (fattr->valid & NFS_ATTR_FATTR_NLINK) {
if (inode->i_nlink != fattr->nlink) {
@@ -1796,19 +1813,22 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
invalid |= NFS_INO_INVALID_DATA;
set_nlink(inode, fattr->nlink);
}
- } else if (server->caps & NFS_CAP_NLINK)
+ } else if (server->caps & NFS_CAP_NLINK) {
nfsi->cache_validity |= save_cache_validity &
(NFS_INO_INVALID_ATTR
| NFS_INO_REVAL_FORCED);
+ cache_revalidated = false;
+ }
if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) {
/*
* report the blocks in 512byte units
*/
inode->i_blocks = nfs_calc_block_size(fattr->du.nfs3.used);
- }
- if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
+ } else if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
inode->i_blocks = fattr->du.nfs2.blocks;
+ else
+ cache_revalidated = false;
/* Update attrtimeo value if we're out of the unstable period */
if (invalid & NFS_INO_INVALID_ATTR) {
@@ -1818,9 +1838,13 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
/* Set barrier to be more recent than all outstanding updates */
nfsi->attr_gencount = nfs_inc_attr_generation_counter();
} else {
- if (!time_in_range_open(now, nfsi->attrtimeo_timestamp, nfsi->attrtimeo_timestamp + nfsi->attrtimeo)) {
- if ((nfsi->attrtimeo <<= 1) > NFS_MAXATTRTIMEO(inode))
- nfsi->attrtimeo = NFS_MAXATTRTIMEO(inode);
+ if (cache_revalidated) {
+ if (!time_in_range_open(now, nfsi->attrtimeo_timestamp,
+ nfsi->attrtimeo_timestamp + nfsi->attrtimeo)) {
+ nfsi->attrtimeo <<= 1;
+ if (nfsi->attrtimeo > NFS_MAXATTRTIMEO(inode))
+ nfsi->attrtimeo = NFS_MAXATTRTIMEO(inode);
+ }
nfsi->attrtimeo_timestamp = now;
}
/* Set the barrier to be more recent than this fattr */
@@ -1829,7 +1853,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
}
/* Don't declare attrcache up to date if there were no attrs! */
- if (fattr->valid != 0)
+ if (cache_revalidated)
invalid &= ~NFS_INO_INVALID_ATTR;
/* Don't invalidate the data if we were to blame */
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index db9b5fea5b3e..679e003818b1 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -26,7 +26,7 @@ static int
nfs4_file_open(struct inode *inode, struct file *filp)
{
struct nfs_open_context *ctx;
- struct dentry *dentry = filp->f_path.dentry;
+ struct dentry *dentry = file_dentry(filp);
struct dentry *parent = NULL;
struct inode *dir;
unsigned openflags = filp->f_flags;
@@ -57,7 +57,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
parent = dget_parent(dentry);
dir = d_inode(parent);
- ctx = alloc_nfs_open_context(filp->f_path.dentry, filp->f_mode);
+ ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode);
err = PTR_ERR(ctx);
if (IS_ERR(ctx))
goto out;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 89818036f035..98a44157353a 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1385,6 +1385,7 @@ static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_s
* Protect the call to nfs4_state_set_mode_locked and
* serialise the stateid update
*/
+ spin_lock(&state->owner->so_lock);
write_seqlock(&state->seqlock);
if (deleg_stateid != NULL) {
nfs4_stateid_copy(&state->stateid, deleg_stateid);
@@ -1393,7 +1394,6 @@ static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_s
if (open_stateid != NULL)
nfs_set_open_stateid_locked(state, open_stateid, fmode);
write_sequnlock(&state->seqlock);
- spin_lock(&state->owner->so_lock);
update_open_stateflags(state, fmode);
spin_unlock(&state->owner->so_lock);
}
@@ -2461,9 +2461,9 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
dentry = d_add_unique(dentry, igrab(state->inode));
if (dentry == NULL) {
dentry = opendata->dentry;
- } else if (dentry != ctx->dentry) {
+ } else {
dput(ctx->dentry);
- ctx->dentry = dget(dentry);
+ ctx->dentry = dentry;
}
nfs_set_verifier(dentry,
nfs_save_change_attribute(d_inode(opendata->dir)));
@@ -8054,7 +8054,6 @@ static void nfs4_layoutreturn_release(void *calldata)
pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
pnfs_mark_matching_lsegs_invalid(lo, &freeme, &lrp->args.range);
pnfs_clear_layoutreturn_waitbit(lo);
- lo->plh_block_lgets--;
spin_unlock(&lo->plh_inode->i_lock);
pnfs_free_lseg_list(&freeme);
pnfs_put_layout_hdr(lrp->args.layout);
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index a9f096c7e99f..7d5351cd67fb 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -877,6 +877,7 @@ nfsd4_secinfo(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
&exp, &dentry);
if (err)
return err;
+ fh_unlock(&cstate->current_fh);
if (d_really_is_negative(dentry)) {
exp_put(exp);
err = nfserr_noent;
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 51c9e9ca39a4..12935209deca 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1072,8 +1072,9 @@ nfsd4_decode_rename(struct nfsd4_compoundargs *argp, struct nfsd4_rename *rename
READ_BUF(4);
rename->rn_snamelen = be32_to_cpup(p++);
- READ_BUF(rename->rn_snamelen + 4);
+ READ_BUF(rename->rn_snamelen);
SAVEMEM(rename->rn_sname, rename->rn_snamelen);
+ READ_BUF(4);
rename->rn_tnamelen = be32_to_cpup(p++);
READ_BUF(rename->rn_tnamelen);
SAVEMEM(rename->rn_tname, rename->rn_tnamelen);
@@ -1155,13 +1156,14 @@ nfsd4_decode_setclientid(struct nfsd4_compoundargs *argp, struct nfsd4_setclient
READ_BUF(8);
setclientid->se_callback_prog = be32_to_cpup(p++);
setclientid->se_callback_netid_len = be32_to_cpup(p++);
-
- READ_BUF(setclientid->se_callback_netid_len + 4);
+ READ_BUF(setclientid->se_callback_netid_len);
SAVEMEM(setclientid->se_callback_netid_val, setclientid->se_callback_netid_len);
+ READ_BUF(4);
setclientid->se_callback_addr_len = be32_to_cpup(p++);
- READ_BUF(setclientid->se_callback_addr_len + 4);
+ READ_BUF(setclientid->se_callback_addr_len);
SAVEMEM(setclientid->se_callback_addr_val, setclientid->se_callback_addr_len);
+ READ_BUF(4);
setclientid->se_callback_ident = be32_to_cpup(p++);
DECODE_TAIL;
@@ -1815,8 +1817,9 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
READ_BUF(4);
argp->taglen = be32_to_cpup(p++);
- READ_BUF(argp->taglen + 8);
+ READ_BUF(argp->taglen);
SAVEMEM(argp->tag, argp->taglen);
+ READ_BUF(8);
argp->minorversion = be32_to_cpup(p++);
argp->opcnt = be32_to_cpup(p++);
max_reply += 4 + (XDR_QUADLEN(argp->taglen) << 2);
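
Every READ_BUF change above follows one pattern: never bounds-check "len + 4" when len is a u32 taken off the wire, because the addition can wrap; check the variable-length part and the fixed-size part separately. A standalone demo of the wrap ('avail' models the remaining XDR buffer; the real READ_BUF macro differs):

	#include <stdio.h>
	#include <limits.h>

	static int old_check(unsigned int avail, unsigned int len)
	{
		return len + 4 <= avail;	/* wraps for len > UINT_MAX - 4 */
	}

	static int new_check(unsigned int avail, unsigned int len)
	{
		return len <= avail && 4 <= avail - len;
	}

	int main(void)
	{
		unsigned int avail = 64, evil = UINT_MAX - 1;

		printf("old: %d (bogus pass)\n", old_check(avail, evil));
		printf("new: %d\n", new_check(avail, evil));
		return 0;
	}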
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 7f604727f487..e6795c7c76a8 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -956,6 +956,7 @@ clean_orphan:
tmp_ret = ocfs2_del_inode_from_orphan(osb, inode, di_bh,
update_isize, end);
if (tmp_ret < 0) {
+ ocfs2_inode_unlock(inode, 1);
ret = tmp_ret;
mlog_errno(ret);
brelse(di_bh);
diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
index e36d63ff1783..f90931335c6b 100644
--- a/fs/ocfs2/dlm/dlmconvert.c
+++ b/fs/ocfs2/dlm/dlmconvert.c
@@ -262,6 +262,7 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
struct dlm_lock *lock, int flags, int type)
{
enum dlm_status status;
+ u8 old_owner = res->owner;
mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type,
lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS);
@@ -287,6 +288,19 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
status = DLM_DENIED;
goto bail;
}
+
+ if (lock->ml.type == type && lock->ml.convert_type == LKM_IVMODE) {
+ mlog(0, "last convert request returned DLM_RECOVERING, but "
+ "owner has already queued and sent ast to me. res %.*s, "
+ "(cookie=%u:%llu, type=%d, conv=%d)\n",
+ res->lockname.len, res->lockname.name,
+ dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
+ lock->ml.type, lock->ml.convert_type);
+ status = DLM_NORMAL;
+ goto bail;
+ }
+
res->state |= DLM_LOCK_RES_IN_PROGRESS;
/* move lock to local convert queue */
/* do not alter lock refcount. switching lists. */
@@ -316,11 +330,19 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
spin_lock(&res->spinlock);
res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
lock->convert_pending = 0;
- /* if it failed, move it back to granted queue */
+ /* if it failed, move it back to granted queue.
+ * if the master returns DLM_NORMAL and then goes down before sending the
+ * ast, the lock may already have been moved to the granted queue; reset
+ * to DLM_RECOVERING and retry the convert */
if (status != DLM_NORMAL) {
if (status != DLM_NOTQUEUED)
dlm_error(status);
dlm_revert_pending_convert(res, lock);
+ } else if ((res->state & DLM_LOCK_RES_RECOVERING) ||
+ (old_owner != res->owner)) {
+ mlog(0, "res %.*s is in recovering or has been recovered.\n",
+ res->lockname.len, res->lockname.name);
+ status = DLM_RECOVERING;
}
bail:
spin_unlock(&res->spinlock);
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 84f2f8079466..4e2162b355db 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -2519,6 +2519,11 @@ static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
spin_lock(&dlm->master_lock);
ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
namelen, target, dlm->node_num);
+ /* get an extra reference on the mle.
+ * otherwise the assert_master from the new
+ * master will destroy this.
+ */
+ dlm_get_mle_inuse(mle);
spin_unlock(&dlm->master_lock);
spin_unlock(&dlm->spinlock);
@@ -2554,6 +2559,7 @@ fail:
if (mle_added) {
dlm_mle_detach_hb_events(dlm, mle);
dlm_put_mle(mle);
+ dlm_put_mle_inuse(mle);
} else if (mle) {
kmem_cache_free(dlm_mle_cache, mle);
mle = NULL;
@@ -2571,17 +2577,6 @@ fail:
* ensure that all assert_master work is flushed. */
flush_workqueue(dlm->dlm_worker);
- /* get an extra reference on the mle.
- * otherwise the assert_master from the new
- * master will destroy this.
- * also, make sure that all callers of dlm_get_mle
- * take both dlm->spinlock and dlm->master_lock */
- spin_lock(&dlm->spinlock);
- spin_lock(&dlm->master_lock);
- dlm_get_mle_inuse(mle);
- spin_unlock(&dlm->master_lock);
- spin_unlock(&dlm->spinlock);
-
/* notify new node and send all lock state */
/* call send_one_lockres with migration flag.
* this serves as notice to the target node that a
@@ -3312,6 +3307,15 @@ top:
mle->new_master != dead_node)
continue;
+ if (mle->new_master == dead_node && mle->inuse) {
+ mlog(ML_NOTICE, "%s: target %u died during "
+ "migration from %u, the MLE is "
+ "still keep used, ignore it!\n",
+ dlm->name, dead_node,
+ mle->master);
+ continue;
+ }
+
/* If we have reached this point, this mle needs to be
* removed from the list and freed. */
dlm_clean_migration_mle(dlm, mle);
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 9e4f862d20fe..4a338803e7e9 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -2064,7 +2064,6 @@ void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
dlm_lock_get(lock);
if (lock->convert_pending) {
/* move converting lock back to granted */
- BUG_ON(i != DLM_CONVERTING_LIST);
mlog(0, "node died with convert pending "
"on %.*s. move back to granted list.\n",
res->lockname.len, res->lockname.name);
@@ -2360,6 +2359,8 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
break;
}
}
+ dlm_lockres_clear_refmap_bit(dlm, res,
+ dead_node);
spin_unlock(&res->spinlock);
continue;
}
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 20276e340339..b002acf50203 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -1390,6 +1390,7 @@ static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
unsigned int gen;
int noqueue_attempted = 0;
int dlm_locked = 0;
+ int kick_dc = 0;
if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED)) {
mlog_errno(-EINVAL);
@@ -1524,7 +1525,12 @@ update_holders:
unlock:
lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
+ /* ocfs2_unblock_lock requeues on seeing OCFS2_LOCK_UPCONVERT_FINISHING */
+ kick_dc = (lockres->l_flags & OCFS2_LOCK_BLOCKED);
+
spin_unlock_irqrestore(&lockres->l_lock, flags);
+ if (kick_dc)
+ ocfs2_wake_downconvert_thread(osb);
out:
/*
* This is helping work around a lock inversion between the page lock
diff --git a/fs/open.c b/fs/open.c
index b6f1e96a7c0b..6a24f988d253 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -995,14 +995,12 @@ struct file *filp_open(const char *filename, int flags, umode_t mode)
EXPORT_SYMBOL(filp_open);
struct file *file_open_root(struct dentry *dentry, struct vfsmount *mnt,
- const char *filename, int flags)
+ const char *filename, int flags, umode_t mode)
{
struct open_flags op;
- int err = build_open_flags(flags, 0, &op);
+ int err = build_open_flags(flags, mode, &op);
if (err)
return ERR_PTR(err);
- if (flags & O_CREAT)
- return ERR_PTR(-EINVAL);
return do_file_open_root(dentry, mnt, filename, &op);
}
EXPORT_SYMBOL(file_open_root);
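
file_open_root() now takes the creation mode, so O_CREAT callers no longer have to be rejected. A hypothetical caller sketch (the path, flags and mode are illustrative, not from this patch):

	struct file *file;

	file = file_open_root(root_dentry, mnt, "log/trace.txt",
			      O_WRONLY | O_CREAT | O_APPEND, 0600);
	if (IS_ERR(file))
		return PTR_ERR(file);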
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 0a8983492d91..eff6319d5037 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -22,9 +22,9 @@
int ovl_copy_xattr(struct dentry *old, struct dentry *new)
{
- ssize_t list_size, size;
- char *buf, *name, *value;
- int error;
+ ssize_t list_size, size, value_size = 0;
+ char *buf, *name, *value = NULL;
+ int uninitialized_var(error);
if (!old->d_inode->i_op->getxattr ||
!new->d_inode->i_op->getxattr)
@@ -41,29 +41,40 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new)
if (!buf)
return -ENOMEM;
- error = -ENOMEM;
- value = kmalloc(XATTR_SIZE_MAX, GFP_KERNEL);
- if (!value)
- goto out;
-
list_size = vfs_listxattr(old, buf, list_size);
if (list_size <= 0) {
error = list_size;
- goto out_free_value;
+ goto out;
}
for (name = buf; name < (buf + list_size); name += strlen(name) + 1) {
- size = vfs_getxattr(old, name, value, XATTR_SIZE_MAX);
- if (size <= 0) {
+retry:
+ size = vfs_getxattr(old, name, value, value_size);
+ if (size == -ERANGE)
+ size = vfs_getxattr(old, name, NULL, 0);
+
+ if (size < 0) {
error = size;
- goto out_free_value;
+ break;
+ }
+
+ if (size > value_size) {
+ void *new;
+
+ new = krealloc(value, size, GFP_KERNEL);
+ if (!new) {
+ error = -ENOMEM;
+ break;
+ }
+ value = new;
+ value_size = size;
+ goto retry;
}
+
error = vfs_setxattr(new, name, value, size, 0);
if (error)
- goto out_free_value;
+ break;
}
-
-out_free_value:
kfree(value);
out:
kfree(buf);
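
The copy_up change replaces an unconditional XATTR_SIZE_MAX allocation with grow-on-demand: call getxattr with the current buffer and, on -ERANGE, re-query the needed size, krealloc, and retry. A userspace model of the same loop (fake_getxattr stands in for vfs_getxattr: size 0 queries the needed size, a short buffer yields -ERANGE):

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/types.h>

	static ssize_t fake_getxattr(const char *src, char *buf, size_t size)
	{
		size_t need = strlen(src);

		if (size == 0)
			return (ssize_t)need;	/* size query */
		if (size < need)
			return -ERANGE;		/* buffer too small */
		memcpy(buf, src, need);
		return (ssize_t)need;
	}

	int main(void)
	{
		const char *values[] = { "a", "longer-value", "even-longer-value!" };
		char *value = NULL;
		size_t value_size = 0;

		for (int i = 0; i < 3; i++) {
			ssize_t size;
	retry:
			size = fake_getxattr(values[i], value, value_size);
			if (size == -ERANGE)
				size = fake_getxattr(values[i], NULL, 0);
			if (size < 0)
				return 1;
			if ((size_t)size > value_size) {
				char *tmp = realloc(value, (size_t)size);

				if (!tmp)
					return 1;
				value = tmp;
				value_size = (size_t)size;
				goto retry;	/* as in the kernel loop */
			}
			printf("copied %zd bytes\n", size);
		}
		free(value);
		return 0;
	}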
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index 692ceda3bc21..a2b1d7ce3e1a 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -618,7 +618,8 @@ static int ovl_remove_upper(struct dentry *dentry, bool is_dir)
* sole user of this dentry. Too tricky... Just unhash for
* now.
*/
- d_drop(dentry);
+ if (!err)
+ d_drop(dentry);
mutex_unlock(&dir->i_mutex);
return err;
@@ -903,6 +904,13 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
if (!overwrite && new_is_dir && !old_opaque && new_opaque)
ovl_remove_opaque(newdentry);
+ /*
+ * The old dentry now lives in a different location, so dentries in
+ * the lowerstack are stale. We cannot drop them here because
+ * access to them is lockless. This can only be a pure upper or an
+ * opaque directory - numlower is zero - or an upper non-dir
+ * entry, whose pureness is tracked by the opaque flag.
+ */
if (old_opaque != new_opaque) {
ovl_dentry_set_opaque(old, new_opaque);
if (!overwrite)
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index 4060ffde8722..05ac9a95e881 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -42,6 +42,19 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
int err;
struct dentry *upperdentry;
+ /*
+ * Check for permissions before trying to copy-up. This is redundant
+ * since it will be rechecked later by ->setattr() on upper dentry. But
+ * without this, copy-up can be triggered by just about anybody.
+ *
+ * We don't initialize inode->size, which just means that
+ * inode_newsize_ok() will always check against MAX_LFS_FILESIZE and not
+ * check for a swapfile (which this won't be anyway).
+ */
+ err = inode_change_ok(dentry->d_inode, attr);
+ if (err)
+ return err;
+
err = ovl_want_write(dentry);
if (err)
goto out;
@@ -52,6 +65,8 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
mutex_lock(&upperdentry->d_inode->i_mutex);
err = notify_change(upperdentry, attr, NULL);
+ if (!err)
+ ovl_copyattr(upperdentry->d_inode, dentry->d_inode);
mutex_unlock(&upperdentry->d_inode->i_mutex);
}
ovl_drop_write(dentry);
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
index 70e9af551600..adcb1398c481 100644
--- a/fs/overlayfs/readdir.c
+++ b/fs/overlayfs/readdir.c
@@ -571,7 +571,8 @@ void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list)
(int) PTR_ERR(dentry));
continue;
}
- ovl_cleanup(upper->d_inode, dentry);
+ if (dentry->d_inode)
+ ovl_cleanup(upper->d_inode, dentry);
dput(dentry);
}
mutex_unlock(&upper->d_inode->i_mutex);
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index e38ee0fed24a..a1acc6004a91 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -9,6 +9,7 @@
#include <linux/fs.h>
#include <linux/namei.h>
+#include <linux/pagemap.h>
#include <linux/xattr.h>
#include <linux/security.h>
#include <linux/mount.h>
@@ -75,12 +76,14 @@ enum ovl_path_type ovl_path_type(struct dentry *dentry)
if (oe->__upperdentry) {
type = __OVL_PATH_UPPER;
- if (oe->numlower) {
- if (S_ISDIR(dentry->d_inode->i_mode))
- type |= __OVL_PATH_MERGE;
- } else if (!oe->opaque) {
+ /*
+ * A non-dir dentry can hold a lower dentry from a previous
+ * location. Its purity depends only on the opaque flag.
+ */
+ if (oe->numlower && S_ISDIR(dentry->d_inode->i_mode))
+ type |= __OVL_PATH_MERGE;
+ else if (!oe->opaque)
type |= __OVL_PATH_PURE;
- }
} else {
if (oe->numlower > 1)
type |= __OVL_PATH_MERGE;
@@ -273,6 +276,37 @@ static void ovl_dentry_release(struct dentry *dentry)
}
}
+static struct dentry *ovl_d_real(struct dentry *dentry, struct inode *inode)
+{
+ struct dentry *real;
+
+ if (d_is_dir(dentry)) {
+ if (!inode || inode == d_inode(dentry))
+ return dentry;
+ goto bug;
+ }
+
+ real = ovl_dentry_upper(dentry);
+ if (real && (!inode || inode == d_inode(real)))
+ return real;
+
+ real = ovl_dentry_lower(dentry);
+ if (!real)
+ goto bug;
+
+ if (!inode || inode == d_inode(real))
+ return real;
+
+ /* Handle recursion */
+ if (real->d_flags & DCACHE_OP_REAL)
+ return real->d_op->d_real(real, inode);
+
+bug:
+ WARN(1, "ovl_d_real(%pd4, %s:%lu\n): real dentry not found\n", dentry,
+ inode ? inode->i_sb->s_id : "NULL", inode ? inode->i_ino : 0);
+ return dentry;
+}
+
static int ovl_dentry_revalidate(struct dentry *dentry, unsigned int flags)
{
struct ovl_entry *oe = dentry->d_fsdata;
@@ -317,10 +351,13 @@ static int ovl_dentry_weak_revalidate(struct dentry *dentry, unsigned int flags)
static const struct dentry_operations ovl_dentry_operations = {
.d_release = ovl_dentry_release,
.d_select_inode = ovl_d_select_inode,
+ .d_real = ovl_d_real,
};
static const struct dentry_operations ovl_reval_dentry_operations = {
.d_release = ovl_dentry_release,
+ .d_select_inode = ovl_d_select_inode,
+ .d_real = ovl_d_real,
.d_revalidate = ovl_dentry_revalidate,
.d_weak_revalidate = ovl_dentry_weak_revalidate,
};
@@ -910,6 +947,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
}
sb->s_stack_depth = 0;
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
if (ufs->config.upperdir) {
if (!ufs->config.workdir) {
pr_err("overlayfs: missing 'workdir'\n");
@@ -1053,6 +1091,9 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
root_dentry->d_fsdata = oe;
+ ovl_copyattr(ovl_dentry_real(root_dentry)->d_inode,
+ root_dentry->d_inode);
+
sb->s_magic = OVERLAYFS_SUPER_MAGIC;
sb->s_op = &ovl_super_operations;
sb->s_root = root_dentry;
diff --git a/fs/proc/array.c b/fs/proc/array.c
index d73291f5f0fc..b6c00ce0e29e 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -395,7 +395,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
state = *get_task_state(task);
vsize = eip = esp = 0;
- permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
+ permitted = ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS | PTRACE_MODE_NOAUDIT);
mm = get_task_mm(task);
if (mm) {
vsize = task_vsize(mm);
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 3b6962c52965..57df8a52e780 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -403,7 +403,7 @@ static const struct file_operations proc_pid_cmdline_ops = {
static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task)
{
- struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
+ struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
if (mm && !IS_ERR(mm)) {
unsigned int nwords = 0;
do {
@@ -430,7 +430,8 @@ static int proc_pid_wchan(struct seq_file *m, struct pid_namespace *ns,
wchan = get_wchan(task);
- if (wchan && ptrace_may_access(task, PTRACE_MODE_READ) && !lookup_symbol_name(wchan, symname))
+ if (wchan && ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)
+ && !lookup_symbol_name(wchan, symname))
seq_printf(m, "%s", symname);
else
seq_putc(m, '0');
@@ -444,7 +445,7 @@ static int lock_trace(struct task_struct *task)
int err = mutex_lock_killable(&task->signal->cred_guard_mutex);
if (err)
return err;
- if (!ptrace_may_access(task, PTRACE_MODE_ATTACH)) {
+ if (!ptrace_may_access(task, PTRACE_MODE_ATTACH_FSCREDS)) {
mutex_unlock(&task->signal->cred_guard_mutex);
return -EPERM;
}
@@ -697,7 +698,7 @@ static int proc_fd_access_allowed(struct inode *inode)
*/
task = get_proc_task(inode);
if (task) {
- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
+ allowed = ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS);
put_task_struct(task);
}
return allowed;
@@ -732,7 +733,7 @@ static bool has_pid_permissions(struct pid_namespace *pid,
return true;
if (in_group_p(pid->pid_gid))
return true;
- return ptrace_may_access(task, PTRACE_MODE_READ);
+ return ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS);
}
@@ -809,7 +810,7 @@ struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode)
struct mm_struct *mm = ERR_PTR(-ESRCH);
if (task) {
- mm = mm_access(task, mode);
+ mm = mm_access(task, mode | PTRACE_MODE_FSCREDS);
put_task_struct(task);
if (!IS_ERR_OR_NULL(mm)) {
@@ -1856,7 +1857,7 @@ static int map_files_d_revalidate(struct dentry *dentry, unsigned int flags)
if (!task)
goto out_notask;
- mm = mm_access(task, PTRACE_MODE_READ);
+ mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
if (IS_ERR_OR_NULL(mm))
goto out;
@@ -2007,7 +2008,7 @@ static struct dentry *proc_map_files_lookup(struct inode *dir,
goto out;
result = -EACCES;
- if (!ptrace_may_access(task, PTRACE_MODE_READ))
+ if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
goto out_put_task;
result = -ENOENT;
@@ -2060,7 +2061,7 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
goto out;
ret = -EACCES;
- if (!ptrace_may_access(task, PTRACE_MODE_READ))
+ if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
goto out_put_task;
ret = 0;
@@ -2530,7 +2531,7 @@ static int do_io_accounting(struct task_struct *task, struct seq_file *m, int wh
if (result)
return result;
- if (!ptrace_may_access(task, PTRACE_MODE_READ)) {
+ if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) {
result = -EACCES;
goto out_unlock;
}
diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c
index f6e8354b8cea..1b0ea4a5d89e 100644
--- a/fs/proc/namespaces.c
+++ b/fs/proc/namespaces.c
@@ -42,7 +42,7 @@ static const char *proc_ns_follow_link(struct dentry *dentry, void **cookie)
if (!task)
return error;
- if (ptrace_may_access(task, PTRACE_MODE_READ)) {
+ if (ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) {
error = ns_get_path(&ns_path, task, ns_ops);
if (!error)
nd_jump_link(&ns_path);
@@ -63,7 +63,7 @@ static int proc_ns_readlink(struct dentry *dentry, char __user *buffer, int bufl
if (!task)
return res;
- if (ptrace_may_access(task, PTRACE_MODE_READ)) {
+ if (ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) {
res = ns_get_name(name, sizeof(name), task, ns_ops);
if (res >= 0)
res = readlink_copy(buffer, buflen, name);
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 91698054b965..67aa7e63a5c1 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1535,18 +1535,19 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
unsigned long addr, unsigned long end, struct mm_walk *walk)
{
+ pte_t huge_pte = huge_ptep_get(pte);
struct numa_maps *md;
struct page *page;
- if (!pte_present(*pte))
+ if (!pte_present(huge_pte))
return 0;
- page = pte_page(*pte);
+ page = pte_page(huge_pte);
if (!page)
return 0;
md = walk->private;
- gather_stats(page, md, pte_dirty(*pte), 1);
+ gather_stats(page, md, pte_dirty(huge_pte), 1);
return 0;
}
diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c
index 8ebd9a334085..87645955990d 100644
--- a/fs/proc_namespace.c
+++ b/fs/proc_namespace.c
@@ -197,6 +197,8 @@ static int show_vfsstat(struct seq_file *m, struct vfsmount *mnt)
if (sb->s_op->show_devname) {
seq_puts(m, "device ");
err = sb->s_op->show_devname(m, mnt_path.dentry);
+ if (err)
+ goto out;
} else {
if (r->mnt_devname) {
seq_puts(m, "device ");
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index ef0d64b2a6d9..353ff31dcee1 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -1398,7 +1398,7 @@ static int dquot_active(const struct inode *inode)
static int __dquot_initialize(struct inode *inode, int type)
{
int cnt, init_needed = 0;
- struct dquot **dquots, *got[MAXQUOTAS];
+ struct dquot **dquots, *got[MAXQUOTAS] = {};
struct super_block *sb = inode->i_sb;
qsize_t rsv;
int ret = 0;
@@ -1415,7 +1415,6 @@ static int __dquot_initialize(struct inode *inode, int type)
int rc;
struct dquot *dquot;
- got[cnt] = NULL;
if (type != -1 && cnt != type)
continue;
/*
diff --git a/fs/splice.c b/fs/splice.c
index 4cf700d50b40..0f77e9682857 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -185,6 +185,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
unsigned int spd_pages = spd->nr_pages;
int ret, do_wakeup, page_nr;
+ if (!spd_pages)
+ return 0;
+
ret = 0;
do_wakeup = 0;
page_nr = 0;
diff --git a/fs/super.c b/fs/super.c
index 1014e7cc355f..8d99a7b948ff 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -415,6 +415,7 @@ void generic_shutdown_super(struct super_block *sb)
sb->s_flags &= ~MS_ACTIVE;
fsnotify_unmount_inodes(sb);
+ cgroup_writeback_umount();
evict_inodes(sb);
diff --git a/fs/timerfd.c b/fs/timerfd.c
index b94fa6c3c6eb..053818dd6c18 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -153,7 +153,7 @@ static ktime_t timerfd_get_remaining(struct timerfd_ctx *ctx)
if (isalarm(ctx))
remaining = alarm_expires_remaining(&ctx->t.alarm);
else
- remaining = hrtimer_expires_remaining(&ctx->t.tmr);
+ remaining = hrtimer_expires_remaining_adjusted(&ctx->t.tmr);
return remaining.tv64 < 0 ? ktime_set(0, 0): remaining;
}
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 8d0b3ade0ff0..566df9b5a6cb 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -2047,14 +2047,29 @@ void udf_write_aext(struct inode *inode, struct extent_position *epos,
epos->offset += adsize;
}
+/*
+ * Only 1 indirect extent in a row really makes sense, but allow up to 16 in case
+ * someone does some weird stuff.
+ */
+#define UDF_MAX_INDIR_EXTS 16
+
int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
{
int8_t etype;
+ unsigned int indirections = 0;
while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
(EXT_NEXT_EXTENT_ALLOCDECS >> 30)) {
int block;
+
+ if (++indirections > UDF_MAX_INDIR_EXTS) {
+ udf_err(inode->i_sb,
+ "too many indirect extents in inode %lu\n",
+ inode->i_ino);
+ return -1;
+ }
+
epos->block = *eloc;
epos->offset = sizeof(struct allocExtDesc);
brelse(epos->bh);
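
The indirections counter guards udf_next_aext() against on-disk extent chains that loop back on themselves. A toy model of why the cap matters (plain arrays instead of allocation extent descriptors; the 16 comes from UDF_MAX_INDIR_EXTS above):

	#include <stdio.h>

	#define UDF_MAX_INDIR_EXTS 16

	int main(void)
	{
		int next[4] = { 1, 2, 3, 1 };	/* block 3 points back to 1 */
		unsigned int indirections = 0;
		int blk = 0;

		while (next[blk] >= 0) {
			if (++indirections > UDF_MAX_INDIR_EXTS) {
				printf("too many indirect extents, bailing\n");
				return 1;	/* corrupt or crafted image */
			}
			blk = next[blk];
		}
		return 0;
	}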
diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
index ab478e62baae..e788a05aab83 100644
--- a/fs/udf/unicode.c
+++ b/fs/udf/unicode.c
@@ -128,11 +128,15 @@ int udf_CS0toUTF8(struct ustr *utf_o, const struct ustr *ocu_i)
if (c < 0x80U)
utf_o->u_name[utf_o->u_len++] = (uint8_t)c;
else if (c < 0x800U) {
+ if (utf_o->u_len > (UDF_NAME_LEN - 4))
+ break;
utf_o->u_name[utf_o->u_len++] =
(uint8_t)(0xc0 | (c >> 6));
utf_o->u_name[utf_o->u_len++] =
(uint8_t)(0x80 | (c & 0x3f));
} else {
+ if (utf_o->u_len > (UDF_NAME_LEN - 5))
+ break;
utf_o->u_name[utf_o->u_len++] =
(uint8_t)(0xe0 | (c >> 12));
utf_o->u_name[utf_o->u_len++] =
@@ -173,17 +177,22 @@ int udf_CS0toUTF8(struct ustr *utf_o, const struct ustr *ocu_i)
static int udf_UTF8toCS0(dstring *ocu, struct ustr *utf, int length)
{
unsigned c, i, max_val, utf_char;
- int utf_cnt, u_len;
+ int utf_cnt, u_len, u_ch;
memset(ocu, 0, sizeof(dstring) * length);
ocu[0] = 8;
max_val = 0xffU;
+ u_ch = 1;
try_again:
u_len = 0U;
utf_char = 0U;
utf_cnt = 0U;
for (i = 0U; i < utf->u_len; i++) {
+ /* Name didn't fit? */
+ if (u_len + 1 + u_ch >= length)
+ return 0;
+
c = (uint8_t)utf->u_name[i];
/* Complete a multi-byte UTF-8 character */
@@ -225,6 +234,7 @@ try_again:
if (max_val == 0xffU) {
max_val = 0xffffU;
ocu[0] = (uint8_t)0x10U;
+ u_ch = 2;
goto try_again;
}
goto error_out;
@@ -277,7 +287,7 @@ static int udf_CS0toNLS(struct nls_table *nls, struct ustr *utf_o,
c = (c << 8) | ocu[i++];
len = nls->uni2char(c, &utf_o->u_name[utf_o->u_len],
- UDF_NAME_LEN - utf_o->u_len);
+ UDF_NAME_LEN - 2 - utf_o->u_len);
/* Valid character? */
if (len >= 0)
utf_o->u_len += len;
@@ -295,15 +305,19 @@ static int udf_NLStoCS0(struct nls_table *nls, dstring *ocu, struct ustr *uni,
int len;
unsigned i, max_val;
uint16_t uni_char;
- int u_len;
+ int u_len, u_ch;
memset(ocu, 0, sizeof(dstring) * length);
ocu[0] = 8;
max_val = 0xffU;
+ u_ch = 1;
try_again:
u_len = 0U;
for (i = 0U; i < uni->u_len; i++) {
+ /* Name didn't fit? */
+ if (u_len + 1 + u_ch >= length)
+ return 0;
len = nls->char2uni(&uni->u_name[i], uni->u_len - i, &uni_char);
if (!len)
continue;
@@ -316,6 +330,7 @@ try_again:
if (uni_char > max_val) {
max_val = 0xffffU;
ocu[0] = (uint8_t)0x10U;
+ u_ch = 2;
goto try_again;
}
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 68a62457e685..d473e6e07a7e 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -287,6 +287,12 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address,
goto out;
/*
+ * We don't do userfault handling for the final child pid update.
+ */
+ if (current->flags & PF_EXITING)
+ goto out;
+
+ /*
* Check that we can return VM_FAULT_RETRY.
*
* NOTE: it should become possible to return VM_FAULT_RETRY
diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
index 8774498ce0ff..e2536bb1c760 100644
--- a/fs/xfs/libxfs/xfs_format.h
+++ b/fs/xfs/libxfs/xfs_format.h
@@ -786,7 +786,7 @@ typedef struct xfs_agfl {
__be64 agfl_lsn;
__be32 agfl_crc;
__be32 agfl_bno[]; /* actually XFS_AGFL_SIZE(mp) */
-} xfs_agfl_t;
+} __attribute__((packed)) xfs_agfl_t;
#define XFS_AGFL_CRC_OFF offsetof(struct xfs_agfl, agfl_crc)
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index 268c00f4f83a..65485cfc4ade 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -62,11 +62,12 @@ xfs_inobp_check(
* has not had the inode cores stamped into it. Hence for readahead, the buffer
* may be potentially invalid.
*
- * If the readahead buffer is invalid, we don't want to mark it with an error,
- * but we do want to clear the DONE status of the buffer so that a followup read
- * will re-read it from disk. This will ensure that we don't get an unnecessary
- * warnings during log recovery and we don't get unnecssary panics on debug
- * kernels.
+ * If the readahead buffer is invalid, we need to mark it with an error and
+ * clear the DONE status of the buffer so that a followup read will re-read it
+ * from disk. We don't report the error otherwise to avoid warnings during log
+ * recovery and so we don't get unnecessary panics on debug kernels. We use EIO here
+ * because all we want to do is say readahead failed; there is no-one to report
+ * the error to, so this will distinguish it from a non-ra verifier failure.
*/
static void
xfs_inode_buf_verify(
@@ -93,6 +94,7 @@ xfs_inode_buf_verify(
XFS_RANDOM_ITOBP_INOTOBP))) {
if (readahead) {
bp->b_flags &= ~XBF_DONE;
+ xfs_buf_ioerror(bp, -EIO);
return;
}
diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
index 0ef7c2ed3f8a..4fa14820e2e2 100644
--- a/fs/xfs/xfs_attr_list.c
+++ b/fs/xfs/xfs_attr_list.c
@@ -202,8 +202,10 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
sbp->namelen,
sbp->valuelen,
&sbp->name[sbp->namelen]);
- if (error)
+ if (error) {
+ kmem_free(sbuf);
return error;
+ }
if (context->seen_enough)
break;
cursor->offset++;
@@ -454,14 +456,13 @@ xfs_attr3_leaf_list_int(
args.rmtblkcnt = xfs_attr3_rmt_blocks(
args.dp->i_mount, valuelen);
retval = xfs_attr_rmtval_get(&args);
- if (retval)
- return retval;
- retval = context->put_listent(context,
- entry->flags,
- name_rmt->name,
- (int)name_rmt->namelen,
- valuelen,
- args.value);
+ if (!retval)
+ retval = context->put_listent(context,
+ entry->flags,
+ name_rmt->name,
+ (int)name_rmt->namelen,
+ valuelen,
+ args.value);
kmem_free(args.value);
} else {
retval = context->put_listent(context,
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 3243cdf97f33..39090fc56f09 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -604,6 +604,13 @@ found:
}
}
+ /*
+ * Clear b_error if this is a lookup from a caller that doesn't expect
+ * valid data to be found in the buffer.
+ */
+ if (!(flags & XBF_READ))
+ xfs_buf_ioerror(bp, 0);
+
XFS_STATS_INC(target->bt_mount, xb_get);
trace_xfs_buf_get(bp, flags, _RET_IP_);
return bp;
@@ -1520,6 +1527,16 @@ xfs_wait_buftarg(
LIST_HEAD(dispose);
int loop = 0;
+ /*
+ * We need to flush the buffer workqueue to ensure that all IO
+ * completion processing is 100% done. Just waiting on buffer locks is
+ * not sufficient for async IO as the reference count held over IO is
+ * not released until after the buffer lock is dropped. Hence we need to
+ * ensure here that all reference counts have been dropped before we
+ * start walking the LRU list.
+ */
+ drain_workqueue(btp->bt_mount->m_buf_workqueue);
+
/* loop until there is nothing left on the lru list. */
while (list_lru_count(&btp->bt_lru)) {
list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele,
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index aa67339b9537..4f18fd92ca13 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -497,7 +497,6 @@ xfsaild(
long tout = 0; /* milliseconds */
current->flags |= PF_MEMALLOC;
- set_freezable();
while (!kthread_should_stop()) {
if (tout && tout <= 20)
diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h
index c30266e94806..8ef0ccbf8167 100644
--- a/include/asm-generic/bitops/lock.h
+++ b/include/asm-generic/bitops/lock.h
@@ -29,16 +29,16 @@ do { \
* @nr: the bit to set
* @addr: the address to start counting from
*
- * This operation is like clear_bit_unlock, however it is not atomic.
- * It does provide release barrier semantics so it can be used to unlock
- * a bit lock, however it would only be used if no other CPU can modify
- * any bits in the memory until the lock is released (a good example is
- * if the bit lock itself protects access to the other bits in the word).
+ * A weaker form of clear_bit_unlock() as used by __bit_lock_unlock(). If all
+ * the bits in the word are protected by this lock some archs can use weaker
+ * ops to safely unlock.
+ *
+ * See for example x86's implementation.
*/
#define __clear_bit_unlock(nr, addr) \
do { \
- smp_mb(); \
- __clear_bit(nr, addr); \
+ smp_mb__before_atomic(); \
+ clear_bit(nr, addr); \
} while (0)
#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
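For context, a minimal sketch (not part of the patch) of the bit-lock pattern these semantics serve, assuming the stock test_and_set_bit_lock() acquire side:

/*
 * Bit 0 is the lock; the lock also protects the other bits of the same
 * word, which is what lets the unlock side use __clear_bit_unlock().
 * After this change it expands to clear_bit() behind a release barrier
 * instead of a plain non-atomic store.
 */
static unsigned long state;	/* bit 0 = lock, remaining bits = data */

static void update_state(void)
{
	while (test_and_set_bit_lock(0, &state))
		cpu_relax();			/* spin until the lock is ours */
	__set_bit(1, &state);			/* non-atomic is safe: lock held */
	__clear_bit_unlock(0, &state);		/* release the lock */
}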
diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h
index 0419485891f2..0f1c6f315cdc 100644
--- a/include/asm-generic/cputime_nsecs.h
+++ b/include/asm-generic/cputime_nsecs.h
@@ -75,7 +75,7 @@ typedef u64 __nocast cputime64_t;
*/
static inline cputime_t timespec_to_cputime(const struct timespec *val)
{
- u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
+ u64 ret = (u64)val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
return (__force cputime_t) ret;
}
static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
@@ -91,7 +91,8 @@ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
*/
static inline cputime_t timeval_to_cputime(const struct timeval *val)
{
- u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC;
+ u64 ret = (u64)val->tv_sec * NSEC_PER_SEC +
+ val->tv_usec * NSEC_PER_USEC;
return (__force cputime_t) ret;
}
static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
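The new casts matter on 32-bit targets, where tv_sec is a 32-bit long and the multiplication would otherwise be performed in 32 bits and wrap for values of roughly 2.2 seconds and up; on an LP64 host the two forms agree, since long is already 64 bits there. A hypothetical userspace sketch of the difference:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000L

int main(void)
{
	long tv_sec = 5;	/* long is 32 bits wide on 32-bit arches */

	/* widened to 64 bits only after the 32-bit multiply has wrapped */
	uint64_t bad  = (uint64_t)(tv_sec * NSEC_PER_SEC);
	/* widened first, so the multiply happens in 64 bits */
	uint64_t good = (uint64_t)tv_sec * NSEC_PER_SEC;

	printf("%llu vs %llu\n",
	       (unsigned long long)bad, (unsigned long long)good);
	return 0;
}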
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 3d69c93d50e8..6361892ea737 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -204,6 +204,7 @@ struct crypto_ahash {
unsigned int keylen);
unsigned int reqsize;
+ bool has_setkey;
struct crypto_tfm base;
};
@@ -375,6 +376,11 @@ static inline void *ahash_request_ctx(struct ahash_request *req)
int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen);
+static inline bool crypto_ahash_has_setkey(struct crypto_ahash *tfm)
+{
+ return tfm->has_setkey;
+}
+
/**
* crypto_ahash_finup() - update and finalize message digest
* @req: reference to the ahash_request handle that holds all information
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index 018afb264ac2..a2bfd7843f18 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -30,6 +30,9 @@ struct alg_sock {
struct sock *parent;
+ unsigned int refcnt;
+ unsigned int nokey_refcnt;
+
const struct af_alg_type *type;
void *private;
};
@@ -50,9 +53,11 @@ struct af_alg_type {
void (*release)(void *private);
int (*setkey)(void *private, const u8 *key, unsigned int keylen);
int (*accept)(void *private, struct sock *sk);
+ int (*accept_nokey)(void *private, struct sock *sk);
int (*setauthsize)(void *private, unsigned int authsize);
struct proto_ops *ops;
+ struct proto_ops *ops_nokey;
struct module *owner;
char name[14];
};
@@ -67,6 +72,7 @@ int af_alg_register_type(const struct af_alg_type *type);
int af_alg_unregister_type(const struct af_alg_type *type);
int af_alg_release(struct socket *sock);
+void af_alg_release_parent(struct sock *sk);
int af_alg_accept(struct sock *sk, struct socket *newsock);
int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len);
@@ -83,11 +89,6 @@ static inline struct alg_sock *alg_sk(struct sock *sk)
return (struct alg_sock *)sk;
}
-static inline void af_alg_release_parent(struct sock *sk)
-{
- sock_put(alg_sk(sk)->parent);
-}
-
static inline void af_alg_init_completion(struct af_alg_completion *completion)
{
init_completion(&completion->completion);
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index d8dd41fb034f..fd8742a40ff3 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -61,6 +61,8 @@ struct crypto_skcipher {
unsigned int ivsize;
unsigned int reqsize;
+ bool has_setkey;
+
struct crypto_tfm base;
};
@@ -305,6 +307,11 @@ static inline int crypto_skcipher_setkey(struct crypto_skcipher *tfm,
return tfm->setkey(tfm, key, keylen);
}
+static inline bool crypto_skcipher_has_setkey(struct crypto_skcipher *tfm)
+{
+ return tfm->has_setkey;
+}
+
/**
* crypto_skcipher_reqtfm() - obtain cipher handle from request
* @req: skcipher_request out of which the cipher handle is to be obtained
diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
index 7bfb063029d8..461a0558bca4 100644
--- a/include/drm/drm_cache.h
+++ b/include/drm/drm_cache.h
@@ -35,4 +35,13 @@
void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
+static inline bool drm_arch_can_wc_memory(void)
+{
+#if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
+ return false;
+#else
+ return true;
+#endif
+}
+
#endif
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 5340099741ae..f356f9716474 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -44,8 +44,6 @@ struct drm_dp_vcpi {
/**
* struct drm_dp_mst_port - MST port
* @kref: reference count for this port.
- * @guid_valid: for DP 1.2 devices if we have validated the GUID.
- * @guid: guid for DP 1.2 device on this port.
* @port_num: port number
* @input: if this port is an input port.
* @mcs: message capability status - DP 1.2 spec.
@@ -70,10 +68,6 @@ struct drm_dp_vcpi {
struct drm_dp_mst_port {
struct kref kref;
- /* if dpcd 1.2 device is on this port - its GUID info */
- bool guid_valid;
- u8 guid[16];
-
u8 port_num;
bool input;
bool mcs;
@@ -109,10 +103,12 @@ struct drm_dp_mst_port {
* @tx_slots: transmission slots for this device.
* @last_seqno: last sequence number used to talk to this.
* @link_address_sent: if a link address message has been sent to this device yet.
+ * @guid: GUID for the DP 1.2 branch device. Ports under this branch can
+ * be identified by port number.
*
* This structure represents an MST branch device, there is one
- * primary branch device at the root, along with any others connected
- * to downstream ports
+ * primary branch device at the root, along with any other branches connected
+ * to downstream ports of parent branches.
*/
struct drm_dp_mst_branch {
struct kref kref;
@@ -131,6 +127,9 @@ struct drm_dp_mst_branch {
struct drm_dp_sideband_msg_tx *tx_slots[2];
int last_seqno;
bool link_address_sent;
+
+ /* global unique identifier to identify branch devices */
+ u8 guid[16];
};
@@ -405,11 +404,9 @@ struct drm_dp_payload {
* @conn_base_id: DRM connector ID this mgr is connected to.
* @down_rep_recv: msg receiver state for down replies.
* @up_req_recv: msg receiver state for up requests.
- * @lock: protects mst state, primary, guid, dpcd.
+ * @lock: protects mst state, primary, dpcd.
* @mst_state: if this manager is enabled for an MST capable port.
* @mst_primary: pointer to the primary branch device.
- * @guid_valid: GUID valid for the primary branch device.
- * @guid: GUID for primary port.
* @dpcd: cache of DPCD for primary port.
* @pbn_div: PBN to slots divisor.
*
@@ -431,13 +428,11 @@ struct drm_dp_mst_topology_mgr {
struct drm_dp_sideband_msg_rx up_req_recv;
/* pointer to info about the initial MST device */
- struct mutex lock; /* protects mst_state + primary + guid + dpcd */
+ struct mutex lock; /* protects mst_state + primary + dpcd */
bool mst_state;
struct drm_dp_mst_branch *mst_primary;
- /* primary MST device GUID */
- bool guid_valid;
- u8 guid[16];
+
u8 dpcd[DP_RECEIVER_CAP_SIZE];
u8 sink_count;
int pbn_div;
@@ -450,9 +445,7 @@ struct drm_dp_mst_topology_mgr {
the mstb tx_slots and txmsg->state once they are queued */
struct mutex qlock;
struct list_head tx_msg_downq;
- struct list_head tx_msg_upq;
bool tx_down_in_progress;
- bool tx_up_in_progress;
/* payload info + lock for it */
struct mutex payload_lock;
diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h
index d639049a613d..553210c02ee0 100644
--- a/include/drm/drm_fixed.h
+++ b/include/drm/drm_fixed.h
@@ -73,18 +73,28 @@ static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B)
#define DRM_FIXED_ONE (1ULL << DRM_FIXED_POINT)
#define DRM_FIXED_DECIMAL_MASK (DRM_FIXED_ONE - 1)
#define DRM_FIXED_DIGITS_MASK (~DRM_FIXED_DECIMAL_MASK)
+#define DRM_FIXED_EPSILON 1LL
+#define DRM_FIXED_ALMOST_ONE (DRM_FIXED_ONE - DRM_FIXED_EPSILON)
static inline s64 drm_int2fixp(int a)
{
return ((s64)a) << DRM_FIXED_POINT;
}
-static inline int drm_fixp2int(int64_t a)
+static inline int drm_fixp2int(s64 a)
{
return ((s64)a) >> DRM_FIXED_POINT;
}
-static inline unsigned drm_fixp_msbset(int64_t a)
+static inline int drm_fixp2int_ceil(s64 a)
+{
+ if (a > 0)
+ return drm_fixp2int(a + DRM_FIXED_ALMOST_ONE);
+ else
+ return drm_fixp2int(a - DRM_FIXED_ALMOST_ONE);
+}
+
+static inline unsigned drm_fixp_msbset(s64 a)
{
unsigned shift, sign = (a >> 63) & 1;
@@ -136,6 +146,45 @@ static inline s64 drm_fixp_div(s64 a, s64 b)
return result;
}
+static inline s64 drm_fixp_from_fraction(s64 a, s64 b)
+{
+ s64 res;
+ bool a_neg = a < 0;
+ bool b_neg = b < 0;
+ u64 a_abs = a_neg ? -a : a;
+ u64 b_abs = b_neg ? -b : b;
+ u64 rem;
+
+ /* determine integer part */
+ u64 res_abs = div64_u64_rem(a_abs, b_abs, &rem);
+
+ /* determine fractional part */
+ {
+ u32 i = DRM_FIXED_POINT;
+
+ do {
+ rem <<= 1;
+ res_abs <<= 1;
+ if (rem >= b_abs) {
+ res_abs |= 1;
+ rem -= b_abs;
+ }
+ } while (--i != 0);
+ }
+
+ /* round up LSB */
+ {
+ u64 summand = (rem << 1) >= b_abs;
+
+ res_abs += summand;
+ }
+
+ res = (s64) res_abs;
+ if (a_neg ^ b_neg)
+ res = -res;
+ return res;
+}
+
static inline s64 drm_fixp_exp(s64 x)
{
s64 tolerance = div64_s64(DRM_FIXED_ONE, 1000000);
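DRM_FIXED_POINT is 32, so these helpers operate on signed 32.32 values. A short usage sketch of the two additions:

s64 ratio   = drm_fixp_from_fraction(2, 3);	/* 0xAAAAAAAB ~= 0.6667 */
int whole   = drm_fixp2int(ratio);		/* 0: truncates the fraction */
int roundup = drm_fixp2int_ceil(ratio);		/* 1: rounds away from zero */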
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index f1d8d0dbb4f1..fc1df05aeb85 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -100,10 +100,12 @@ struct mipi_dsi_host_ops {
struct mipi_dsi_host {
struct device *dev;
const struct mipi_dsi_host_ops *ops;
+ struct list_head list;
};
int mipi_dsi_host_register(struct mipi_dsi_host *host);
void mipi_dsi_host_unregister(struct mipi_dsi_host *host);
+struct mipi_dsi_host *of_find_mipi_dsi_host_by_node(struct device_node *node);
/* DSI mode flags */
@@ -139,10 +141,13 @@ enum mipi_dsi_pixel_format {
MIPI_DSI_FMT_RGB565,
};
+#define DSI_DEV_NAME_SIZE 20
+
/**
* struct mipi_dsi_device - DSI peripheral device
* @host: DSI host for this peripheral
* @dev: driver model device node for this peripheral
+ * @name: name of the dsi peripheral
* @channel: virtual channel assigned to the peripheral
* @format: pixel format for video mode
* @lanes: number of active data lanes
@@ -152,6 +157,8 @@ struct mipi_dsi_device {
struct mipi_dsi_host *host;
struct device dev;
+ char name[DSI_DEV_NAME_SIZE];
+
unsigned int channel;
unsigned int lanes;
enum mipi_dsi_pixel_format format;
@@ -163,6 +170,8 @@ static inline struct mipi_dsi_device *to_mipi_dsi_device(struct device *dev)
return container_of(dev, struct mipi_dsi_device, dev);
}
+struct mipi_dsi_device *mipi_dsi_new_dummy(struct mipi_dsi_host *host, u32 reg);
+void mipi_dsi_unregister_device(struct mipi_dsi_device *dsi);
struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np);
int mipi_dsi_attach(struct mipi_dsi_device *dsi);
int mipi_dsi_detach(struct mipi_dsi_device *dsi);
diff --git a/include/dt-bindings/clock/hi6220-clock.h b/include/dt-bindings/clock/hi6220-clock.h
index 70ee3833a7a0..8df5a24b6f9a 100644
--- a/include/dt-bindings/clock/hi6220-clock.h
+++ b/include/dt-bindings/clock/hi6220-clock.h
@@ -55,8 +55,8 @@
#define HI6220_TIMER7_PCLK 34
#define HI6220_TIMER8_PCLK 35
#define HI6220_UART0_PCLK 36
-
-#define HI6220_AO_NR_CLKS 37
+#define HI6220_RTC0_PCLK 41
+#define HI6220_AO_NR_CLKS 48
/* clk in Hi6220 systrl */
/* gate clock */
diff --git a/include/dt-bindings/pinctrl/hisi.h b/include/dt-bindings/pinctrl/hisi.h
new file mode 100755
index 000000000000..38f1ea879ea1
--- /dev/null
+++ b/include/dt-bindings/pinctrl/hisi.h
@@ -0,0 +1,59 @@
+/*
+ * This header provides constants for hisilicon pinctrl bindings.
+ *
+ * Copyright (c) 2015 Hisilicon Limited.
+ * Copyright (c) 2015 Linaro Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_HISI_H
+#define _DT_BINDINGS_PINCTRL_HISI_H
+
+/* iomg bit definition */
+#define MUX_M0 0
+#define MUX_M1 1
+#define MUX_M2 2
+#define MUX_M3 3
+#define MUX_M4 4
+#define MUX_M5 5
+#define MUX_M6 6
+#define MUX_M7 7
+
+/* iocg bit definition */
+#define PULL_MASK (3)
+#define PULL_DIS (0)
+#define PULL_UP (1 << 0)
+#define PULL_DOWN (1 << 1)
+
+/* drive strength definition */
+#define DRIVE_MASK (7 << 4)
+#define DRIVE1_02MA (0 << 4)
+#define DRIVE1_04MA (1 << 4)
+#define DRIVE1_08MA (2 << 4)
+#define DRIVE1_10MA (3 << 4)
+#define DRIVE2_02MA (0 << 4)
+#define DRIVE2_04MA (1 << 4)
+#define DRIVE2_08MA (2 << 4)
+#define DRIVE2_10MA (3 << 4)
+#define DRIVE3_04MA (0 << 4)
+#define DRIVE3_08MA (1 << 4)
+#define DRIVE3_12MA (2 << 4)
+#define DRIVE3_16MA (3 << 4)
+#define DRIVE3_20MA (4 << 4)
+#define DRIVE3_24MA (5 << 4)
+#define DRIVE3_32MA (6 << 4)
+#define DRIVE3_40MA (7 << 4)
+#define DRIVE4_02MA (0 << 4)
+#define DRIVE4_04MA (2 << 4)
+#define DRIVE4_08MA (4 << 4)
+#define DRIVE4_10MA (6 << 4)
+
+#endif
diff --git a/include/dt-bindings/reset/hisi,hi6220-resets.h b/include/dt-bindings/reset/hisi,hi6220-resets.h
new file mode 100644
index 000000000000..ca08a7e5248e
--- /dev/null
+++ b/include/dt-bindings/reset/hisi,hi6220-resets.h
@@ -0,0 +1,67 @@
+/*
+ * This header provides indices for the reset controller
+ * on the hi6220 SoC.
+ */
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_HI6220
+#define _DT_BINDINGS_RESET_CONTROLLER_HI6220
+
+#define PERIPH_RSTDIS0_MMC0 0x000
+#define PERIPH_RSTDIS0_MMC1 0x001
+#define PERIPH_RSTDIS0_MMC2 0x002
+#define PERIPH_RSTDIS0_NANDC 0x003
+#define PERIPH_RSTDIS0_USBOTG_BUS 0x004
+#define PERIPH_RSTDIS0_POR_PICOPHY 0x005
+#define PERIPH_RSTDIS0_USBOTG 0x006
+#define PERIPH_RSTDIS0_USBOTG_32K 0x007
+#define PERIPH_RSTDIS1_HIFI 0x100
+#define PERIPH_RSTDIS1_DIGACODEC 0x105
+#define PERIPH_RSTEN2_IPF 0x200
+#define PERIPH_RSTEN2_SOCP 0x201
+#define PERIPH_RSTEN2_DMAC 0x202
+#define PERIPH_RSTEN2_SECENG 0x203
+#define PERIPH_RSTEN2_ABB 0x204
+#define PERIPH_RSTEN2_HPM0 0x205
+#define PERIPH_RSTEN2_HPM1 0x206
+#define PERIPH_RSTEN2_HPM2 0x207
+#define PERIPH_RSTEN2_HPM3 0x208
+#define PERIPH_RSTEN3_CSSYS 0x300
+#define PERIPH_RSTEN3_I2C0 0x301
+#define PERIPH_RSTEN3_I2C1 0x302
+#define PERIPH_RSTEN3_I2C2 0x303
+#define PERIPH_RSTEN3_I2C3 0x304
+#define PERIPH_RSTEN3_UART1 0x305
+#define PERIPH_RSTEN3_UART2 0x306
+#define PERIPH_RSTEN3_UART3 0x307
+#define PERIPH_RSTEN3_UART4 0x308
+#define PERIPH_RSTEN3_SSP 0x309
+#define PERIPH_RSTEN3_PWM 0x30a
+#define PERIPH_RSTEN3_BLPWM 0x30b
+#define PERIPH_RSTEN3_TSENSOR 0x30c
+#define PERIPH_RSTEN3_DAPB 0x312
+#define PERIPH_RSTEN3_HKADC 0x313
+#define PERIPH_RSTEN3_CODEC_SSI 0x314
+#define PERIPH_RSTEN3_PMUSSI1 0x316
+#define PERIPH_RSTEN8_RS0 0x400
+#define PERIPH_RSTEN8_RS2 0x401
+#define PERIPH_RSTEN8_RS3 0x402
+#define PERIPH_RSTEN8_MS0 0x403
+#define PERIPH_RSTEN8_MS2 0x405
+#define PERIPH_RSTEN8_XG2RAM0 0x406
+#define PERIPH_RSTEN8_X2SRAM_TZMA 0x407
+#define PERIPH_RSTEN8_SRAM 0x408
+#define PERIPH_RSTEN8_HARQ 0x40a
+#define PERIPH_RSTEN8_DDRC 0x40c
+#define PERIPH_RSTEN8_DDRC_APB 0x40d
+#define PERIPH_RSTEN8_DDRPACK_APB 0x40e
+#define PERIPH_RSTEN8_DDRT 0x411
+#define PERIPH_RSDIST9_CARM_DAP 0x500
+#define PERIPH_RSDIST9_CARM_ATB 0x501
+#define PERIPH_RSDIST9_CARM_LBUS 0x502
+#define PERIPH_RSDIST9_CARM_POR 0x503
+#define PERIPH_RSDIST9_CARM_CORE 0x504
+#define PERIPH_RSDIST9_CARM_DBG 0x505
+#define PERIPH_RSDIST9_CARM_L2 0x506
+#define PERIPH_RSDIST9_CARM_SOCDBG 0x507
+#define PERIPH_RSDIST9_CARM_ETM 0x508
+
+#endif /*_DT_BINDINGS_RESET_CONTROLLER_HI6220*/
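A hedged decoding sketch (an assumption read off the value layout, not spelled out in this patch): the constants appear to pack a register-bank index into bits [31:8] and a bit position into bits [7:0]:

/* e.g. PERIPH_RSTEN3_UART1 (0x305) -> bank 3, bit 5 (assumed layout) */
static void hi6220_reset_decode(u32 id, u32 *bank, u32 *bit)
{
	*bank = id >> 8;
	*bit  = id & 0xff;
}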
diff --git a/include/linux/ata.h b/include/linux/ata.h
index d2992bfa1706..c1a2f345cbe6 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -487,8 +487,8 @@ enum ata_tf_protocols {
};
enum ata_ioctls {
- ATA_IOC_GET_IO32 = 0x309,
- ATA_IOC_SET_IO32 = 0x324,
+ ATA_IOC_GET_IO32 = 0x309, /* HDIO_GET_32BIT */
+ ATA_IOC_SET_IO32 = 0x324, /* HDIO_SET_32BIT */
};
/* core structures */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index b9b6e046b52e..fbe47bc700bd 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -310,6 +310,38 @@ static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
bio->bi_flags &= ~(1U << bit);
}
+static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
+{
+ *bv = bio_iovec(bio);
+}
+
+static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
+{
+ struct bvec_iter iter = bio->bi_iter;
+ int idx;
+
+ if (unlikely(!bio_multiple_segments(bio))) {
+ *bv = bio_iovec(bio);
+ return;
+ }
+
+ bio_advance_iter(bio, &iter, iter.bi_size);
+
+ if (!iter.bi_bvec_done)
+ idx = iter.bi_idx - 1;
+ else /* in the middle of bvec */
+ idx = iter.bi_idx;
+
+ *bv = bio->bi_io_vec[idx];
+
+ /*
+ * iter.bi_bvec_done records actual length of the last bvec
+ * if this bio ends in the middle of one io vector
+ */
+ if (iter.bi_bvec_done)
+ bv->bv_len = iter.bi_bvec_done;
+}
+
enum bip_flags {
BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */
BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */
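A worked example of why bi_bvec_done matters (hypothetical numbers): take a split bio whose vector table holds two 4 KiB segments but whose bi_iter covers only the first 6 KiB.

/*
 * After bio_advance_iter(bio, &iter, iter.bi_size) in the helper above:
 *     iter.bi_idx       == 1     (iteration stopped inside bvec 1)
 *     iter.bi_bvec_done == 2048  (2 KiB of bvec 1 were consumed)
 * so the last bvec is bvec 1 with its length clamped to 2 KiB.
 */
struct bio_vec bv;

bio_get_last_bvec(bio, &bv);	/* bv.bv_len == 2048, not 4096 */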
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index c70e3588a48c..168755791ec8 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1367,6 +1367,13 @@ static inline void put_dev_sector(Sector p)
page_cache_release(p.v);
}
+static inline bool __bvec_gap_to_prev(struct request_queue *q,
+ struct bio_vec *bprv, unsigned int offset)
+{
+ return offset ||
+ ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
+}
+
/*
* Check if adding a bio_vec after bprv with offset would create a gap in
* the SG list. Most drivers don't care about this, but some do.
@@ -1376,18 +1383,22 @@ static inline bool bvec_gap_to_prev(struct request_queue *q,
{
if (!queue_virt_boundary(q))
return false;
- return offset ||
- ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
+ return __bvec_gap_to_prev(q, bprv, offset);
}
static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
struct bio *next)
{
- if (!bio_has_data(prev))
- return false;
+ if (bio_has_data(prev) && queue_virt_boundary(q)) {
+ struct bio_vec pb, nb;
+
+ bio_get_last_bvec(prev, &pb);
+ bio_get_first_bvec(next, &nb);
- return bvec_gap_to_prev(q, &prev->bi_io_vec[prev->bi_vcnt - 1],
- next->bi_io_vec[0].bv_offset);
+ return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
+ }
+
+ return false;
}
static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
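Worked example, assuming a device with a 4 KiB virt boundary (mask 0xfff): bio_will_gap() now compares the real last segment of the previous bio with the real first segment of the next one.

struct bio_vec prev = { .bv_offset = 0x800, .bv_len = 0x800 };

/* (0x800 + 0x800) & 0xfff == 0, so prev ends exactly on the boundary:
 *   next segment starting at offset 0     -> __bvec_gap_to_prev() == false
 *   next segment starting at offset 0x200 -> __bvec_gap_to_prev() == true
 */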
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index 71b1d6cdcb5d..8dbd7879fdc6 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -220,6 +220,7 @@ struct ceph_connection {
struct ceph_entity_addr actual_peer_addr;
/* message out temps */
+ struct ceph_msg_header out_hdr;
struct ceph_msg *out_msg; /* sending message (== tail of
out_sent) */
bool out_msg_done;
@@ -229,7 +230,6 @@ struct ceph_connection {
int out_kvec_left; /* kvec's left in out_kvec */
int out_skip; /* skip this many bytes */
int out_kvec_bytes; /* total bytes left */
- bool out_kvec_is_msg; /* kvec refers to out_msg */
int out_more; /* there is more data after the kvecs */
__le64 out_temp_ack; /* for writing an ack */
struct ceph_timespec out_temp_keepalive2; /* for writing keepalive2
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 4a4eea01956c..e63d3a513e67 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -133,6 +133,12 @@ struct cgroup_subsys_state {
*/
u64 serial_nr;
+ /*
+ * Incremented by online self and children. Used to guarantee that
+ * parents are not offlined before their children.
+ */
+ atomic_t online_cnt;
+
/* percpu_ref killing and RCU release */
struct rcu_head rcu_head;
struct work_struct destroy_work;
@@ -210,6 +216,9 @@ struct css_set {
/* all css_task_iters currently walking this cset */
struct list_head task_iters;
+ /* dead and being drained, ignore for migration */
+ bool dead;
+
/* For RCU-protected deletion */
struct rcu_head rcu_head;
};
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 22ab246feed3..eeae401a2412 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -199,7 +199,7 @@
#define unreachable() __builtin_unreachable()
/* Mark a function definition as prohibited from being cloned. */
-#define __noclone __attribute__((__noclone__))
+#define __noclone __attribute__((__noclone__, __optimize__("no-tracer")))
#endif /* GCC_VERSION >= 40500 */
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 4dac1036594f..6fc9a6dd5ed2 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -144,7 +144,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
*/
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
#define __trace_if(cond) \
- if (__builtin_constant_p((cond)) ? !!(cond) : \
+ if (__builtin_constant_p(!!(cond)) ? !!(cond) : \
({ \
int ______r; \
static struct ftrace_branch_data \
diff --git a/include/linux/console.h b/include/linux/console.h
index bd194343c346..ea731af2451e 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -150,6 +150,7 @@ extern int console_trylock(void);
extern void console_unlock(void);
extern void console_conditional_schedule(void);
extern void console_unblank(void);
+extern void console_flush_on_panic(void);
extern struct tty_driver *console_device(int *);
extern void console_stop(struct console *);
extern void console_start(struct console *);
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 85a868ccb493..fea160ee5803 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -137,6 +137,8 @@ static inline void set_mems_allowed(nodemask_t nodemask)
task_unlock(current);
}
+extern void cpuset_post_attach_flush(void);
+
#else /* !CONFIG_CPUSETS */
static inline bool cpusets_enabled(void) { return false; }
@@ -243,6 +245,10 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
return false;
}
+static inline void cpuset_post_attach_flush(void)
+{
+}
+
#endif /* !CONFIG_CPUSETS */
#endif /* _LINUX_CPUSET_H */
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 5819e9b0ee22..da48880a76c6 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -161,6 +161,7 @@ struct dentry_operations {
struct vfsmount *(*d_automount)(struct path *);
int (*d_manage)(struct dentry *, bool);
struct inode *(*d_select_inode)(struct dentry *, unsigned);
+ struct dentry *(*d_real)(struct dentry *, struct inode *);
void (*d_canonical_path)(const struct path *, struct path *);
} ____cacheline_aligned;
@@ -228,6 +229,7 @@ struct dentry_operations {
#define DCACHE_MAY_FREE 0x00800000
#define DCACHE_FALLTHRU 0x01000000 /* Fall through to lower layer */
#define DCACHE_OP_SELECT_INODE 0x02000000 /* Unioned entry: dcache op selects inode */
+#define DCACHE_OP_REAL 0x08000000
extern seqlock_t rename_lock;
@@ -410,9 +412,7 @@ static inline bool d_mountpoint(const struct dentry *dentry)
*/
static inline unsigned __d_entry_type(const struct dentry *dentry)
{
- unsigned type = READ_ONCE(dentry->d_flags);
- smp_rmb();
- return type & DCACHE_ENTRY_TYPE;
+ return dentry->d_flags & DCACHE_ENTRY_TYPE;
}
static inline bool d_is_miss(const struct dentry *dentry)
@@ -585,4 +585,12 @@ static inline struct dentry *d_backing_dentry(struct dentry *upper)
return upper;
}
+static inline struct dentry *d_real(struct dentry *dentry)
+{
+ if (unlikely(dentry->d_flags & DCACHE_OP_REAL))
+ return dentry->d_op->d_real(dentry, NULL);
+ else
+ return dentry;
+}
+
#endif /* __LINUX_DCACHE_H */
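A hedged sketch of how a stacking filesystem might wire this up (the examplefs_* names are hypothetical, and setting DCACHE_OP_REAL is presumed to follow the other DCACHE_OP_* flags via d_set_d_op()):

/* return the dentry of the underlying (real) filesystem */
static struct dentry *examplefs_d_real(struct dentry *dentry,
				       struct inode *inode)
{
	return examplefs_lower_dentry(dentry);	/* hypothetical helper */
}

static const struct dentry_operations examplefs_dentry_ops = {
	.d_real = examplefs_d_real,
};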
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index ec1c61c87d89..899ab9f8549e 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -124,6 +124,8 @@ struct dm_dev {
char name[16];
};
+dev_t dm_get_dev_t(const char *path);
+
/*
* Constructors should call these functions to ensure destination devices
* are opened/closed correctly.
diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h
index 251a2090a554..e0ee0b3000b2 100644
--- a/include/linux/devpts_fs.h
+++ b/include/linux/devpts_fs.h
@@ -19,6 +19,8 @@
int devpts_new_index(struct inode *ptmx_inode);
void devpts_kill_index(struct inode *ptmx_inode, int idx);
+void devpts_add_ref(struct inode *ptmx_inode);
+void devpts_del_ref(struct inode *ptmx_inode);
/* mknod in devpts */
struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
void *priv);
@@ -32,6 +34,8 @@ void devpts_pty_kill(struct inode *inode);
/* Dummy stubs in the no-pty case */
static inline int devpts_new_index(struct inode *ptmx_inode) { return -EINVAL; }
static inline void devpts_kill_index(struct inode *ptmx_inode, int idx) { }
+static inline void devpts_add_ref(struct inode *ptmx_inode) { }
+static inline void devpts_del_ref(struct inode *ptmx_inode) { }
static inline struct inode *devpts_pty_new(struct inode *ptmx_inode,
dev_t device, int index, void *priv)
{
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 569b5a866bb1..47be3ad7d3e5 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -1199,7 +1199,10 @@ int efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
struct list_head *head, bool remove);
-bool efivar_validate(efi_char16_t *var_name, u8 *data, unsigned long len);
+bool efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
+ unsigned long data_size);
+bool efivar_variable_is_removable(efi_guid_t vendor, const char *name,
+ size_t len);
extern struct work_struct efivar_work;
void efivar_run_worker(void);
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 5972ffe5719a..5110d4211866 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -446,8 +446,12 @@ int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
void bpf_prog_destroy(struct bpf_prog *fp);
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
+int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk,
+ bool locked);
int sk_attach_bpf(u32 ufd, struct sock *sk);
int sk_detach_filter(struct sock *sk);
+int __sk_detach_filter(struct sock *sk, bool locked);
+
int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
unsigned int len);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 3aa514254161..ab3d8d9bb3ef 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1207,6 +1207,16 @@ static inline struct inode *file_inode(const struct file *f)
return f->f_inode;
}
+static inline struct dentry *file_dentry(const struct file *file)
+{
+ struct dentry *dentry = file->f_path.dentry;
+
+ if (unlikely(dentry->d_flags & DCACHE_OP_REAL))
+ return dentry->d_op->d_real(dentry, file_inode(file));
+ else
+ return dentry;
+}
+
static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl)
{
return locks_lock_inode_wait(file_inode(filp), fl);
@@ -2217,7 +2227,7 @@ extern long do_sys_open(int dfd, const char __user *filename, int flags,
extern struct file *file_open_name(struct filename *, int, umode_t);
extern struct file *filp_open(const char *, int, umode_t);
extern struct file *file_open_root(struct dentry *, struct vfsmount *,
- const char *, int);
+ const char *, int, umode_t);
extern struct file * dentry_open(const struct path *, int, const struct cred *);
extern int filp_close(struct file *, fl_owner_t id);
diff --git a/include/linux/hisi_ion.h b/include/linux/hisi_ion.h
new file mode 100644
index 000000000000..b1e003378b24
--- /dev/null
+++ b/include/linux/hisi_ion.h
@@ -0,0 +1,56 @@
+/*
+ *
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_HISI_ION_H
+#define _LINUX_HISI_ION_H
+
+#include <ion/ion.h>
+
+/**
+ * These are the only ids that should be used for Ion heap ids.
+ * The ids listed are the order in which allocation will be attempted
+ * if specified. Don't swap the order of heap ids unless you know what
+ * you are doing!
+ * IDs are spaced on purpose to allow new IDs to be inserted in between (for
+ * possible fallbacks)
+ */
+
+enum ion_heap_ids {
+ ION_SYSTEM_HEAP_ID = 0,
+ ION_SYSTEM_CONTIG_HEAP_ID = 1,
+ ION_GRALLOC_HEAP_ID = 2,
+ ION_OVERLAY_HEAP_ID = 7,
+ ION_FB_HEAP_ID = 10,
+ ION_VPU_HEAP_ID = 11,
+ ION_JPU_HEAP_ID = 12,
+};
+
+
+/*
+ * This macro should be used with the ion_heap_ids defined above.
+ */
+#define ION_HEAP(bit) (1 << (bit))
+#define HISI_ION_NAME_LEN 16
+
+struct ion_heap_info_data {
+ char name[HISI_ION_NAME_LEN];
+ phys_addr_t heap_phy;
+ unsigned int heap_size;
+};
+
+int hisi_ion_get_heap_info(unsigned int id, struct ion_heap_info_data *data);
+struct ion_device *get_ion_device(void);
+
+#endif
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 76dd4f0da5ca..2ead22dd74a0 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -87,7 +87,8 @@ enum hrtimer_restart {
* @function: timer expiry callback function
* @base: pointer to the timer base (per cpu and per clock)
* @state: state information (See bit values above)
- * @start_pid: timer statistics field to store the pid of the task which
+ * @is_rel: Set if the timer was armed relative
+ * @start_pid: timer statistics field to store the pid of the task which
* started the timer
* @start_site: timer statistics field to store the site where the timer
* was started
@@ -101,7 +102,8 @@ struct hrtimer {
ktime_t _softexpires;
enum hrtimer_restart (*function)(struct hrtimer *);
struct hrtimer_clock_base *base;
- unsigned long state;
+ u8 state;
+ u8 is_rel;
#ifdef CONFIG_TIMER_STATS
int start_pid;
void *start_site;
@@ -321,6 +323,27 @@ static inline void clock_was_set_delayed(void) { }
#endif
+static inline ktime_t
+__hrtimer_expires_remaining_adjusted(const struct hrtimer *timer, ktime_t now)
+{
+ ktime_t rem = ktime_sub(timer->node.expires, now);
+
+ /*
+ * Adjust relative timers for the extra we added in
+ * hrtimer_start_range_ns() to prevent short timeouts.
+ */
+ if (IS_ENABLED(CONFIG_TIME_LOW_RES) && timer->is_rel)
+ rem.tv64 -= hrtimer_resolution;
+ return rem;
+}
+
+static inline ktime_t
+hrtimer_expires_remaining_adjusted(const struct hrtimer *timer)
+{
+ return __hrtimer_expires_remaining_adjusted(timer,
+ timer->base->get_time());
+}
+
extern void clock_was_set(void);
#ifdef CONFIG_TIMERFD
extern void timerfd_clock_was_set(void);
@@ -390,7 +413,12 @@ static inline void hrtimer_restart(struct hrtimer *timer)
}
/* Query timers: */
-extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
+extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust);
+
+static inline ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
+{
+ return __hrtimer_get_remaining(timer, false);
+}
extern u64 hrtimer_get_next_event(void);
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 8fdc17b84739..ae6a711dcd1d 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -630,6 +630,11 @@ struct hv_input_signal_event_buffer {
struct hv_input_signal_event event;
};
+enum hv_signal_policy {
+ HV_SIGNAL_POLICY_DEFAULT = 0,
+ HV_SIGNAL_POLICY_EXPLICIT,
+};
+
struct vmbus_channel {
/* Unique channel id */
int id;
@@ -757,8 +762,21 @@ struct vmbus_channel {
* link up channels based on their CPU affinity.
*/
struct list_head percpu_list;
+ /*
+ * Host signaling policy: The default policy will be
+ * based on the ring buffer state. We will also support
+ * a policy where the client driver can have explicit
+ * signaling control.
+ */
+ enum hv_signal_policy signal_policy;
};
+static inline void set_channel_signal_state(struct vmbus_channel *c,
+ enum hv_signal_policy policy)
+{
+ c->signal_policy = policy;
+}
+
static inline void set_channel_read_state(struct vmbus_channel *c, bool state)
{
c->batched_reading = state;
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index a338a688ee4a..dcb89e3515db 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -46,10 +46,6 @@ struct br_ip_list {
#define BR_LEARNING_SYNC BIT(9)
#define BR_PROXYARP_WIFI BIT(10)
-/* values as per ieee8021QBridgeFdbAgingTime */
-#define BR_MIN_AGEING_TIME (10 * HZ)
-#define BR_MAX_AGEING_TIME (1000000 * HZ)
-
#define BR_DEFAULT_AGEING_TIME (300 * HZ)
extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 821273ca4873..2d9b650047a5 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -235,6 +235,9 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
/* low 64 bit */
#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))
+/* PRS_REG */
+#define DMA_PRS_PPR ((u32)1)
+
#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
do { \
cycles_t start_time = get_cycles(); \
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 1eaf3d81f5c1..2955e672391d 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -607,7 +607,7 @@ do { \
#define do_trace_printk(fmt, args...) \
do { \
- static const char *trace_printk_fmt \
+ static const char *trace_printk_fmt __used \
__attribute__((section("__trace_printk_fmt"))) = \
__builtin_constant_p(fmt) ? fmt : NULL; \
\
@@ -651,7 +651,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...);
*/
#define trace_puts(str) ({ \
- static const char *trace_printk_fmt \
+ static const char *trace_printk_fmt __used \
__attribute__((section("__trace_printk_fmt"))) = \
__builtin_constant_p(str) ? str : NULL; \
\
@@ -673,7 +673,7 @@ extern void trace_dump_stack(int skip);
#define ftrace_vprintk(fmt, vargs) \
do { \
if (__builtin_constant_p(fmt)) { \
- static const char *trace_printk_fmt \
+ static const char *trace_printk_fmt __used \
__attribute__((section("__trace_printk_fmt"))) = \
__builtin_constant_p(fmt) ? fmt : NULL; \
\
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 600c1e0626a5..b20a2752f934 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -718,7 +718,7 @@ struct ata_device {
union {
u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */
- };
+ } ____cacheline_aligned;
/* DEVSLP Timing Variables from Identify Device Data Log */
u8 devslp_timing[ATA_LOG_DEVSLP_SIZE];
diff --git a/include/linux/mfd/hi655x-pmic.h b/include/linux/mfd/hi655x-pmic.h
new file mode 100644
index 000000000000..dbbe9a644622
--- /dev/null
+++ b/include/linux/mfd/hi655x-pmic.h
@@ -0,0 +1,55 @@
+/*
+ * Device driver for regulators in hi655x IC
+ *
+ * Copyright (c) 2016 Hisilicon.
+ *
+ * Authors:
+ * Chen Feng <puck.chen@hisilicon.com>
+ * Fei Wang <w.f@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __HI655X_PMIC_H
+#define __HI655X_PMIC_H
+
+/* Hi655x registers are mapped to memory bus in 4 bytes stride */
+#define HI655X_STRIDE 4
+#define HI655X_BUS_ADDR(x) ((x) << 2)
+
+#define HI655X_BITS 8
+
+#define HI655X_NR_IRQ 32
+
+#define HI655X_IRQ_STAT_BASE (0x003 << 2)
+#define HI655X_IRQ_MASK_BASE (0x007 << 2)
+#define HI655X_ANA_IRQM_BASE (0x1b5 << 2)
+#define HI655X_IRQ_ARRAY 4
+#define HI655X_IRQ_MASK 0xFF
+#define HI655X_IRQ_CLR 0xFF
+#define HI655X_VER_REG 0x00
+
+#define PMU_VER_START 0x10
+#define PMU_VER_END 0x38
+
+#define RESERVE_INT BIT(7)
+#define PWRON_D20R_INT BIT(6)
+#define PWRON_D20F_INT BIT(5)
+#define PWRON_D4SR_INT BIT(4)
+#define VSYS_6P0_D200UR_INT BIT(3)
+#define VSYS_UV_D3R_INT BIT(2)
+#define VSYS_2P5_R_INT BIT(1)
+#define OTMP_D1R_INT BIT(0)
+
+struct hi655x_pmic {
+ struct resource *res;
+ struct device *dev;
+ struct regmap *regmap;
+ int gpio;
+ unsigned int ver;
+ struct regmap_irq_chip_data *irq_data;
+};
+
+#endif
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index abc4767695e4..b2c9fada8eac 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -45,7 +45,7 @@ struct mlx5_core_cq {
atomic_t refcount;
struct completion free;
unsigned vector;
- int irqn;
+ unsigned int irqn;
void (*comp) (struct mlx5_core_cq *);
void (*event) (struct mlx5_core_cq *, enum mlx5_event);
struct mlx5_uar *uar;
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 5c857f2a20d7..af3efd9157f0 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -303,7 +303,7 @@ struct mlx5_eq {
u32 cons_index;
struct mlx5_buf buf;
int size;
- u8 irqn;
+ unsigned int irqn;
u8 eqn;
int nent;
u64 mask;
@@ -762,7 +762,8 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_start_eqs(struct mlx5_core_dev *dev);
int mlx5_stop_eqs(struct mlx5_core_dev *dev);
-int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn);
+int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
+ unsigned int *irqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index f67b2ec18e6d..6ccf2ec04a51 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -14,9 +14,10 @@
#ifndef LINUX_MMC_DW_MMC_H
#define LINUX_MMC_DW_MMC_H
-#include <linux/scatterlist.h>
-#include <linux/mmc/core.h>
#include <linux/dmaengine.h>
+#include <linux/mmc/core.h>
+#include <linux/reset.h>
+#include <linux/scatterlist.h>
#define MAX_MCI_SLOTS 2
@@ -276,6 +277,7 @@ struct dw_mci_board {
/* delay in mS before detecting cards after interrupt */
u32 detect_delay_ms;
+ struct reset_control *rstc;
struct dw_mci_dma_ops *dma_ops;
struct dma_pdata *data;
};
diff --git a/include/linux/module.h b/include/linux/module.h
index 3a19c79918e0..b229a9961d02 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -302,6 +302,12 @@ struct mod_tree_node {
struct latch_tree_node node;
};
+struct mod_kallsyms {
+ Elf_Sym *symtab;
+ unsigned int num_symtab;
+ char *strtab;
+};
+
struct module {
enum module_state state;
@@ -411,14 +417,9 @@ struct module {
#endif
#ifdef CONFIG_KALLSYMS
- /*
- * We keep the symbol and string tables for kallsyms.
- * The core_* fields below are temporary, loader-only (they
- * could really be discarded after module init).
- */
- Elf_Sym *symtab, *core_symtab;
- unsigned int num_symtab, core_num_syms;
- char *strtab, *core_strtab;
+ /* Protected by RCU and/or module_mutex: use rcu_dereference() */
+ struct mod_kallsyms *kallsyms;
+ struct mod_kallsyms core_kallsyms;
/* Section attributes */
struct module_sect_attrs *sect_attrs;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 3143c847bddb..04c068e55353 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -265,6 +265,7 @@ struct header_ops {
void (*cache_update)(struct hh_cache *hh,
const struct net_device *dev,
const unsigned char *haddr);
+ bool (*validate)(const char *ll_header, unsigned int len);
};
/* These flag bits are private to the generic network queueing
@@ -1398,8 +1399,7 @@ enum netdev_priv_flags {
* @dma: DMA channel
* @mtu: Interface MTU value
* @type: Interface hardware type
- * @hard_header_len: Hardware header length, which means that this is the
- * minimum size of a packet.
+ * @hard_header_len: Maximum hardware header length.
*
* @needed_headroom: Extra headroom the hardware may need, but not in all
* cases can this be guaranteed
@@ -2493,6 +2493,24 @@ static inline int dev_parse_header(const struct sk_buff *skb,
return dev->header_ops->parse(skb, haddr);
}
+/* ll_header must have at least hard_header_len allocated */
+static inline bool dev_validate_header(const struct net_device *dev,
+ char *ll_header, int len)
+{
+ if (likely(len >= dev->hard_header_len))
+ return true;
+
+ if (capable(CAP_SYS_RAWIO)) {
+ memset(ll_header + len, 0, dev->hard_header_len - len);
+ return true;
+ }
+
+ if (dev->header_ops && dev->header_ops->validate)
+ return dev->header_ops->validate(ll_header, len);
+
+ return false;
+}
+
typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
static inline int unregister_gifconf(unsigned int family)
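A hedged sketch of a transmit path using the new hook, for a caller (e.g. a packet socket) that copied a user-built link-layer header of len bytes into the skb:

if (!dev_validate_header(dev, skb->data, len)) {
	kfree_skb(skb);
	return -EINVAL;	/* short header, no CAP_SYS_RAWIO, not validatable */
}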
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index c0e961474a52..5455b660bd88 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -544,9 +544,7 @@ extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
static inline loff_t nfs_size_to_loff_t(__u64 size)
{
- if (size > (__u64) OFFSET_MAX - 1)
- return OFFSET_MAX - 1;
- return (loff_t) size;
+ return min_t(u64, size, OFFSET_MAX);
}
static inline ino_t
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 6ae25aae88fd..e89c7ee7e803 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -359,6 +359,7 @@ struct pci_dev {
unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */
unsigned int irq_managed:1;
unsigned int has_secondary_link:1;
+ unsigned int non_compliant_bars:1; /* broken BARs; ignore them */
pci_dev_flags_t dev_flags;
atomic_t enable_cnt; /* pci_enable_device has been called */
@@ -988,23 +989,6 @@ static inline int pci_is_managed(struct pci_dev *pdev)
return pdev->is_managed;
}
-static inline void pci_set_managed_irq(struct pci_dev *pdev, unsigned int irq)
-{
- pdev->irq = irq;
- pdev->irq_managed = 1;
-}
-
-static inline void pci_reset_managed_irq(struct pci_dev *pdev)
-{
- pdev->irq = 0;
- pdev->irq_managed = 0;
-}
-
-static inline bool pci_has_managed_irq(struct pci_dev *pdev)
-{
- return pdev->irq_managed && pdev->irq > 0;
-}
-
void pci_disable_device(struct pci_dev *dev);
extern unsigned int pcibios_max_latency;
diff --git a/include/linux/platform_data/asoc-s3c.h b/include/linux/platform_data/asoc-s3c.h
index 5e0bc779e6c5..33f88b4479e4 100644
--- a/include/linux/platform_data/asoc-s3c.h
+++ b/include/linux/platform_data/asoc-s3c.h
@@ -39,6 +39,10 @@ struct samsung_i2s {
*/
struct s3c_audio_pdata {
int (*cfg_gpio)(struct platform_device *);
+ void *dma_playback;
+ void *dma_capture;
+ void *dma_play_sec;
+ void *dma_capture_mic;
union {
struct samsung_i2s i2s;
} type;
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 061265f92876..504c98a278d4 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -57,7 +57,29 @@ extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
#define PTRACE_MODE_READ 0x01
#define PTRACE_MODE_ATTACH 0x02
#define PTRACE_MODE_NOAUDIT 0x04
-/* Returns true on success, false on denial. */
+#define PTRACE_MODE_FSCREDS 0x08
+#define PTRACE_MODE_REALCREDS 0x10
+
+/* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */
+#define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS)
+#define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS)
+#define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS)
+#define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS)
+
+/**
+ * ptrace_may_access - check whether the caller is permitted to access
+ * a target task.
+ * @task: target task
+ * @mode: selects type of access and caller credentials
+ *
+ * Returns true on success, false on denial.
+ *
+ * One of the flags PTRACE_MODE_FSCREDS and PTRACE_MODE_REALCREDS must
+ * be set in @mode to specify whether the access was requested through
+ * a filesystem syscall (should use effective capabilities and fsuid
+ * of the caller) or through an explicit syscall such as
+ * process_vm_writev or ptrace (and should use the real credentials).
+ */
extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
static inline int ptrace_reparented(struct task_struct *child)
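A short sketch of picking the right mode bits; the surrounding handlers are hypothetical:

/* filesystem-triggered access, e.g. a procfs read handler */
if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
	return -EACCES;

/* direct syscall, e.g. process_vm_writev() */
if (!ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS))
	return -EPERM;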
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 33170dbd9db4..5d5174b59802 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -370,12 +370,28 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
struct radix_tree_iter *iter, unsigned flags);
/**
+ * radix_tree_iter_retry - retry this chunk of the iteration
+ * @iter: iterator state
+ *
+ * If we iterate over a tree protected only by the RCU lock, a race
+ * against deletion or creation may result in seeing a slot for which
+ * radix_tree_deref_retry() returns true. If so, call this function
+ * and continue the iteration.
+ */
+static inline __must_check
+void **radix_tree_iter_retry(struct radix_tree_iter *iter)
+{
+ iter->next_index = iter->index;
+ return NULL;
+}
+
+/**
* radix_tree_chunk_size - get current chunk size
*
* @iter: pointer to radix tree iterator
* Returns: current chunk size
*/
-static __always_inline unsigned
+static __always_inline long
radix_tree_chunk_size(struct radix_tree_iter *iter)
{
return iter->next_index - iter->index;
@@ -409,9 +425,9 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
return slot + offset + 1;
}
} else {
- unsigned size = radix_tree_chunk_size(iter) - 1;
+ long size = radix_tree_chunk_size(iter);
- while (size--) {
+ while (--size > 0) {
slot++;
iter->index++;
if (likely(*slot))
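The helper slots into the usual RCU lookup loop like this (a minimal sketch over some radix tree root):

void **slot;
struct radix_tree_iter iter;

rcu_read_lock();
radix_tree_for_each_slot(slot, &root, &iter, 0) {
	void *entry = radix_tree_deref_slot(slot);

	if (radix_tree_deref_retry(entry)) {
		slot = radix_tree_iter_retry(&iter);
		continue;	/* re-fetch this chunk on the next pass */
	}
	/* ... use entry ... */
}
rcu_read_unlock();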
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 29446aeef36e..ddda2ac3446e 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -108,20 +108,6 @@ static inline void put_anon_vma(struct anon_vma *anon_vma)
__put_anon_vma(anon_vma);
}
-static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
-{
- struct anon_vma *anon_vma = vma->anon_vma;
- if (anon_vma)
- down_write(&anon_vma->root->rwsem);
-}
-
-static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
-{
- struct anon_vma *anon_vma = vma->anon_vma;
- if (anon_vma)
- up_write(&anon_vma->root->rwsem);
-}
-
static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
down_write(&anon_vma->root->rwsem);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index fa39434e3fdd..21a6e9649012 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -830,6 +830,7 @@ struct user_struct {
unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */
#endif
unsigned long locked_shm; /* How many pages of mlocked shm ? */
+ unsigned long unix_inflight; /* How many files in flight in unix sockets */
#ifdef CONFIG_KEYS
struct key *uid_keyring; /* UID specific keyring */
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 50777b5b1e4c..92d112aeec68 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -15,10 +15,7 @@ struct shmem_inode_info {
unsigned int seals; /* shmem seals */
unsigned long flags;
unsigned long alloced; /* data pages alloced to file */
- union {
- unsigned long swapped; /* subtotal assigned to swap */
- char *symlink; /* unswappable short symlink */
- };
+ unsigned long swapped; /* subtotal assigned to swap */
struct shared_policy policy; /* NUMA memory alloc policy */
struct list_head swaplist; /* chain of maybes on swap */
struct simple_xattrs xattrs; /* list of xattrs */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 4355129fff91..4fde61804191 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -219,6 +219,7 @@ struct sk_buff;
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
#endif
+extern int sysctl_max_skb_frags;
typedef struct skb_frag_struct skb_frag_t;
@@ -1907,6 +1908,30 @@ static inline void skb_reserve(struct sk_buff *skb, int len)
skb->tail += len;
}
+/**
+ * skb_tailroom_reserve - adjust reserved_tailroom
+ * @skb: buffer to alter
+ * @mtu: maximum amount of headlen permitted
+ * @needed_tailroom: minimum amount of reserved_tailroom
+ *
+ * Set reserved_tailroom so that headlen can be as large as possible but
+ * not larger than mtu and tailroom cannot be smaller than
+ * needed_tailroom.
+ * The required headroom should already have been reserved before using
+ * this function.
+ */
+static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
+ unsigned int needed_tailroom)
+{
+ SKB_LINEAR_ASSERT(skb);
+ if (mtu < skb_tailroom(skb) - needed_tailroom)
+ /* use at most mtu */
+ skb->reserved_tailroom = skb_tailroom(skb) - mtu;
+ else
+ /* use up to all available space */
+ skb->reserved_tailroom = needed_tailroom;
+}
+
#define ENCAP_TYPE_ETHER 0
#define ENCAP_TYPE_IPPROTO 1
@@ -2723,6 +2748,23 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb,
unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
+static inline void skb_postpush_rcsum(struct sk_buff *skb,
+ const void *start, unsigned int len)
+{
+ /* To perform the reverse operation to skb_postpull_rcsum(), instead of
+ *
+ * skb->csum = csum_add(skb->csum, csum_partial(start, len, 0));
+ *
+ * we can use this equivalent version to save a few instructions.
+ * Feeding a csum of 0 into csum_partial() and adding skb->csum
+ * afterwards is equivalent to feeding skb->csum in the first place.
+ */
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->csum = csum_partial(start, len, skb->csum);
+}
+
/**
* pskb_trim_rcsum - trim received skb and update checksum
* @skb: buffer to trim
@@ -3446,7 +3488,8 @@ struct skb_gso_cb {
int encap_level;
__u16 csum_start;
};
-#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)(skb)->cb)
+#define SKB_SGO_CB_OFFSET 32
+#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_SGO_CB_OFFSET))
static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
{
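The arithmetic in skb_tailroom_reserve() above, with hypothetical numbers:

/* skb_tailroom(skb) == 2000, mtu == 1500, needed_tailroom == 128:
 *   1500 < 2000 - 128, so reserved_tailroom = 2000 - 1500 = 500;
 *   headlen is capped at the mtu and tailroom (500) exceeds the minimum.
 *
 * skb_tailroom(skb) == 1400, mtu == 1500, needed_tailroom == 128:
 *   1500 < 1400 - 128 is false, so reserved_tailroom = 128;
 *   headlen may grow to 1272 and tailroom keeps exactly its minimum.
 */
skb_tailroom_reserve(skb, mtu, needed_tailroom);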
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 613c29bd6baf..4a849f19e6c9 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -43,6 +43,9 @@
/* Default weight of a bound cooling device */
#define THERMAL_WEIGHT_DEFAULT 0
+/* use a value below 0 K to indicate an invalid/uninitialized temperature */
+#define THERMAL_TEMP_INVALID -274000
+
/* Unit conversion macros */
#define DECI_KELVIN_TO_CELSIUS(t) ({ \
long _t = (t); \
@@ -153,6 +156,7 @@ struct thermal_attr {
* @trip_hyst_attrs: attributes for trip points for sysfs: trip hysteresis
* @devdata: private pointer for device private data
* @trips: number of trip points the thermal zone supports
* @trips_disabled: bitmap for disabled trips
* @passive_delay: number of milliseconds to wait between polls when
* performing passive cooling.
* @polling_delay: number of milliseconds to wait between polls when
@@ -167,6 +171,7 @@ struct thermal_attr {
* @forced_passive: If > 0, temperature at which to switch on all ACPI
* processor cooling devices. Currently only used by the
* step-wise governor.
+ * @need_update: if set to 1, thermal_zone_device_update() needs to be invoked.
* @ops: operations this &thermal_zone_device supports
* @tzp: thermal zone parameters
* @governor: pointer to the governor for this thermal zone
@@ -187,6 +192,7 @@ struct thermal_zone_device {
struct thermal_attr *trip_hyst_attrs;
void *devdata;
int trips;
+ unsigned long trips_disabled; /* bitmap for disabled trips */
int passive_delay;
int polling_delay;
int temperature;
@@ -194,6 +200,7 @@ struct thermal_zone_device {
int emul_temperature;
int passive;
unsigned int forced_passive;
+ atomic_t need_update;
struct thermal_zone_device_ops *ops;
struct thermal_zone_params *tzp;
struct thermal_governor *governor;
diff --git a/include/linux/ti_wilink_st.h b/include/linux/ti_wilink_st.h
index 0a0d56834c8e..9b366ee2f8f4 100644
--- a/include/linux/ti_wilink_st.h
+++ b/include/linux/ti_wilink_st.h
@@ -86,6 +86,7 @@ struct st_proto_s {
extern long st_register(struct st_proto_s *);
extern long st_unregister(struct st_proto_s *);
+extern struct ti_st_plat_data *dt_pdata;
/*
* header information used by st_core.c
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 429fdfc3baf5..925730bc9fc1 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -568,6 +568,8 @@ enum {
FILTER_DYN_STRING,
FILTER_PTR_STRING,
FILTER_TRACE_FN,
+ FILTER_COMM,
+ FILTER_CPU,
};
extern int trace_event_raw_init(struct trace_event_call *call);
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 696a339c592c..27e32b2b602f 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -14,8 +14,10 @@
* See the file COPYING for more details.
*/
+#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/types.h>
+#include <linux/cpumask.h>
#include <linux/rcupdate.h>
#include <linux/static_key.h>
@@ -352,15 +354,19 @@ extern void syscall_unregfunc(void);
* "void *__data, proto" as the callback prototype.
*/
#define DECLARE_TRACE_NOARGS(name) \
- __DECLARE_TRACE(name, void, , 1, void *__data, __data)
+ __DECLARE_TRACE(name, void, , \
+ cpu_online(raw_smp_processor_id()), \
+ void *__data, __data)
#define DECLARE_TRACE(name, proto, args) \
- __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), 1, \
- PARAMS(void *__data, proto), \
- PARAMS(__data, args))
+ __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \
+ cpu_online(raw_smp_processor_id()), \
+ PARAMS(void *__data, proto), \
+ PARAMS(__data, args))
#define DECLARE_TRACE_CONDITION(name, proto, args, cond) \
- __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), PARAMS(cond), \
+ __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \
+ cpu_online(raw_smp_processor_id()) && (PARAMS(cond)), \
PARAMS(void *__data, proto), \
PARAMS(__data, args))
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 5e31f1b99037..3bf03b6b52e9 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -594,7 +594,7 @@ static inline int tty_ldisc_receive_buf(struct tty_ldisc *ld, unsigned char *p,
count = ld->ops->receive_buf2(ld->tty, p, f, count);
else {
count = min_t(int, count, ld->tty->receive_room);
- if (count)
+ if (count && ld->ops->receive_buf)
ld->ops->receive_buf(ld->tty, p, f, count);
}
return count;
@@ -654,6 +654,7 @@ extern long vt_compat_ioctl(struct tty_struct *tty,
/* tty_mutex.c */
/* functions for preparation of BKL removal */
extern void __lockfunc tty_lock(struct tty_struct *tty);
+extern int tty_lock_interruptible(struct tty_struct *tty);
extern void __lockfunc tty_unlock(struct tty_struct *tty);
extern void __lockfunc tty_lock_slave(struct tty_struct *tty);
extern void __lockfunc tty_unlock_slave(struct tty_struct *tty);
diff --git a/include/linux/ucs2_string.h b/include/linux/ucs2_string.h
index cbb20afdbc01..bb679b48f408 100644
--- a/include/linux/ucs2_string.h
+++ b/include/linux/ucs2_string.h
@@ -11,4 +11,8 @@ unsigned long ucs2_strlen(const ucs2_char_t *s);
unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength);
int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len);
+unsigned long ucs2_utf8size(const ucs2_char_t *src);
+unsigned long ucs2_as_utf8(u8 *dest, const ucs2_char_t *src,
+ unsigned long maxlength);
+
#endif /* _LINUX_UCS2_STRING_H_ */
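
The two helpers declared above size and perform UCS-2 to UTF-8 conversion: each 16-bit code point expands to one, two or three UTF-8 bytes. A rough userspace model of the sizing rule, where ucs2_utf8size_model() is a hypothetical stand-in for the kernel's ucs2_utf8size():

#include <stdio.h>
#include <stdint.h>

typedef uint16_t ucs2_char_t;

/* UTF-8 bytes needed for a NUL-terminated UCS-2 string: BMP code points
 * below 0x80 take 1 byte, below 0x800 take 2, the rest take 3. */
static unsigned long ucs2_utf8size_model(const ucs2_char_t *src)
{
	unsigned long len = 0;

	for (; *src; src++) {
		if (*src < 0x80)
			len += 1;
		else if (*src < 0x800)
			len += 2;
		else
			len += 3;
	}
	return len;
}

int main(void)
{
	ucs2_char_t s[] = { 'B', 0x00e9, 0x20ac, 0 };	/* "B", e-acute, euro */

	printf("%lu\n", ucs2_utf8size_model(s));	/* 1 + 2 + 3 = 6 */
	return 0;
}
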
diff --git a/include/linux/usb.h b/include/linux/usb.h
index b9a28074210f..b79925dd2b41 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -511,6 +511,8 @@ struct usb3_lpm_parameters {
* @usb2_hw_lpm_enabled: USB2 hardware LPM is enabled
* @usb2_hw_lpm_allowed: Userspace allows USB 2.0 LPM to be enabled
* @usb3_lpm_enabled: USB3 hardware LPM enabled
+ * @usb3_lpm_u1_enabled: USB3 hardware U1 LPM enabled
+ * @usb3_lpm_u2_enabled: USB3 hardware U2 LPM enabled
* @string_langid: language ID for strings
* @product: iProduct string, if present (static)
* @manufacturer: iManufacturer string, if present (static)
@@ -584,6 +586,8 @@ struct usb_device {
unsigned usb2_hw_lpm_enabled:1;
unsigned usb2_hw_lpm_allowed:1;
unsigned usb3_lpm_enabled:1;
+ unsigned usb3_lpm_u1_enabled:1;
+ unsigned usb3_lpm_u2_enabled:1;
int string_langid;
/* static strings from the device */
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index 7f5f78bd15ad..245f57dbbb61 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -79,6 +79,8 @@
/* Cannot handle MI_REPORT_SUPPORTED_OPERATION_CODES */ \
US_FLAG(MAX_SECTORS_240, 0x08000000) \
/* Sets max_sectors to 240 */ \
+ US_FLAG(NO_REPORT_LUNS, 0x10000000) \
+ /* Cannot handle REPORT_LUNS */ \
#define US_FLAG(name, value) US_FL_##name = value ,
enum { US_DO_ALL_FLAGS };
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index b333c945e571..d0b5ca5d4e08 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -198,6 +198,7 @@ void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
void wbc_detach_inode(struct writeback_control *wbc);
void wbc_account_io(struct writeback_control *wbc, struct page *page,
size_t bytes);
+void cgroup_writeback_umount(void);
/**
* inode_attach_wb - associate an inode with its wb
@@ -301,6 +302,10 @@ static inline void wbc_account_io(struct writeback_control *wbc,
{
}
+static inline void cgroup_writeback_umount(void)
+{
+}
+
#endif /* CONFIG_CGROUP_WRITEBACK */
/*
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 2a91a0561a47..9b4c418bebd8 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -6,8 +6,8 @@
#include <linux/mutex.h>
#include <net/sock.h>
-void unix_inflight(struct file *fp);
-void unix_notinflight(struct file *fp);
+void unix_inflight(struct user_struct *user, struct file *fp);
+void unix_notinflight(struct user_struct *user, struct file *fp);
void unix_gc(void);
void wait_for_unix_gc(void);
struct sock *unix_get_socket(struct file *filp);
diff --git a/include/net/bonding.h b/include/net/bonding.h
index c1740a2794a3..93abe5f6188d 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -214,6 +214,7 @@ struct bonding {
* ALB mode (6) - to sync the use and modifications of its hash table
*/
spinlock_t mode_lock;
+ spinlock_t stats_lock;
u8 send_peer_notif;
u8 igmp_retrans;
#ifdef CONFIG_PROC_FS
diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h
index 6816f0fa5693..30a56ab2ccfb 100644
--- a/include/net/dst_metadata.h
+++ b/include/net/dst_metadata.h
@@ -44,6 +44,24 @@ static inline bool skb_valid_dst(const struct sk_buff *skb)
return dst && !(dst->flags & DST_METADATA);
}
+static inline int skb_metadata_dst_cmp(const struct sk_buff *skb_a,
+ const struct sk_buff *skb_b)
+{
+ const struct metadata_dst *a, *b;
+
+ if (!(skb_a->_skb_refdst | skb_b->_skb_refdst))
+ return 0;
+
+ a = (const struct metadata_dst *) skb_dst(skb_a);
+ b = (const struct metadata_dst *) skb_dst(skb_b);
+
+ if (!a != !b || a->u.tun_info.options_len != b->u.tun_info.options_len)
+ return 1;
+
+ return memcmp(&a->u.tun_info, &b->u.tun_info,
+ sizeof(a->u.tun_info) + a->u.tun_info.options_len);
+}
+
struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags);
struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags);
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 481fe1c9044c..49dcad4fe99e 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -270,8 +270,9 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
struct sock *newsk,
const struct request_sock *req);
-void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
- struct sock *child);
+struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
+ struct request_sock *req,
+ struct sock *child);
void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
unsigned long timeout);
struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
index 84b20835b736..0dc0a51da38f 100644
--- a/include/net/inet_ecn.h
+++ b/include/net/inet_ecn.h
@@ -111,11 +111,24 @@ static inline void ipv4_copy_dscp(unsigned int dscp, struct iphdr *inner)
struct ipv6hdr;
-static inline int IP6_ECN_set_ce(struct ipv6hdr *iph)
+/* Note:
+ * IP_ECN_set_ce() has to tweak the IPv4 checksum when setting CE,
+ * meaning both changes have no effect on skb->csum if/when CHECKSUM_COMPLETE.
+ * In the IPv6 case, no checksum compensates for the change in the IPv6
+ * header, so we have to update skb->csum.
+ */
+static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph)
{
+ __be32 from, to;
+
if (INET_ECN_is_not_ect(ipv6_get_dsfield(iph)))
return 0;
- *(__be32*)iph |= htonl(INET_ECN_CE << 20);
+
+ from = *(__be32 *)iph;
+ to = from | htonl(INET_ECN_CE << 20);
+ *(__be32 *)iph = to;
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->csum = csum_add(csum_sub(skb->csum, from), to);
return 1;
}
@@ -142,7 +155,7 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb)
case cpu_to_be16(ETH_P_IPV6):
if (skb_network_header(skb) + sizeof(struct ipv6hdr) <=
skb_tail_pointer(skb))
- return IP6_ECN_set_ce(ipv6_hdr(skb));
+ return IP6_ECN_set_ce(skb, ipv6_hdr(skb));
break;
}
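
Setting CE in the IPv6 header rewrites bytes that a CHECKSUM_COMPLETE skb has already summed, so the patch compensates skb->csum with csum_add(csum_sub(...)). A self-contained sketch of that one's-complement adjustment, with csum_add32()/csum_sub32() as stand-ins for the kernel's csum_add()/csum_sub() and byte order ignored for brevity:

#include <stdio.h>
#include <stdint.h>

/* One's-complement add with end-around carry, as Internet checksums use;
 * stand-in for the kernel's csum_add(). */
static uint32_t csum_add32(uint32_t sum, uint32_t addend)
{
	sum += addend;
	return sum + (sum < addend);	/* wrap the carry back in */
}

/* Stand-in for csum_sub(): subtracting is adding the complement. */
static uint32_t csum_sub32(uint32_t sum, uint32_t subtrahend)
{
	return csum_add32(sum, ~subtrahend);
}

int main(void)
{
	uint32_t from = 0x60000000u;		/* first word of an IPv6 header */
	uint32_t to = from | (3u << 20);	/* CE set in the traffic class */
	uint32_t skb_csum = 0x12345678u;	/* pretend CHECKSUM_COMPLETE sum */

	/* Same compensation as the patch:
	 * skb->csum = csum_add(csum_sub(skb->csum, from), to); */
	skb_csum = csum_add32(csum_sub32(skb_csum, from), to);
	printf("adjusted csum: 0x%08x\n", skb_csum);
	return 0;
}
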
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 4bbd221637cd..ba82feec2590 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -64,8 +64,16 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr)
void ip6_route_input(struct sk_buff *skb);
-struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
- struct flowi6 *fl6);
+struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
+ struct flowi6 *fl6, int flags);
+
+static inline struct dst_entry *ip6_route_output(struct net *net,
+ const struct sock *sk,
+ struct flowi6 *fl6)
+{
+ return ip6_route_output_flags(net, sk, fl6, 0);
+}
+
struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
int flags);
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 9f4df68105ab..3f98233388fb 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -61,6 +61,7 @@ struct fib_nh_exception {
struct rtable __rcu *fnhe_rth_input;
struct rtable __rcu *fnhe_rth_output;
unsigned long fnhe_stamp;
+ struct rcu_head rcu;
};
struct fnhe_hash_bucket {
diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h
index 8f81bbbc38fc..e0f4109e64c6 100644
--- a/include/net/iw_handler.h
+++ b/include/net/iw_handler.h
@@ -439,6 +439,12 @@ int dev_get_wireless_info(char *buffer, char **start, off_t offset, int length);
/* Send a single event to user space */
void wireless_send_event(struct net_device *dev, unsigned int cmd,
union iwreq_data *wrqu, const char *extra);
+#ifdef CONFIG_WEXT_CORE
+/* flush all previous wext events - if work is done from netdev notifiers */
+void wireless_nlevent_flush(void);
+#else
+static inline void wireless_nlevent_flush(void) {}
+#endif
/* We may need a function to send a stream of events to user space.
* More on that later... */
diff --git a/include/net/scm.h b/include/net/scm.h
index 262532d111f5..59fa93c01d2a 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -21,6 +21,7 @@ struct scm_creds {
struct scm_fp_list {
short count;
short max;
+ struct user_struct *user;
struct file *fp[SCM_MAX_FD];
};
diff --git a/include/net/tcp.h b/include/net/tcp.h
index bcce99a951f8..b36cebad6b2f 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -450,7 +450,7 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
void tcp_v4_mtu_reduced(struct sock *sk);
-void tcp_req_err(struct sock *sk, u32 seq);
+void tcp_req_err(struct sock *sk, u32 seq, bool abort);
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_create_openreq_child(const struct sock *sk,
struct request_sock *req,
diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
index f6cbef78db62..3b91ad5d5115 100644
--- a/include/sound/rawmidi.h
+++ b/include/sound/rawmidi.h
@@ -167,6 +167,10 @@ int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count);
int snd_rawmidi_transmit(struct snd_rawmidi_substream *substream,
unsigned char *buffer, int count);
+int __snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
+ unsigned char *buffer, int count);
+int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream,
+ int count);
/* main midi functions */
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 56cf8e485ef2..28ee5c2e6bcd 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -94,5 +94,8 @@ sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
sense_reason_t (*exec_cmd)(struct se_cmd *cmd));
bool target_sense_desc_format(struct se_device *dev);
+sector_t target_to_linux_sector(struct se_device *dev, sector_t lb);
+bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
+ struct request_queue *q, int block_size);
#endif /* TARGET_CORE_BACKEND_H */
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index aabf0aca0171..689f4d207122 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -138,6 +138,7 @@ enum se_cmd_flags_table {
SCF_COMPARE_AND_WRITE = 0x00080000,
SCF_COMPARE_AND_WRITE_POST = 0x00100000,
SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = 0x00200000,
+ SCF_ACK_KREF = 0x00400000,
};
/* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
@@ -490,6 +491,8 @@ struct se_cmd {
#define CMD_T_DEV_ACTIVE (1 << 7)
#define CMD_T_REQUEST_STOP (1 << 8)
#define CMD_T_BUSY (1 << 9)
+#define CMD_T_TAS (1 << 10)
+#define CMD_T_FABRIC_STOP (1 << 11)
spinlock_t t_state_lock;
struct kref cmd_kref;
struct completion t_transport_stop_comp;
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index c2e5d6cb34e3..ebd10e624598 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -307,7 +307,7 @@ header-y += nfs_mount.h
header-y += nl80211.h
header-y += n_r3964.h
header-y += nubus.h
-header-y += nvme.h
+header-y += nvme_ioctl.h
header-y += nvram.h
header-y += omap3isp.h
header-y += omapfb.h
diff --git a/ipc/shm.c b/ipc/shm.c
index 41787276e141..3174634ca4e5 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -156,11 +156,12 @@ static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);
/*
- * We raced in the idr lookup or with shm_destroy(). Either way, the
- * ID is busted.
+ * Callers of shm_lock() must validate the status of the returned ipc
+ * object pointer (as returned by ipc_lock()), and error out as
+ * appropriate.
*/
- WARN_ON(IS_ERR(ipcp));
-
+ if (IS_ERR(ipcp))
+ return (void *)ipcp;
return container_of(ipcp, struct shmid_kernel, shm_perm);
}
@@ -186,18 +187,33 @@ static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
}
-/* This is called by fork, once for every shm attach. */
-static void shm_open(struct vm_area_struct *vma)
+static int __shm_open(struct vm_area_struct *vma)
{
struct file *file = vma->vm_file;
struct shm_file_data *sfd = shm_file_data(file);
struct shmid_kernel *shp;
shp = shm_lock(sfd->ns, sfd->id);
+
+ if (IS_ERR(shp))
+ return PTR_ERR(shp);
+
shp->shm_atim = get_seconds();
shp->shm_lprid = task_tgid_vnr(current);
shp->shm_nattch++;
shm_unlock(shp);
+ return 0;
+}
+
+/* This is called by fork, once for every shm attach. */
+static void shm_open(struct vm_area_struct *vma)
+{
+ int err = __shm_open(vma);
+ /*
+ * We raced in the idr lookup or with shm_destroy().
+ * Either way, the ID is busted.
+ */
+ WARN_ON_ONCE(err);
}
/*
@@ -260,6 +276,14 @@ static void shm_close(struct vm_area_struct *vma)
down_write(&shm_ids(ns).rwsem);
/* remove from the list of attaches of the shm segment */
shp = shm_lock(ns, sfd->id);
+
+ /*
+ * We raced in the idr lookup or with shm_destroy().
+ * Either way, the ID is busted.
+ */
+ if (WARN_ON_ONCE(IS_ERR(shp)))
+ goto done; /* no-op */
+
shp->shm_lprid = task_tgid_vnr(current);
shp->shm_dtim = get_seconds();
shp->shm_nattch--;
@@ -267,6 +291,7 @@ static void shm_close(struct vm_area_struct *vma)
shm_destroy(ns, shp);
else
shm_unlock(shp);
+done:
up_write(&shm_ids(ns).rwsem);
}
@@ -388,17 +413,25 @@ static int shm_mmap(struct file *file, struct vm_area_struct *vma)
struct shm_file_data *sfd = shm_file_data(file);
int ret;
+ /*
+ * In case of remap_file_pages() emulation, the file can represent
+ * removed IPC ID: propagate the shm_lock() error to the caller.
+ */
+ ret = __shm_open(vma);
+ if (ret)
+ return ret;
+
ret = sfd->file->f_op->mmap(sfd->file, vma);
- if (ret != 0)
+ if (ret) {
+ shm_close(vma);
return ret;
+ }
sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
WARN_ON(!sfd->vm_ops->fault);
#endif
vma->vm_ops = &shm_vm_ops;
- shm_open(vma);
-
- return ret;
+ return 0;
}
static int shm_release(struct inode *ino, struct file *file)
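
shm_lock() now returns the ERR_PTR-encoded error instead of warning, and __shm_open()/shm_mmap() propagate it. A simplified userspace model of that encoding, assuming these ERR_PTR/IS_ERR/PTR_ERR definitions mirror the kernel's and lookup() is hypothetical:

#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* The topmost page of the address space encodes errnos. */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical lookup that can race with deletion, like shm_lock(). */
static void *lookup(int id)
{
	return id < 0 ? ERR_PTR(-22 /* EINVAL */) : (void *)0x1000;
}

int main(void)
{
	void *obj = lookup(-1);

	if (IS_ERR(obj)) {	/* caller validates instead of WARN_ON */
		printf("propagated error: %ld\n", PTR_ERR(obj));
		return 1;
	}
	return 0;
}
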
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 4504ca66118d..50da680c479f 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -166,7 +166,7 @@ static u64 bpf_get_current_comm(u64 r1, u64 size, u64 r3, u64 r4, u64 r5)
if (!task)
return -EINVAL;
- memcpy(buf, task->comm, min_t(size_t, size, sizeof(task->comm)));
+ strlcpy(buf, task->comm, min_t(size_t, size, sizeof(task->comm)));
return 0;
}
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index a7945d10b378..2e7f7ab739e4 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1121,6 +1121,16 @@ static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn)
return -EINVAL;
}
+ if ((opcode == BPF_LSH || opcode == BPF_RSH ||
+ opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
+ int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
+
+ if (insn->imm < 0 || insn->imm >= size) {
+ verbose("invalid shift %d\n", insn->imm);
+ return -EINVAL;
+ }
+ }
+
/* pattern match 'bpf_add Rx, imm' instruction */
if (opcode == BPF_ADD && BPF_CLASS(insn->code) == BPF_ALU64 &&
regs[insn->dst_reg].type == FRAME_PTR &&
@@ -2072,7 +2082,7 @@ static void adjust_branches(struct bpf_prog *prog, int pos, int delta)
/* adjust offset of jmps if necessary */
if (i < pos && i + insn->off + 1 > pos)
insn->off += delta;
- else if (i > pos && i + insn->off + 1 < pos)
+ else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
insn->off -= delta;
}
}
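
The verifier hunk above rejects shift immediates outside [0, width) because such shifts are undefined behavior in C and differ across CPUs (x86 masks the count, other ISAs may not). A small sketch mirroring the new range check:

#include <stdio.h>

/* Mirrors the check added for BPF_LSH/BPF_RSH/BPF_ARSH with a BPF_K
 * operand: the shift count must be within the operand width. */
static int shift_imm_ok(int imm, int is_alu64)
{
	int size = is_alu64 ? 64 : 32;

	return imm >= 0 && imm < size;
}

int main(void)
{
	printf("%d\n", shift_imm_ok(31, 0));	/* 1: legal 32-bit shift */
	printf("%d\n", shift_imm_ok(32, 0));	/* 0: undefined on many ISAs */
	printf("%d\n", shift_imm_ok(63, 1));	/* 1: legal 64-bit shift */
	return 0;
}
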
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index a57b7c86871c..f0bc36879e1d 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -57,7 +57,7 @@
#include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
#include <linux/kthread.h>
#include <linux/delay.h>
-
+#include <linux/cpuset.h>
#include <linux/atomic.h>
/*
@@ -2498,6 +2498,14 @@ static void cgroup_migrate_add_src(struct css_set *src_cset,
lockdep_assert_held(&cgroup_mutex);
lockdep_assert_held(&css_set_lock);
+ /*
+ * If ->dead, @src_cset is associated with one or more dead cgroups
+ * and doesn't contain any migratable tasks. Ignore it early so
+ * that the rest of the migration path doesn't get confused by it.
+ */
+ if (src_cset->dead)
+ return;
+
src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root);
if (!list_empty(&src_cset->mg_preload_node))
@@ -2819,6 +2827,7 @@ out_unlock_rcu:
out_unlock_threadgroup:
percpu_up_write(&cgroup_threadgroup_rwsem);
cgroup_kn_unlock(of->kn);
+ cpuset_post_attach_flush();
return ret ?: nbytes;
}
@@ -4838,6 +4847,7 @@ static void init_and_link_css(struct cgroup_subsys_state *css,
INIT_LIST_HEAD(&css->sibling);
INIT_LIST_HEAD(&css->children);
css->serial_nr = css_serial_nr_next++;
+ atomic_set(&css->online_cnt, 0);
if (cgroup_parent(cgrp)) {
css->parent = cgroup_css(cgroup_parent(cgrp), ss);
@@ -4860,6 +4870,10 @@ static int online_css(struct cgroup_subsys_state *css)
if (!ret) {
css->flags |= CSS_ONLINE;
rcu_assign_pointer(css->cgroup->subsys[ss->id], css);
+
+ atomic_inc(&css->online_cnt);
+ if (css->parent)
+ atomic_inc(&css->parent->online_cnt);
}
return ret;
}
@@ -5091,10 +5105,15 @@ static void css_killed_work_fn(struct work_struct *work)
container_of(work, struct cgroup_subsys_state, destroy_work);
mutex_lock(&cgroup_mutex);
- offline_css(css);
- mutex_unlock(&cgroup_mutex);
- css_put(css);
+ do {
+ offline_css(css);
+ css_put(css);
+ /* @css can't go away while we're holding cgroup_mutex */
+ css = css->parent;
+ } while (css && atomic_dec_and_test(&css->online_cnt));
+
+ mutex_unlock(&cgroup_mutex);
}
/* css kill confirmation processing requires process context, bounce */
@@ -5103,8 +5122,10 @@ static void css_killed_ref_fn(struct percpu_ref *ref)
struct cgroup_subsys_state *css =
container_of(ref, struct cgroup_subsys_state, refcnt);
- INIT_WORK(&css->destroy_work, css_killed_work_fn);
- queue_work(cgroup_destroy_wq, &css->destroy_work);
+ if (atomic_dec_and_test(&css->online_cnt)) {
+ INIT_WORK(&css->destroy_work, css_killed_work_fn);
+ queue_work(cgroup_destroy_wq, &css->destroy_work);
+ }
}
/**
@@ -5173,6 +5194,7 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
__releases(&cgroup_mutex) __acquires(&cgroup_mutex)
{
struct cgroup_subsys_state *css;
+ struct cgrp_cset_link *link;
int ssid;
lockdep_assert_held(&cgroup_mutex);
@@ -5193,11 +5215,18 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
return -EBUSY;
/*
- * Mark @cgrp dead. This prevents further task migration and child
- * creation by disabling cgroup_lock_live_group().
+ * Mark @cgrp and the associated csets dead. The former prevents
+ * further task migration and child creation by disabling
+ * cgroup_lock_live_group(). The latter makes the csets ignored by
+ * the migration path.
*/
cgrp->self.flags &= ~CSS_ONLINE;
+ spin_lock_bh(&css_set_lock);
+ list_for_each_entry(link, &cgrp->cset_links, cset_link)
+ link->cset->dead = true;
+ spin_unlock_bh(&css_set_lock);
+
/* initiate massacre of all css's */
for_each_css(css, ssid, cgrp)
kill_css(css);
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 02a8ea5c9963..2ade632197d5 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -286,6 +286,8 @@ static struct cpuset top_cpuset = {
static DEFINE_MUTEX(cpuset_mutex);
static DEFINE_SPINLOCK(callback_lock);
+static struct workqueue_struct *cpuset_migrate_mm_wq;
+
/*
* CPU / memory hotplug is handled asynchronously.
*/
@@ -971,31 +973,51 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
}
/*
- * cpuset_migrate_mm
- *
- * Migrate memory region from one set of nodes to another.
- *
- * Temporarilly set tasks mems_allowed to target nodes of migration,
- * so that the migration code can allocate pages on these nodes.
- *
- * While the mm_struct we are migrating is typically from some
- * other task, the task_struct mems_allowed that we are hacking
- * is for our current task, which must allocate new pages for that
- * migrating memory region.
+ * Migrate memory region from one set of nodes to another. This is
+ * performed asynchronously as it can be called from process migration path
+ * holding locks involved in process management. All mm migrations are
+ * performed in the queued order and can be waited for by flushing
+ * cpuset_migrate_mm_wq.
*/
+struct cpuset_migrate_mm_work {
+ struct work_struct work;
+ struct mm_struct *mm;
+ nodemask_t from;
+ nodemask_t to;
+};
+
+static void cpuset_migrate_mm_workfn(struct work_struct *work)
+{
+ struct cpuset_migrate_mm_work *mwork =
+ container_of(work, struct cpuset_migrate_mm_work, work);
+
+ /* on a wq worker, no need to worry about %current's mems_allowed */
+ do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
+ mmput(mwork->mm);
+ kfree(mwork);
+}
+
static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
const nodemask_t *to)
{
- struct task_struct *tsk = current;
-
- tsk->mems_allowed = *to;
+ struct cpuset_migrate_mm_work *mwork;
- do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
+ mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
+ if (mwork) {
+ mwork->mm = mm;
+ mwork->from = *from;
+ mwork->to = *to;
+ INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
+ queue_work(cpuset_migrate_mm_wq, &mwork->work);
+ } else {
+ mmput(mm);
+ }
+}
- rcu_read_lock();
- guarantee_online_mems(task_cs(tsk), &tsk->mems_allowed);
- rcu_read_unlock();
+void cpuset_post_attach_flush(void)
+{
+ flush_workqueue(cpuset_migrate_mm_wq);
}
/*
@@ -1096,7 +1118,8 @@ static void update_tasks_nodemask(struct cpuset *cs)
mpol_rebind_mm(mm, &cs->mems_allowed);
if (migrate)
cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
- mmput(mm);
+ else
+ mmput(mm);
}
css_task_iter_end(&it);
@@ -1541,11 +1564,11 @@ static void cpuset_attach(struct cgroup_taskset *tset)
* @old_mems_allowed is the right nodesets that we
* migrate mm from.
*/
- if (is_memory_migrate(cs)) {
+ if (is_memory_migrate(cs))
cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
&cpuset_attach_nodemask_to);
- }
- mmput(mm);
+ else
+ mmput(mm);
}
}
@@ -1710,6 +1733,7 @@ out_unlock:
mutex_unlock(&cpuset_mutex);
kernfs_unbreak_active_protection(of->kn);
css_put(&cs->css);
+ flush_workqueue(cpuset_migrate_mm_wq);
return retval ?: nbytes;
}
@@ -2355,6 +2379,9 @@ void __init cpuset_init_smp(void)
top_cpuset.effective_mems = node_states[N_MEMORY];
register_hotmemory_notifier(&cpuset_track_online_nodes_nb);
+
+ cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
+ BUG_ON(!cpuset_migrate_mm_wq);
}
/**
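
cpuset_migrate_mm() now packages its arguments and defers do_migrate_pages() to an ordered workqueue, since the caller may hold process-management locks; cpuset_post_attach_flush() waits for the queued work. A reduced, synchronous model of that package/queue/flush pattern (queue_work_model() and flush_workqueue_model() are illustrative stand-ins, not kernel APIs):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Arguments captured at queue time, like struct cpuset_migrate_mm_work. */
struct mm_work {
	void (*fn)(struct mm_work *);
	char from[16], to[16];
};

static struct mm_work *pending;

static void do_migrate(struct mm_work *w)
{
	printf("migrating pages: %s -> %s\n", w->from, w->to);
}

static void queue_work_model(struct mm_work *w)
{
	pending = w;			/* returns immediately, no locks held */
}

static void flush_workqueue_model(void)	/* cpuset_post_attach_flush() analog */
{
	if (pending) {
		pending->fn(pending);
		free(pending);
		pending = NULL;
	}
}

int main(void)
{
	struct mm_work *w = calloc(1, sizeof(*w));

	if (!w)
		return 1;
	w->fn = do_migrate;
	strcpy(w->from, "node0");
	strcpy(w->to, "node1");
	queue_work_model(w);
	flush_workqueue_model();
	return 0;
}
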
diff --git a/kernel/events/core.c b/kernel/events/core.c
index cfc227ccfceb..1e889a078dbc 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1580,14 +1580,14 @@ event_sched_out(struct perf_event *event,
perf_pmu_disable(event->pmu);
+ event->tstamp_stopped = tstamp;
+ event->pmu->del(event, 0);
+ event->oncpu = -1;
event->state = PERF_EVENT_STATE_INACTIVE;
if (event->pending_disable) {
event->pending_disable = 0;
event->state = PERF_EVENT_STATE_OFF;
}
- event->tstamp_stopped = tstamp;
- event->pmu->del(event, 0);
- event->oncpu = -1;
if (!is_software_event(event))
cpuctx->active_oncpu--;
@@ -3434,7 +3434,7 @@ find_lively_task_by_vpid(pid_t vpid)
/* Reuse ptrace permission checks for now. */
err = -EACCES;
- if (!ptrace_may_access(task, PTRACE_MODE_READ))
+ if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
goto errout;
return task;
@@ -7979,6 +7979,9 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
}
}
+ /* symmetric to unaccount_event() in _free_event() */
+ account_event(event);
+
return event;
err_per_task:
@@ -8342,8 +8345,6 @@ SYSCALL_DEFINE5(perf_event_open,
}
}
- account_event(event);
-
/*
* Special case software events and allow them to be part of
* any hardware group.
@@ -8582,7 +8583,12 @@ err_context:
perf_unpin_context(ctx);
put_ctx(ctx);
err_alloc:
- free_event(event);
+ /*
+ * If event_file is set, the fput() above will have called ->release()
+ * and that will take care of freeing the event.
+ */
+ if (!event_file)
+ free_event(event);
err_cpus:
put_online_cpus();
err_task:
@@ -8626,8 +8632,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
/* Mark owner so we could distinguish it from user events. */
event->owner = EVENT_OWNER_KERNEL;
- account_event(event);
-
ctx = find_get_context(event->pmu, task, event);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
diff --git a/kernel/futex.c b/kernel/futex.c
index 684d7549825a..461c72b2dac2 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -2755,6 +2755,11 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
if (q.pi_state && (q.pi_state->owner != current)) {
spin_lock(q.lock_ptr);
ret = fixup_pi_state_owner(uaddr2, &q, current);
+ /*
+ * Drop the reference to the pi state which
+ * the requeue_pi() code acquired for us.
+ */
+ free_pi_state(q.pi_state);
spin_unlock(q.lock_ptr);
}
} else {
@@ -2881,7 +2886,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
}
ret = -EPERM;
- if (!ptrace_may_access(p, PTRACE_MODE_READ))
+ if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
goto err_unlock;
head = p->robust_list;
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index 55c8c9349cfe..4ae3232e7a28 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -155,7 +155,7 @@ COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
}
ret = -EPERM;
- if (!ptrace_may_access(p, PTRACE_MODE_READ))
+ if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
goto err_unlock;
head = p->compat_robust_list;
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index a302cf9a2126..57bff7857e87 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -138,7 +138,8 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
unsigned int flags = 0, irq = desc->irq_data.irq;
struct irqaction *action = desc->action;
- do {
+ /* action might have become NULL since we dropped the lock */
+ while (action) {
irqreturn_t res;
trace_irq_handler_entry(irq, action);
@@ -173,7 +174,7 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
retval |= res;
action = action->next;
- } while (action);
+ }
add_interrupt_randomness(irq, flags);
diff --git a/kernel/kcmp.c b/kernel/kcmp.c
index 0aa69ea1d8fd..3a47fa998fe0 100644
--- a/kernel/kcmp.c
+++ b/kernel/kcmp.c
@@ -122,8 +122,8 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
&task2->signal->cred_guard_mutex);
if (ret)
goto err;
- if (!ptrace_may_access(task1, PTRACE_MODE_READ) ||
- !ptrace_may_access(task2, PTRACE_MODE_READ)) {
+ if (!ptrace_may_access(task1, PTRACE_MODE_READ_REALCREDS) ||
+ !ptrace_may_access(task2, PTRACE_MODE_READ_REALCREDS)) {
ret = -EPERM;
goto err_unlock;
}
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 7658d32c5c78..25ced161ebeb 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -111,7 +111,7 @@ EXPORT_SYMBOL(memunmap);
static void devm_memremap_release(struct device *dev, void *res)
{
- memunmap(res);
+ memunmap(*(void **)res);
}
static int devm_memremap_match(struct device *dev, void *res, void *match_data)
@@ -133,8 +133,10 @@ void *devm_memremap(struct device *dev, resource_size_t offset,
if (addr) {
*ptr = addr;
devres_add(dev, ptr);
- } else
+ } else {
devres_free(ptr);
+ return ERR_PTR(-ENXIO);
+ }
return addr;
}
diff --git a/kernel/module.c b/kernel/module.c
index 38c7bd5583ff..0e5c71195f18 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -327,6 +327,9 @@ struct load_info {
struct _ddebug *debug;
unsigned int num_debug;
bool sig_ok;
+#ifdef CONFIG_KALLSYMS
+ unsigned long mod_kallsyms_init_off;
+#endif
struct {
unsigned int sym, str, mod, vers, info, pcpu;
} index;
@@ -2492,10 +2495,21 @@ static void layout_symtab(struct module *mod, struct load_info *info)
strsect->sh_flags |= SHF_ALLOC;
strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
info->index.str) | INIT_OFFSET_MASK;
- mod->init_size = debug_align(mod->init_size);
pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
+
+ /* We'll tack temporary mod_kallsyms on the end. */
+ mod->init_size = ALIGN(mod->init_size,
+ __alignof__(struct mod_kallsyms));
+ info->mod_kallsyms_init_off = mod->init_size;
+ mod->init_size += sizeof(struct mod_kallsyms);
+ mod->init_size = debug_align(mod->init_size);
}
+/*
+ * We use the full symtab and strtab which layout_symtab arranged to
+ * be appended to the init section. Later we switch to the cut-down
+ * core-only ones.
+ */
static void add_kallsyms(struct module *mod, const struct load_info *info)
{
unsigned int i, ndst;
@@ -2504,28 +2518,33 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
char *s;
Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
- mod->symtab = (void *)symsec->sh_addr;
- mod->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
+ /* Set up to point into init section. */
+ mod->kallsyms = mod->module_init + info->mod_kallsyms_init_off;
+
+ mod->kallsyms->symtab = (void *)symsec->sh_addr;
+ mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
/* Make sure we get permanent strtab: don't use info->strtab. */
- mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
+ mod->kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
/* Set types up while we still have access to sections. */
- for (i = 0; i < mod->num_symtab; i++)
- mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
-
- mod->core_symtab = dst = mod->module_core + info->symoffs;
- mod->core_strtab = s = mod->module_core + info->stroffs;
- src = mod->symtab;
- for (ndst = i = 0; i < mod->num_symtab; i++) {
+ for (i = 0; i < mod->kallsyms->num_symtab; i++)
+ mod->kallsyms->symtab[i].st_info
+ = elf_type(&mod->kallsyms->symtab[i], info);
+
+ /* Now populate the cut down core kallsyms for after init. */
+ mod->core_kallsyms.symtab = dst = mod->module_core + info->symoffs;
+ mod->core_kallsyms.strtab = s = mod->module_core + info->stroffs;
+ src = mod->kallsyms->symtab;
+ for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) {
if (i == 0 ||
is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
dst[ndst] = src[i];
- dst[ndst++].st_name = s - mod->core_strtab;
- s += strlcpy(s, &mod->strtab[src[i].st_name],
+ dst[ndst++].st_name = s - mod->core_kallsyms.strtab;
+ s += strlcpy(s, &mod->kallsyms->strtab[src[i].st_name],
KSYM_NAME_LEN) + 1;
}
}
- mod->core_num_syms = ndst;
+ mod->core_kallsyms.num_symtab = ndst;
}
#else
static inline void layout_symtab(struct module *mod, struct load_info *info)
@@ -3274,9 +3293,8 @@ static noinline int do_init_module(struct module *mod)
module_put(mod);
trim_init_extable(mod);
#ifdef CONFIG_KALLSYMS
- mod->num_symtab = mod->core_num_syms;
- mod->symtab = mod->core_symtab;
- mod->strtab = mod->core_strtab;
+ /* Switch to core kallsyms now init is done: kallsyms may be walking! */
+ rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms);
#endif
mod_tree_remove_init(mod);
unset_module_init_ro_nx(mod);
@@ -3515,7 +3533,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
/* Module is ready to execute: parsing args may do that. */
after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
- -32768, 32767, NULL,
+ -32768, 32767, mod,
unknown_module_param_cb);
if (IS_ERR(after_dashes)) {
err = PTR_ERR(after_dashes);
@@ -3646,6 +3664,11 @@ static inline int is_arm_mapping_symbol(const char *str)
&& (str[2] == '\0' || str[2] == '.');
}
+static const char *symname(struct mod_kallsyms *kallsyms, unsigned int symnum)
+{
+ return kallsyms->strtab + kallsyms->symtab[symnum].st_name;
+}
+
static const char *get_ksymbol(struct module *mod,
unsigned long addr,
unsigned long *size,
@@ -3653,6 +3676,7 @@ static const char *get_ksymbol(struct module *mod,
{
unsigned int i, best = 0;
unsigned long nextval;
+ struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
/* At worse, next value is at end of module */
if (within_module_init(addr, mod))
@@ -3662,32 +3686,32 @@ static const char *get_ksymbol(struct module *mod,
/* Scan for closest preceding symbol, and next symbol. (ELF
starts real symbols at 1). */
- for (i = 1; i < mod->num_symtab; i++) {
- if (mod->symtab[i].st_shndx == SHN_UNDEF)
+ for (i = 1; i < kallsyms->num_symtab; i++) {
+ if (kallsyms->symtab[i].st_shndx == SHN_UNDEF)
continue;
/* We ignore unnamed symbols: they're uninformative
* and inserted at a whim. */
- if (mod->symtab[i].st_value <= addr
- && mod->symtab[i].st_value > mod->symtab[best].st_value
- && *(mod->strtab + mod->symtab[i].st_name) != '\0'
- && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
+ if (*symname(kallsyms, i) == '\0'
+ || is_arm_mapping_symbol(symname(kallsyms, i)))
+ continue;
+
+ if (kallsyms->symtab[i].st_value <= addr
+ && kallsyms->symtab[i].st_value > kallsyms->symtab[best].st_value)
best = i;
- if (mod->symtab[i].st_value > addr
- && mod->symtab[i].st_value < nextval
- && *(mod->strtab + mod->symtab[i].st_name) != '\0'
- && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
- nextval = mod->symtab[i].st_value;
+ if (kallsyms->symtab[i].st_value > addr
+ && kallsyms->symtab[i].st_value < nextval)
+ nextval = kallsyms->symtab[i].st_value;
}
if (!best)
return NULL;
if (size)
- *size = nextval - mod->symtab[best].st_value;
+ *size = nextval - kallsyms->symtab[best].st_value;
if (offset)
- *offset = addr - mod->symtab[best].st_value;
- return mod->strtab + mod->symtab[best].st_name;
+ *offset = addr - kallsyms->symtab[best].st_value;
+ return symname(kallsyms, best);
}
/* For kallsyms to ask for address resolution. NULL means not found. Careful
@@ -3777,19 +3801,21 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
preempt_disable();
list_for_each_entry_rcu(mod, &modules, list) {
+ struct mod_kallsyms *kallsyms;
+
if (mod->state == MODULE_STATE_UNFORMED)
continue;
- if (symnum < mod->num_symtab) {
- *value = mod->symtab[symnum].st_value;
- *type = mod->symtab[symnum].st_info;
- strlcpy(name, mod->strtab + mod->symtab[symnum].st_name,
- KSYM_NAME_LEN);
+ kallsyms = rcu_dereference_sched(mod->kallsyms);
+ if (symnum < kallsyms->num_symtab) {
+ *value = kallsyms->symtab[symnum].st_value;
+ *type = kallsyms->symtab[symnum].st_info;
+ strlcpy(name, symname(kallsyms, symnum), KSYM_NAME_LEN);
strlcpy(module_name, mod->name, MODULE_NAME_LEN);
*exported = is_exported(name, *value, mod);
preempt_enable();
return 0;
}
- symnum -= mod->num_symtab;
+ symnum -= kallsyms->num_symtab;
}
preempt_enable();
return -ERANGE;
@@ -3798,11 +3824,12 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
static unsigned long mod_find_symname(struct module *mod, const char *name)
{
unsigned int i;
+ struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
- for (i = 0; i < mod->num_symtab; i++)
- if (strcmp(name, mod->strtab+mod->symtab[i].st_name) == 0 &&
- mod->symtab[i].st_info != 'U')
- return mod->symtab[i].st_value;
+ for (i = 0; i < kallsyms->num_symtab; i++)
+ if (strcmp(name, symname(kallsyms, i)) == 0 &&
+ kallsyms->symtab[i].st_info != 'U')
+ return kallsyms->symtab[i].st_value;
return 0;
}
@@ -3841,11 +3868,14 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
module_assert_mutex();
list_for_each_entry(mod, &modules, list) {
+ /* We hold module_mutex: no need for rcu_dereference_sched */
+ struct mod_kallsyms *kallsyms = mod->kallsyms;
+
if (mod->state == MODULE_STATE_UNFORMED)
continue;
- for (i = 0; i < mod->num_symtab; i++) {
- ret = fn(data, mod->strtab + mod->symtab[i].st_name,
- mod, mod->symtab[i].st_value);
+ for (i = 0; i < kallsyms->num_symtab; i++) {
+ ret = fn(data, symname(kallsyms, i),
+ mod, kallsyms->symtab[i].st_value);
if (ret != 0)
return ret;
}
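
The module.c changes gather symtab, strtab and num_symtab behind a single mod->kallsyms pointer, so switching from the full init-section tables to the cut-down core tables becomes one RCU-published store instead of three racy field updates. A reduced model of why a single pointer flip is safe for readers (struct symtab and both tables here are illustrative):

#include <stdio.h>

struct symtab {
	const char *which;
	int nsyms;
};

static struct symtab init_tab = { "init section (full)", 100 };
static struct symtab core_tab = { "core section (cut down)", 40 };

/* Readers always dereference one pointer; in the kernel the switch is
 * rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms). */
static struct symtab *kallsyms = &init_tab;

int main(void)
{
	printf("before init done: %s, %d syms\n", kallsyms->which, kallsyms->nsyms);
	kallsyms = &core_tab;	/* the single switch in do_init_module() */
	printf("after init done:  %s, %d syms\n", kallsyms->which, kallsyms->nsyms);
	return 0;
}
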
diff --git a/kernel/panic.c b/kernel/panic.c
index f07bfc9fe613..223564d3e1f8 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -160,8 +160,7 @@ void panic(const char *fmt, ...)
 * panic() is not being called from OOPS.
*/
debug_locks_off();
- console_trylock();
- console_unlock();
+ console_flush_on_panic();
if (!panic_blink)
panic_blink = no_blink;
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index b7342a24f559..b7dd5718836e 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -339,6 +339,7 @@ int hibernation_snapshot(int platform_mode)
pm_message_t msg;
int error;
+ pm_suspend_clear_flags();
error = platform_begin(platform_mode);
if (error)
goto Close;
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 28fb44dccbad..e7e586bb2022 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -2241,13 +2241,24 @@ void console_unlock(void)
static u64 seen_seq;
unsigned long flags;
bool wake_klogd = false;
- bool retry;
+ bool do_cond_resched, retry;
if (console_suspended) {
up_console_sem();
return;
}
+ /*
+ * Console drivers are called under logbuf_lock, so
+ * @console_may_schedule should be cleared before; however, we may
+ * end up dumping a lot of lines, for example, if called from
+ * console registration path, and should invoke cond_resched()
+ * between lines if allowable. Not doing so can cause a very long
+ * scheduling stall on a slow console leading to RCU stall and
+ * softlockup warnings which exacerbate the issue with more
+ * messages practically incapacitating the system.
+ */
+ do_cond_resched = console_may_schedule;
console_may_schedule = 0;
/* flush buffered message fragment immediately to console */
@@ -2319,6 +2330,9 @@ skip:
call_console_drivers(level, ext_text, ext_len, text, len);
start_critical_timings();
local_irq_restore(flags);
+
+ if (do_cond_resched)
+ cond_resched();
}
console_locked = 0;
@@ -2386,6 +2400,25 @@ void console_unblank(void)
console_unlock();
}
+/**
+ * console_flush_on_panic - flush console content on panic
+ *
+ * Immediately output all pending messages no matter what.
+ */
+void console_flush_on_panic(void)
+{
+ /*
+ * If someone else is holding the console lock, trylock will fail
+ * and may_schedule may be set. Ignore and proceed to unlock so
+ * that messages are flushed out. As this can be called from any
+ * context and we don't want to get preempted while flushing,
+ * ensure may_schedule is cleared.
+ */
+ console_trylock();
+ console_may_schedule = 0;
+ console_unlock();
+}
+
/*
* Return the console tty driver structure and its associated index
*/
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index b760bae64cf1..3189e51db7e8 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -219,6 +219,14 @@ static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
const struct cred *cred = current_cred(), *tcred;
+ int dumpable = 0;
+ kuid_t caller_uid;
+ kgid_t caller_gid;
+
+ if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
+ WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
+ return -EPERM;
+ }
/* May we inspect the given task?
* This check is used both for attaching with ptrace
@@ -228,18 +236,33 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
* because setting up the necessary parent/child relationship
* or halting the specified task is impossible.
*/
- int dumpable = 0;
+
/* Don't let security modules deny introspection */
if (same_thread_group(task, current))
return 0;
rcu_read_lock();
+ if (mode & PTRACE_MODE_FSCREDS) {
+ caller_uid = cred->fsuid;
+ caller_gid = cred->fsgid;
+ } else {
+ /*
+ * Using the euid would make more sense here, but something
+ * in userland might rely on the old behavior, and this
+ * shouldn't be a security problem since
+ * PTRACE_MODE_REALCREDS implies that the caller explicitly
+ * used a syscall that requests access to another process
+ * (and not a filesystem syscall to procfs).
+ */
+ caller_uid = cred->uid;
+ caller_gid = cred->gid;
+ }
tcred = __task_cred(task);
- if (uid_eq(cred->uid, tcred->euid) &&
- uid_eq(cred->uid, tcred->suid) &&
- uid_eq(cred->uid, tcred->uid) &&
- gid_eq(cred->gid, tcred->egid) &&
- gid_eq(cred->gid, tcred->sgid) &&
- gid_eq(cred->gid, tcred->gid))
+ if (uid_eq(caller_uid, tcred->euid) &&
+ uid_eq(caller_uid, tcred->suid) &&
+ uid_eq(caller_uid, tcred->uid) &&
+ gid_eq(caller_gid, tcred->egid) &&
+ gid_eq(caller_gid, tcred->sgid) &&
+ gid_eq(caller_gid, tcred->gid))
goto ok;
if (ptrace_has_cap(tcred->user_ns, mode))
goto ok;
@@ -306,7 +329,7 @@ static int ptrace_attach(struct task_struct *task, long request,
goto out;
task_lock(task);
- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
task_unlock(task);
if (retval)
goto unlock_creds;
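
__ptrace_may_access() now insists that callers declare whether the check uses filesystem credentials (procfs-style access) or real credentials (explicit syscalls), and refuses ambiguous modes. A reduced model of that selection, assuming the flag values below are illustrative rather than the kernel's:

#include <stdio.h>

#define PTRACE_MODE_FSCREDS   0x04	/* values are illustrative only */
#define PTRACE_MODE_REALCREDS 0x08

struct cred { unsigned uid, fsuid; };

/* Exactly one of the *CREDS flags must be set; otherwise deny, as the
 * new WARN + -EPERM path in __ptrace_may_access() does. */
static int pick_uid(const struct cred *c, unsigned mode, unsigned *uid)
{
	if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS))
		return -1;	/* -EPERM in the kernel */
	*uid = (mode & PTRACE_MODE_FSCREDS) ? c->fsuid : c->uid;
	return 0;
}

int main(void)
{
	struct cred c = { .uid = 1000, .fsuid = 0 };
	unsigned uid;

	if (pick_uid(&c, PTRACE_MODE_FSCREDS, &uid) == 0)
		printf("checking with uid %u\n", uid);
	if (pick_uid(&c, 0, &uid) < 0)
		printf("mode without a *CREDS flag denied\n");
	return 0;
}
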
diff --git a/kernel/resource.c b/kernel/resource.c
index f150dbbe6f62..249b1eb1e6e1 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -1083,9 +1083,10 @@ struct resource * __request_region(struct resource *parent,
if (!conflict)
break;
if (conflict != parent) {
- parent = conflict;
- if (!(conflict->flags & IORESOURCE_BUSY))
+ if (!(conflict->flags & IORESOURCE_BUSY)) {
+ parent = conflict;
continue;
+ }
}
if (conflict->flags & flags & IORESOURCE_MUXED) {
add_wait_queue(&muxed_resource_wait, &wait);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 61b0914cc7aa..f11fdb5c8084 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5525,6 +5525,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
case CPU_UP_PREPARE:
rq->calc_load_update = calc_load_update;
+ account_reset_rq(rq);
break;
case CPU_ONLINE:
@@ -6738,7 +6739,7 @@ static void sched_init_numa(void)
sched_domains_numa_masks[i][j] = mask;
- for (k = 0; k < nr_node_ids; k++) {
+ for_each_node(k) {
if (node_distance(j, k) > sched_domains_numa_distance[i])
continue;
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 05de80b48586..f74ea89e77a8 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -259,21 +259,21 @@ static __always_inline bool steal_account_process_tick(void)
#ifdef CONFIG_PARAVIRT
if (static_key_false(&paravirt_steal_enabled)) {
u64 steal;
- cputime_t steal_ct;
+ unsigned long steal_jiffies;
steal = paravirt_steal_clock(smp_processor_id());
steal -= this_rq()->prev_steal_time;
/*
- * cputime_t may be less precise than nsecs (eg: if it's
- * based on jiffies). Lets cast the result to cputime
+ * steal is in nsecs but our caller is expecting steal
+ * time in jiffies. Let's cast the result to jiffies
* granularity and account the rest on the next rounds.
*/
- steal_ct = nsecs_to_cputime(steal);
- this_rq()->prev_steal_time += cputime_to_nsecs(steal_ct);
+ steal_jiffies = nsecs_to_jiffies(steal);
+ this_rq()->prev_steal_time += jiffies_to_nsecs(steal_jiffies);
- account_steal_time(steal_ct);
- return steal_ct;
+ account_steal_time(jiffies_to_cputime(steal_jiffies));
+ return steal_jiffies;
}
#endif
return false;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b242775bf670..0517abd7dd73 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1770,3 +1770,16 @@ static inline u64 irq_time_read(int cpu)
}
#endif /* CONFIG_64BIT */
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
+
+static inline void account_reset_rq(struct rq *rq)
+{
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+ rq->prev_irq_time = 0;
+#endif
+#ifdef CONFIG_PARAVIRT
+ rq->prev_steal_time = 0;
+#endif
+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
+ rq->prev_steal_time_rq = 0;
+#endif
+}
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 580ac2d4024f..15a1795bbba1 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -316,24 +316,24 @@ static inline void seccomp_sync_threads(void)
put_seccomp_filter(thread);
smp_store_release(&thread->seccomp.filter,
caller->seccomp.filter);
+
+ /*
+ * Don't let an unprivileged task work around
+ * the no_new_privs restriction by creating
+ * a thread that sets it up, enters seccomp,
+ * then dies.
+ */
+ if (task_no_new_privs(caller))
+ task_set_no_new_privs(thread);
+
/*
* Opt the other thread into seccomp if needed.
* As threads are considered to be trust-realm
* equivalent (see ptrace_may_access), it is safe to
* allow one thread to transition the other.
*/
- if (thread->seccomp.mode == SECCOMP_MODE_DISABLED) {
- /*
- * Don't let an unprivileged task work around
- * the no_new_privs restriction by creating
- * a thread that sets it up, enters seccomp,
- * then dies.
- */
- if (task_no_new_privs(caller))
- task_set_no_new_privs(thread);
-
+ if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
- }
}
}
diff --git a/kernel/sys.c b/kernel/sys.c
index 11333311cf1c..b5a8e844a968 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1855,11 +1855,13 @@ static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data
user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL;
}
- if (prctl_map.exe_fd != (u32)-1)
+ if (prctl_map.exe_fd != (u32)-1) {
error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
- down_read(&mm->mmap_sem);
- if (error)
- goto out;
+ if (error)
+ return error;
+ }
+
+ down_write(&mm->mmap_sem);
/*
* We don't validate if these members are pointing to
@@ -1896,10 +1898,8 @@ static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data
if (prctl_map.auxv_size)
memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));
- error = 0;
-out:
- up_read(&mm->mmap_sem);
- return error;
+ up_write(&mm->mmap_sem);
+ return 0;
}
#endif /* CONFIG_CHECKPOINT_RESTORE */
@@ -1965,7 +1965,7 @@ static int prctl_set_mm(int opt, unsigned long addr,
error = -EINVAL;
- down_read(&mm->mmap_sem);
+ down_write(&mm->mmap_sem);
vma = find_vma(mm, addr);
prctl_map.start_code = mm->start_code;
@@ -2058,7 +2058,7 @@ static int prctl_set_mm(int opt, unsigned long addr,
error = 0;
out:
- up_read(&mm->mmap_sem);
+ up_write(&mm->mmap_sem);
return error;
}
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index 7e7746a42a62..10a1d7dc9313 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -1321,7 +1321,7 @@ static ssize_t binary_sysctl(const int *name, int nlen,
}
mnt = task_active_pid_ns(current)->proc_mnt;
- file = file_open_root(mnt->mnt_root, mnt, pathname, flags);
+ file = file_open_root(mnt->mnt_root, mnt, pathname, flags, 0);
result = PTR_ERR(file);
if (IS_ERR(file))
goto out_putname;
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 435b8850dd80..fa909f9fd559 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -897,10 +897,10 @@ static int enqueue_hrtimer(struct hrtimer *timer,
*/
static void __remove_hrtimer(struct hrtimer *timer,
struct hrtimer_clock_base *base,
- unsigned long newstate, int reprogram)
+ u8 newstate, int reprogram)
{
struct hrtimer_cpu_base *cpu_base = base->cpu_base;
- unsigned int state = timer->state;
+ u8 state = timer->state;
timer->state = newstate;
if (!(state & HRTIMER_STATE_ENQUEUED))
@@ -930,7 +930,7 @@ static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart)
{
if (hrtimer_is_queued(timer)) {
- unsigned long state = timer->state;
+ u8 state = timer->state;
int reprogram;
/*
@@ -954,6 +954,22 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool rest
return 0;
}
+static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
+ const enum hrtimer_mode mode)
+{
+#ifdef CONFIG_TIME_LOW_RES
+ /*
+ * CONFIG_TIME_LOW_RES indicates that the system has no way to return
+ * granular time values. For relative timers we add hrtimer_resolution
+ * (i.e. one jiffy) to prevent short timeouts.
+ */
+ timer->is_rel = mode & HRTIMER_MODE_REL;
+ if (timer->is_rel)
+ tim = ktime_add_safe(tim, ktime_set(0, hrtimer_resolution));
+#endif
+ return tim;
+}
+
/**
* hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
* @timer: the timer to be added
@@ -974,19 +990,10 @@ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
/* Remove an active timer from the queue: */
remove_hrtimer(timer, base, true);
- if (mode & HRTIMER_MODE_REL) {
+ if (mode & HRTIMER_MODE_REL)
tim = ktime_add_safe(tim, base->get_time());
- /*
- * CONFIG_TIME_LOW_RES is a temporary way for architectures
- * to signal that they simply return xtime in
- * do_gettimeoffset(). In this case we want to round up by
- * resolution when starting a relative timer, to avoid short
- * timeouts. This will go away with the GTOD framework.
- */
-#ifdef CONFIG_TIME_LOW_RES
- tim = ktime_add_safe(tim, ktime_set(0, hrtimer_resolution));
-#endif
- }
+
+ tim = hrtimer_update_lowres(timer, tim, mode);
hrtimer_set_expires_range_ns(timer, tim, delta_ns);
@@ -1074,19 +1081,23 @@ EXPORT_SYMBOL_GPL(hrtimer_cancel);
/**
* hrtimer_get_remaining - get remaining time for the timer
* @timer: the timer to read
+ * @adjust: adjust relative timers when CONFIG_TIME_LOW_RES=y
*/
-ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
+ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust)
{
unsigned long flags;
ktime_t rem;
lock_hrtimer_base(timer, &flags);
- rem = hrtimer_expires_remaining(timer);
+ if (IS_ENABLED(CONFIG_TIME_LOW_RES) && adjust)
+ rem = hrtimer_expires_remaining_adjusted(timer);
+ else
+ rem = hrtimer_expires_remaining(timer);
unlock_hrtimer_base(timer, &flags);
return rem;
}
-EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
+EXPORT_SYMBOL_GPL(__hrtimer_get_remaining);
#ifdef CONFIG_NO_HZ_COMMON
/**
@@ -1220,6 +1231,14 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
fn = timer->function;
/*
+ * Clear the 'is relative' flag for the TIME_LOW_RES case. If the
+ * timer is restarted with a period then it becomes an absolute
+ * timer. If it's not restarted it does not matter.
+ */
+ if (IS_ENABLED(CONFIG_TIME_LOW_RES))
+ timer->is_rel = false;
+
+ /*
* Because we run timers from hardirq context, there is no chance
 * they get migrated to another cpu, therefore it's safe to unlock
* the timer base.
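
hrtimer_update_lowres() centralizes the CONFIG_TIME_LOW_RES padding: a relative expiry gains one resolution unit so it can never round down to an immediate timeout. A tiny model of the arithmetic, assuming a 10 ms tick purely for illustration:

#include <stdio.h>

#define RESOLUTION_NS 10000000LL	/* illustrative 10 ms hrtimer_resolution */

/* Model of hrtimer_update_lowres(): only relative timers are padded;
 * absolute expiries pass through untouched. */
static long long update_lowres(long long expires_ns, int is_rel)
{
	return is_rel ? expires_ns + RESOLUTION_NS : expires_ns;
}

int main(void)
{
	printf("%lld\n", update_lowres(3000000, 1));	/* 3 ms -> 13 ms */
	printf("%lld\n", update_lowres(3000000, 0));	/* absolute: unchanged */
	return 0;
}
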
diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c
index 8d262b467573..1d5c7204ddc9 100644
--- a/kernel/time/itimer.c
+++ b/kernel/time/itimer.c
@@ -26,7 +26,7 @@
*/
static struct timeval itimer_get_remtime(struct hrtimer *timer)
{
- ktime_t rem = hrtimer_get_remaining(timer);
+ ktime_t rem = __hrtimer_get_remaining(timer, true);
/*
* Racy but safe: if the itimer expires after the above
diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
index ce033c7aa2e8..9cff0ab82b63 100644
--- a/kernel/time/posix-clock.c
+++ b/kernel/time/posix-clock.c
@@ -69,10 +69,10 @@ static ssize_t posix_clock_read(struct file *fp, char __user *buf,
static unsigned int posix_clock_poll(struct file *fp, poll_table *wait)
{
struct posix_clock *clk = get_posix_clock(fp);
- int result = 0;
+ unsigned int result = 0;
if (!clk)
- return -ENODEV;
+ return POLLERR;
if (clk->ops.poll)
result = clk->ops.poll(clk, fp, wait);
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index 31d11ac9fa47..f2826c35e918 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -760,7 +760,7 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
(timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv);
- remaining = ktime_sub(hrtimer_get_expires(timer), now);
+ remaining = __hrtimer_expires_remaining_adjusted(timer, now);
/* Return 0 only, when the timer is expired and not pending */
if (remaining.tv64 <= 0) {
/*
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 7c7ec4515983..22c57e191a23 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -977,9 +977,9 @@ static void tick_nohz_switch_to_nohz(void)
/* Get the next period */
next = tick_init_jiffy_update();
- hrtimer_forward_now(&ts->sched_timer, tick_period);
hrtimer_set_expires(&ts->sched_timer, next);
- tick_program_event(next, 1);
+ hrtimer_forward_now(&ts->sched_timer, tick_period);
+ tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
}
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index d563c1960302..99188ee5d9d0 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -305,8 +305,7 @@ static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
delta = timekeeping_get_delta(tkr);
- nsec = delta * tkr->mult + tkr->xtime_nsec;
- nsec >>= tkr->shift;
+ nsec = (delta * tkr->mult + tkr->xtime_nsec) >> tkr->shift;
/* If arch requires, add in get_arch_timeoffset() */
return nsec + arch_gettimeoffset();
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index f75e35b60149..ba7d8b288bb3 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -69,7 +69,7 @@ print_timer(struct seq_file *m, struct hrtimer *taddr, struct hrtimer *timer,
print_name_offset(m, taddr);
SEQ_printf(m, ", ");
print_name_offset(m, timer->function);
- SEQ_printf(m, ", S:%02lx", timer->state);
+ SEQ_printf(m, ", S:%02x", timer->state);
#ifdef CONFIG_TIMER_STATS
SEQ_printf(m, ", ");
print_name_offset(m, timer->start_site);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 5f375d4c05fb..dd4d86ae8e21 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1771,7 +1771,7 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr,
{
__buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(tr, buffer, flags, 6, pc, regs);
+ ftrace_trace_stack(tr, buffer, flags, 0, pc, regs);
ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
@@ -5038,7 +5038,10 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
spd.nr_pages = i;
- ret = splice_to_pipe(pipe, &spd);
+ if (i)
+ ret = splice_to_pipe(pipe, &spd);
+ else
+ ret = 0;
out:
splice_shrink_spd(&spd);
return ret;
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 4f6ef6912e00..d202d991edae 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -97,16 +97,16 @@ trace_find_event_field(struct trace_event_call *call, char *name)
struct ftrace_event_field *field;
struct list_head *head;
- field = __find_event_field(&ftrace_generic_fields, name);
+ head = trace_get_fields(call);
+ field = __find_event_field(head, name);
if (field)
return field;
- field = __find_event_field(&ftrace_common_fields, name);
+ field = __find_event_field(&ftrace_generic_fields, name);
if (field)
return field;
- head = trace_get_fields(call);
- return __find_event_field(head, name);
+ return __find_event_field(&ftrace_common_fields, name);
}
static int __trace_define_field(struct list_head *head, const char *type,
@@ -171,8 +171,10 @@ static int trace_define_generic_fields(void)
{
int ret;
- __generic_field(int, cpu, FILTER_OTHER);
- __generic_field(char *, comm, FILTER_PTR_STRING);
+ __generic_field(int, CPU, FILTER_CPU);
+ __generic_field(int, cpu, FILTER_CPU);
+ __generic_field(char *, COMM, FILTER_COMM);
+ __generic_field(char *, comm, FILTER_COMM);
return ret;
}
@@ -869,7 +871,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
* The ftrace subsystem is for showing formats only.
* They can not be enabled or disabled via the event files.
*/
- if (call->class && call->class->reg)
+ if (call->class && call->class->reg &&
+ !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
return file;
}
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index f93a219b18da..6816302542b2 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -1043,13 +1043,14 @@ static int init_pred(struct filter_parse_state *ps,
return -EINVAL;
}
- if (is_string_field(field)) {
+ if (field->filter_type == FILTER_COMM) {
+ filter_build_regex(pred);
+ fn = filter_pred_comm;
+ pred->regex.field_len = TASK_COMM_LEN;
+ } else if (is_string_field(field)) {
filter_build_regex(pred);
- if (!strcmp(field->name, "comm")) {
- fn = filter_pred_comm;
- pred->regex.field_len = TASK_COMM_LEN;
- } else if (field->filter_type == FILTER_STATIC_STRING) {
+ if (field->filter_type == FILTER_STATIC_STRING) {
fn = filter_pred_string;
pred->regex.field_len = field->size;
} else if (field->filter_type == FILTER_DYN_STRING)
@@ -1072,7 +1073,7 @@ static int init_pred(struct filter_parse_state *ps,
}
pred->val = val;
- if (!strcmp(field->name, "cpu"))
+ if (field->filter_type == FILTER_CPU)
fn = filter_pred_cpu;
else
fn = select_comparison_fn(pred->op, field->size,
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index e4e56589ec1d..be3222b7d72e 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -109,8 +109,12 @@ static int func_prolog_dec(struct trace_array *tr,
return 0;
local_save_flags(*flags);
- /* slight chance to get a false positive on tracing_cpu */
- if (!irqs_disabled_flags(*flags))
+ /*
+ * Slight chance to get a false positive on tracing_cpu,
+ * although I'm starting to think there isn't a chance.
+ * Leave this for now just to be paranoid.
+ */
+ if (!irqs_disabled_flags(*flags) && !preempt_count())
return 0;
*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index 060df67dbdd1..f96f0383f6c6 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -296,6 +296,9 @@ static int t_show(struct seq_file *m, void *v)
const char *str = *fmt;
int i;
+ if (!*fmt)
+ return 0;
+
seq_printf(m, "0x%lx : \"", *(unsigned long *)fmt);
/*
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index dda9e6742950..202df6cffcca 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -126,6 +126,13 @@ check_stack(unsigned long ip, unsigned long *stack)
}
/*
+ * Some archs may not have the passed-in ip in the dump.
+ * If that happens, we need to show everything.
+ */
+ if (i == stack_trace_max.nr_entries)
+ i = 0;
+
+ /*
* Now find where in the stack these are.
*/
x = 0;
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index e864906af3fc..1f1b05f5a94b 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -1020,6 +1020,9 @@ static int proc_watchdog_common(int which, struct ctl_table *table, int write,
* both lockup detectors are disabled if proc_watchdog_update()
* returns an error.
*/
+ if (old == new)
+ goto out;
+
err = proc_watchdog_update();
}
out:
@@ -1064,7 +1067,7 @@ int proc_soft_watchdog(struct ctl_table *table, int write,
int proc_watchdog_thresh(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- int err, old;
+ int err, old, new;
get_online_cpus();
mutex_lock(&watchdog_proc_mutex);
@@ -1084,6 +1087,10 @@ int proc_watchdog_thresh(struct ctl_table *table, int write,
/*
* Update the sample period. Restore on failure.
*/
+ new = ACCESS_ONCE(watchdog_thresh);
+ if (old == new)
+ goto out;
+
set_sample_period();
err = proc_watchdog_update();
if (err) {
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index c579dbab2e36..450c21fd0e6e 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -568,6 +568,16 @@ static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
int node)
{
assert_rcu_or_wq_mutex_or_pool_mutex(wq);
+
+ /*
+ * XXX: @node can be NUMA_NO_NODE if CPU goes offline while a
+ * delayed item is pending. The plan is to keep CPU -> NODE
+ * mapping valid and stable across CPU on/offlines. Once that
+ * happens, this workaround can be removed.
+ */
+ if (unlikely(node == NUMA_NO_NODE))
+ return wq->dfl_pwq;
+
return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
}
@@ -1458,13 +1468,13 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
timer_stats_timer_set_start_info(&dwork->timer);
dwork->wq = wq;
- /* timer isn't guaranteed to run in this cpu, record earlier */
- if (cpu == WORK_CPU_UNBOUND)
- cpu = raw_smp_processor_id();
dwork->cpu = cpu;
timer->expires = jiffies + delay;
- add_timer_on(timer, cpu);
+ if (unlikely(cpu != WORK_CPU_UNBOUND))
+ add_timer_on(timer, cpu);
+ else
+ add_timer(timer);
}
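/*
 * A usage sketch of the behaviour change above; example_wq, example_fn
 * and example_queue are hypothetical. An unbound delayed work no
 * longer has its timer pinned to the queueing CPU, while an explicit
 * CPU still pins both the timer and the work item.
 */
static void example_fn(struct work_struct *work) { }
static DECLARE_DELAYED_WORK(example_dwork, example_fn);

static void example_queue(struct workqueue_struct *example_wq)
{
	/* timer may fire anywhere; dwork->cpu stays WORK_CPU_UNBOUND */
	queue_delayed_work(example_wq, &example_dwork, HZ);

	/* timer and work both pinned to CPU 1 */
	queue_delayed_work_on(1, example_wq, &example_dwork, HZ);
}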
/**
diff --git a/lib/Kconfig b/lib/Kconfig
index f0df318104e7..1a48744253d7 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -210,9 +210,11 @@ config RANDOM32_SELFTEST
# compression support is select'ed if needed
#
config 842_COMPRESS
+ select CRC32
tristate
config 842_DECOMPRESS
+ select CRC32
tristate
config ZLIB_INFLATE
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index d34bd24c2c84..4a1515f4b452 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -1181,7 +1181,7 @@ static inline bool overlap(void *addr, unsigned long len, void *start, void *end
static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
{
- if (overlap(addr, len, _text, _etext) ||
+ if (overlap(addr, len, _stext, _etext) ||
overlap(addr, len, __start_rodata, __end_rodata))
err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
}
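/*
 * The fix above only swaps the lower bound of the kernel-text window:
 * on some architectures the text section begins at _stext, below
 * _text, so mappings into that first stretch escaped the check. A
 * sketch of the half-open interval test overlap() performs, with
 * illustrative names:
 */
static bool ranges_overlap(unsigned long addr, unsigned long len,
			   unsigned long start, unsigned long end)
{
	/* [addr, addr+len) and [start, end) intersect iff each one
	 * begins before the other ends */
	return addr < end && addr + len > start;
}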
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index 6745c6230db3..c30d07e99dba 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -25,6 +25,7 @@ static atomic_t dump_lock = ATOMIC_INIT(-1);
asmlinkage __visible void dump_stack(void)
{
+ unsigned long flags;
int was_locked;
int old;
int cpu;
@@ -33,9 +34,8 @@ asmlinkage __visible void dump_stack(void)
* Permit this cpu to perform nested stack dumps while serialising
* against other CPUs
*/
- preempt_disable();
-
retry:
+ local_irq_save(flags);
cpu = smp_processor_id();
old = atomic_cmpxchg(&dump_lock, -1, cpu);
if (old == -1) {
@@ -43,6 +43,7 @@ retry:
} else if (old == cpu) {
was_locked = 1;
} else {
+ local_irq_restore(flags);
cpu_relax();
goto retry;
}
@@ -52,7 +53,7 @@ retry:
if (!was_locked)
atomic_set(&dump_lock, -1);
- preempt_enable();
+ local_irq_restore(flags);
}
#else
asmlinkage __visible void dump_stack(void)
diff --git a/lib/klist.c b/lib/klist.c
index d74cf7a29afd..0507fa5d84c5 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -282,9 +282,9 @@ void klist_iter_init_node(struct klist *k, struct klist_iter *i,
struct klist_node *n)
{
i->i_klist = k;
- i->i_cur = n;
- if (n)
- kref_get(&n->n_ref);
+ i->i_cur = NULL;
+ if (n && kref_get_unless_zero(&n->n_ref))
+ i->i_cur = n;
}
EXPORT_SYMBOL_GPL(klist_iter_init_node);
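/*
 * A generic sketch of the pattern adopted above, with illustrative
 * types: an unconditional kref_get() can resurrect an object whose
 * count already reached zero and whose release is in flight, whereas
 * kref_get_unless_zero() fails in that window, so the caller sees
 * NULL instead of a dying node.
 */
struct example_node {
	struct kref ref;
};

static struct example_node *example_try_get(struct example_node *n)
{
	/* returns NULL if n's refcount has already hit zero */
	if (n && kref_get_unless_zero(&n->ref))
		return n;
	return NULL;
}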
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
index 6a08ce7d6adc..acf9da449f81 100644
--- a/lib/libcrc32c.c
+++ b/lib/libcrc32c.c
@@ -74,3 +74,4 @@ module_exit(libcrc32c_mod_fini);
MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>");
MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations");
MODULE_LICENSE("GPL");
+MODULE_SOFTDEP("pre: crc32c");
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index fcf5d98574ce..6b79e9026e24 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1019,9 +1019,13 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
return 0;
radix_tree_for_each_slot(slot, root, &iter, first_index) {
- results[ret] = indirect_to_ptr(rcu_dereference_raw(*slot));
+ results[ret] = rcu_dereference_raw(*slot);
if (!results[ret])
continue;
+ if (radix_tree_is_indirect_ptr(results[ret])) {
+ slot = radix_tree_iter_retry(&iter);
+ continue;
+ }
if (++ret == max_items)
break;
}
@@ -1098,9 +1102,13 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
return 0;
radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
- results[ret] = indirect_to_ptr(rcu_dereference_raw(*slot));
+ results[ret] = rcu_dereference_raw(*slot);
if (!results[ret])
continue;
+ if (radix_tree_is_indirect_ptr(results[ret])) {
+ slot = radix_tree_iter_retry(&iter);
+ continue;
+ }
if (++ret == max_items)
break;
}
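/*
 * Both hunks above follow one pattern; a consolidated sketch
 * (example_gang_lookup is illustrative). Seeing an indirect pointer
 * through the iterator means the tree changed shape under RCU (grew
 * or shrank), so the slot is stale: instead of handing the internal
 * node to the caller, restart the walk at the current index.
 */
static unsigned int example_gang_lookup(struct radix_tree_root *root,
					void **results,
					unsigned long first_index,
					unsigned int max_items)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	radix_tree_for_each_slot(slot, root, &iter, first_index) {
		void *entry = rcu_dereference_raw(*slot);

		if (!entry)
			continue;
		if (radix_tree_is_indirect_ptr(entry)) {
			/* stale slot: re-read this index */
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		results[ret] = entry;
		if (++ret == max_items)
			break;
	}
	return ret;
}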
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index 5939f63d90cd..5c88204b6f1f 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -43,50 +43,73 @@ void string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
[STRING_UNITS_10] = 1000,
[STRING_UNITS_2] = 1024,
};
- int i, j;
- u32 remainder = 0, sf_cap, exp;
+ static const unsigned int rounding[] = { 500, 50, 5 };
+ int i = 0, j;
+ u32 remainder = 0, sf_cap;
char tmp[8];
const char *unit;
tmp[0] = '\0';
- i = 0;
- if (!size)
+
+ if (blk_size == 0)
+ size = 0;
+ if (size == 0)
goto out;
- while (blk_size >= divisor[units]) {
- remainder = do_div(blk_size, divisor[units]);
+ /* This is Napier's algorithm. Reduce the original block size to
+ *
+ * coefficient * divisor[units]^i
+ *
+ * we do the reduction so both coefficients are just under 32 bits so
+ * that multiplying them together won't overflow 64 bits and we keep
+ * as much precision as possible in the numbers.
+ *
+ * Note: it's safe to throw away the remainders here because all the
+ * precision is in the coefficients.
+ */
+ while (blk_size >> 32) {
+ do_div(blk_size, divisor[units]);
i++;
}
- exp = divisor[units] / (u32)blk_size;
- /*
- * size must be strictly greater than exp here to ensure that remainder
- * is greater than divisor[units] coming out of the if below.
- */
- if (size > exp) {
- remainder = do_div(size, divisor[units]);
- remainder *= blk_size;
+ while (size >> 32) {
+ do_div(size, divisor[units]);
i++;
- } else {
- remainder *= size;
}
+ /* now perform the actual multiplication keeping i as the sum of the
+ * two logarithms */
size *= blk_size;
- size += remainder / divisor[units];
- remainder %= divisor[units];
+ /* and logarithmically reduce it until it's just under the divisor */
while (size >= divisor[units]) {
remainder = do_div(size, divisor[units]);
i++;
}
+ /* work out in j how many digits of precision we need from the
+ * remainder */
sf_cap = size;
for (j = 0; sf_cap*10 < 1000; j++)
sf_cap *= 10;
- if (j) {
+ if (units == STRING_UNITS_2) {
+ /* express the remainder as a decimal. It's currently the
+ * numerator of a fraction whose denominator is
+ * divisor[units], which is 1 << 10 for STRING_UNITS_2 */
remainder *= 1000;
- remainder /= divisor[units];
+ remainder >>= 10;
+ }
+
+ /* add a 5 to the digit below what will be printed to ensure
+ * an arithmetical round up and carry it through to size */
+ remainder += rounding[j];
+ if (remainder >= 1000) {
+ remainder -= 1000;
+ size += 1;
+ }
+
+ if (j) {
snprintf(tmp, sizeof(tmp), ".%03u", remainder);
tmp[j+1] = '\0';
}
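/*
 * Two hand-checked examples of the rounding path above (outputs assume
 * the post-patch code; example_sizes is illustrative):
 */
static void example_sizes(void)
{
	char buf[16];

	/* 4096 blocks of 512 bytes = exactly 2 MiB */
	string_get_size(4096, 512, STRING_UNITS_2, buf, sizeof(buf));
	/* buf == "2.00 MiB" */

	/* 3000 blocks of 512 bytes = 1536000 B; 1.536 MB rounds up */
	string_get_size(3000, 512, STRING_UNITS_10, buf, sizeof(buf));
	/* buf == "1.54 MB" */
}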
diff --git a/lib/ucs2_string.c b/lib/ucs2_string.c
index 6f500ef2301d..f0b323abb4c6 100644
--- a/lib/ucs2_string.c
+++ b/lib/ucs2_string.c
@@ -49,3 +49,65 @@ ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len)
}
}
EXPORT_SYMBOL(ucs2_strncmp);
+
+unsigned long
+ucs2_utf8size(const ucs2_char_t *src)
+{
+ unsigned long i;
+ unsigned long j = 0;
+
+ for (i = 0; i < ucs2_strlen(src); i++) {
+ u16 c = src[i];
+
+ if (c >= 0x800)
+ j += 3;
+ else if (c >= 0x80)
+ j += 2;
+ else
+ j += 1;
+ }
+
+ return j;
+}
+EXPORT_SYMBOL(ucs2_utf8size);
+
+/*
+ * Copy at most maxlength bytes of whole utf8 characters to dest from the
+ * ucs2 string src.
+ *
+ * The return value is the number of bytes copied, not including the
+ * final NUL byte.
+ */
+unsigned long
+ucs2_as_utf8(u8 *dest, const ucs2_char_t *src, unsigned long maxlength)
+{
+ unsigned int i;
+ unsigned long j = 0;
+ unsigned long limit = ucs2_strnlen(src, maxlength);
+
+ for (i = 0; maxlength && i < limit; i++) {
+ u16 c = src[i];
+
+ if (c >= 0x800) {
+ if (maxlength < 3)
+ break;
+ maxlength -= 3;
+ dest[j++] = 0xe0 | (c & 0xf000) >> 12;
+ dest[j++] = 0x80 | (c & 0x0fc0) >> 6;
+ dest[j++] = 0x80 | (c & 0x003f);
+ } else if (c >= 0x80) {
+ if (maxlength < 2)
+ break;
+ maxlength -= 2;
+ dest[j++] = 0xc0 | (c & 0x7c0) >> 6;
+ dest[j++] = 0x80 | (c & 0x03f);
+ } else {
+ maxlength -= 1;
+ dest[j++] = c & 0x7f;
+ }
+ }
+ if (maxlength)
+ dest[j] = '\0';
+ return j;
+}
+EXPORT_SYMBOL(ucs2_as_utf8);
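/*
 * A usage sketch of the two helpers added above; example_ucs2_to_utf8
 * is hypothetical. Each UCS-2 code point expands to 1, 2 or 3 UTF-8
 * bytes (< 0x80, < 0x800, >= 0x800 respectively), so the destination
 * is sized with ucs2_utf8size() before converting.
 */
static char *example_ucs2_to_utf8(const ucs2_char_t *src)
{
	unsigned long len = ucs2_utf8size(src);
	u8 *buf = kmalloc(len + 1, GFP_KERNEL);	/* +1 for the NUL */

	if (!buf)
		return NULL;
	ucs2_as_utf8(buf, src, len + 1);
	return (char *)buf;
}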
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 7340353f8aea..cbe6f0b96f29 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -989,7 +989,7 @@ long wait_iff_congested(struct zone *zone, int sync, long timeout)
* here rather than calling cond_resched().
*/
if (current->flags & PF_WQ_WORKER)
- schedule_timeout(1);
+ schedule_timeout_uninterruptible(1);
else
cond_resched();
diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
index d3116be5a00f..300117f1a08f 100644
--- a/mm/balloon_compaction.c
+++ b/mm/balloon_compaction.c
@@ -61,6 +61,7 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
bool dequeued_page;
dequeued_page = false;
+ spin_lock_irqsave(&b_dev_info->pages_lock, flags);
list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
/*
* Block others from accessing the 'page' while we get around
@@ -75,15 +76,14 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
continue;
}
#endif
- spin_lock_irqsave(&b_dev_info->pages_lock, flags);
balloon_page_delete(page);
__count_vm_event(BALLOON_DEFLATE);
- spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
unlock_page(page);
dequeued_page = true;
break;
}
}
+ spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
if (!dequeued_page) {
/*
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 5c714f33f494..7535ef32a75b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1332,7 +1332,7 @@ static unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
return limit;
}
-static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
+static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
int order)
{
struct oom_control oc = {
@@ -1410,6 +1410,7 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
}
unlock:
mutex_unlock(&oom_lock);
+ return chosen;
}
#if MAX_NUMNODES > 1
@@ -5130,6 +5131,7 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off)
{
struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
+ unsigned long nr_pages;
unsigned long high;
int err;
@@ -5140,6 +5142,11 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
memcg->high = high;
+ nr_pages = page_counter_read(&memcg->memory);
+ if (nr_pages > high)
+ try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
+ GFP_KERNEL, true);
+
memcg_wb_domain_size_changed(memcg);
return nbytes;
}
@@ -5161,6 +5168,8 @@ static ssize_t memory_max_write(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off)
{
struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
+ unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES;
+ bool drained = false;
unsigned long max;
int err;
@@ -5169,9 +5178,36 @@ static ssize_t memory_max_write(struct kernfs_open_file *of,
if (err)
return err;
- err = mem_cgroup_resize_limit(memcg, max);
- if (err)
- return err;
+ xchg(&memcg->memory.limit, max);
+
+ for (;;) {
+ unsigned long nr_pages = page_counter_read(&memcg->memory);
+
+ if (nr_pages <= max)
+ break;
+
+ if (signal_pending(current)) {
+ err = -EINTR;
+ break;
+ }
+
+ if (!drained) {
+ drain_all_stock(memcg);
+ drained = true;
+ continue;
+ }
+
+ if (nr_reclaims) {
+ if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
+ GFP_KERNEL, true))
+ nr_reclaims--;
+ continue;
+ }
+
+ mem_cgroup_events(memcg, MEMCG_OOM, 1);
+ if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
+ break;
+ }
memcg_wb_domain_size_changed(memcg);
return nbytes;
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 8424b64711ac..750b7893ee3a 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1572,7 +1572,7 @@ static int get_any_page(struct page *page, unsigned long pfn, int flags)
* Did it turn free?
*/
ret = __get_any_page(page, pfn, 0);
- if (!PageLRU(page)) {
+ if (ret == 1 && !PageLRU(page)) {
/* Drop page reference which is from __get_any_page() */
put_hwpoison_page(page);
pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
diff --git a/mm/memory.c b/mm/memory.c
index c387430f06c3..b80bf4746b67 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3399,8 +3399,18 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
if (unlikely(pmd_none(*pmd)) &&
unlikely(__pte_alloc(mm, vma, pmd, address)))
return VM_FAULT_OOM;
- /* if an huge pmd materialized from under us just retry later */
- if (unlikely(pmd_trans_huge(*pmd)))
+ /*
+ * If a huge pmd materialized under us just retry later. Use
+ * pmd_trans_unstable() instead of pmd_trans_huge() to ensure the pmd
+ * didn't become pmd_trans_huge under us and then back to pmd_none, as
+ * a result of MADV_DONTNEED running immediately after a huge pmd fault
+ * in a different thread of this mm, in turn leading to a misleading
+ * pmd_trans_huge() retval. All we have to ensure is that it is a
+ * regular pmd that we can walk with pte_offset_map() and we can do that
+ * through an atomic read in C, which is what pmd_trans_unstable()
+ * provides.
+ */
+ if (unlikely(pmd_trans_unstable(pmd)))
return 0;
/*
* A regular pmd is established and it can't morph into a huge pmd
diff --git a/mm/migrate.c b/mm/migrate.c
index 7890d0bb5e23..6d17e0ab42d4 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1578,7 +1578,7 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
(GFP_HIGHUSER_MOVABLE |
__GFP_THISNODE | __GFP_NOMEMALLOC |
__GFP_NORETRY | __GFP_NOWARN) &
- ~(__GFP_IO | __GFP_FS), 0);
+ ~__GFP_RECLAIM, 0);
return newpage;
}
diff --git a/mm/mlock.c b/mm/mlock.c
index a6617735f2f4..d843bc9d32dd 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -172,7 +172,7 @@ static void __munlock_isolation_failed(struct page *page)
*/
unsigned int munlock_vma_page(struct page *page)
{
- unsigned int nr_pages;
+ int nr_pages;
struct zone *zone = page_zone(page);
/* For try_to_munlock() and to serialize with page migration */
diff --git a/mm/mmap.c b/mm/mmap.c
index ee856266cc61..a089cca8d79a 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -453,12 +453,16 @@ static void validate_mm(struct mm_struct *mm)
struct vm_area_struct *vma = mm->mmap;
while (vma) {
+ struct anon_vma *anon_vma = vma->anon_vma;
struct anon_vma_chain *avc;
- vma_lock_anon_vma(vma);
- list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
- anon_vma_interval_tree_verify(avc);
- vma_unlock_anon_vma(vma);
+ if (anon_vma) {
+ anon_vma_lock_read(anon_vma);
+ list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
+ anon_vma_interval_tree_verify(avc);
+ anon_vma_unlock_read(anon_vma);
+ }
+
highest_address = vma->vm_end;
vma = vma->vm_next;
i++;
@@ -2168,32 +2172,27 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
int expand_upwards(struct vm_area_struct *vma, unsigned long address)
{
struct mm_struct *mm = vma->vm_mm;
- int error;
+ int error = 0;
if (!(vma->vm_flags & VM_GROWSUP))
return -EFAULT;
- /*
- * We must make sure the anon_vma is allocated
- * so that the anon_vma locking is not a noop.
- */
+ /* Guard against wrapping around to address 0. */
+ if (address < PAGE_ALIGN(address+4))
+ address = PAGE_ALIGN(address+4);
+ else
+ return -ENOMEM;
+
+ /* We must make sure the anon_vma is allocated. */
if (unlikely(anon_vma_prepare(vma)))
return -ENOMEM;
- vma_lock_anon_vma(vma);
/*
* vma->vm_start/vm_end cannot change under us because the caller
* is required to hold the mmap_sem in read mode. We need the
* anon_vma lock to serialize against concurrent expand_stacks.
- * Also guard against wrapping around to address 0.
*/
- if (address < PAGE_ALIGN(address+4))
- address = PAGE_ALIGN(address+4);
- else {
- vma_unlock_anon_vma(vma);
- return -ENOMEM;
- }
- error = 0;
+ anon_vma_lock_write(vma->anon_vma);
/* Somebody else might have raced and expanded it already */
if (address > vma->vm_end) {
@@ -2211,7 +2210,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
* updates, but we only hold a shared mmap_sem
* lock here, so we need to protect against
* concurrent vma expansions.
- * vma_lock_anon_vma() doesn't help here, as
+ * anon_vma_lock_write() doesn't help here, as
* we don't guarantee that all growable vmas
* in a mm share the same root anon vma.
* So, we reuse mm->page_table_lock to guard
@@ -2235,7 +2234,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
}
}
}
- vma_unlock_anon_vma(vma);
+ anon_vma_unlock_write(vma->anon_vma);
khugepaged_enter_vma_merge(vma, vma->vm_flags);
validate_mm(mm);
return error;
@@ -2251,25 +2250,21 @@ int expand_downwards(struct vm_area_struct *vma,
struct mm_struct *mm = vma->vm_mm;
int error;
- /*
- * We must make sure the anon_vma is allocated
- * so that the anon_vma locking is not a noop.
- */
- if (unlikely(anon_vma_prepare(vma)))
- return -ENOMEM;
-
address &= PAGE_MASK;
error = security_mmap_addr(address);
if (error)
return error;
- vma_lock_anon_vma(vma);
+ /* We must make sure the anon_vma is allocated. */
+ if (unlikely(anon_vma_prepare(vma)))
+ return -ENOMEM;
/*
* vma->vm_start/vm_end cannot change under us because the caller
* is required to hold the mmap_sem in read mode. We need the
* anon_vma lock to serialize against concurrent expand_stacks.
*/
+ anon_vma_lock_write(vma->anon_vma);
/* Somebody else might have raced and expanded it already */
if (address < vma->vm_start) {
@@ -2287,7 +2282,7 @@ int expand_downwards(struct vm_area_struct *vma,
* updates, but we only hold a shared mmap_sem
* lock here, so we need to protect against
* concurrent vma expansions.
- * vma_lock_anon_vma() doesn't help here, as
+ * anon_vma_lock_write() doesn't help here, as
* we don't guarantee that all growable vmas
* in a mm share the same root anon vma.
* So, we reuse mm->page_table_lock to guard
@@ -2309,7 +2304,7 @@ int expand_downwards(struct vm_area_struct *vma,
}
}
}
- vma_unlock_anon_vma(vma);
+ anon_vma_unlock_write(vma->anon_vma);
khugepaged_enter_vma_merge(vma, vma->vm_flags);
validate_mm(mm);
return error;
@@ -2695,12 +2690,29 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
if (!vma || !(vma->vm_flags & VM_SHARED))
goto out;
- if (start < vma->vm_start || start + size > vma->vm_end)
+ if (start < vma->vm_start)
goto out;
- if (pgoff == linear_page_index(vma, start)) {
- ret = 0;
- goto out;
+ if (start + size > vma->vm_end) {
+ struct vm_area_struct *next;
+
+ for (next = vma->vm_next; next; next = next->vm_next) {
+ /* hole between vmas? */
+ if (next->vm_start != next->vm_prev->vm_end)
+ goto out;
+
+ if (next->vm_file != vma->vm_file)
+ goto out;
+
+ if (next->vm_flags != vma->vm_flags)
+ goto out;
+
+ if (start + size <= next->vm_end)
+ break;
+ }
+
+ if (!next)
+ goto out;
}
prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
@@ -2710,9 +2722,16 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
flags &= MAP_NONBLOCK;
flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
if (vma->vm_flags & VM_LOCKED) {
+ struct vm_area_struct *tmp;
flags |= MAP_LOCKED;
+
/* drop PG_Mlocked flag for over-mapped range */
- munlock_vma_pages_range(vma, start, start + size);
+ for (tmp = vma; tmp->vm_start >= start + size;
+ tmp = tmp->vm_next) {
+ munlock_vma_pages_range(tmp,
+ max(tmp->vm_start, start),
+ min(tmp->vm_end, start + size));
+ }
}
file = get_file(vma->vm_file);
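/*
 * A hypothetical userspace view of the loosened check above: the
 * emulation now accepts a window spanning several adjacent VMAs, as
 * long as they map the same file with identical flags and leave no
 * hole. A sketch, assuming fd refers to a file of at least two pages:
 */
#define _GNU_SOURCE
#include <sys/mman.h>
#include <unistd.h>

static void example(int fd)
{
	long page = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
		       MAP_SHARED, fd, 0);

	/* a second, non-linear mapping right behind the first keeps
	 * the two VMAs from merging */
	mmap(p + page, page, PROT_READ | PROT_WRITE,
	     MAP_SHARED | MAP_FIXED, fd, 0);

	/* a remap window crossing both VMAs is now accepted */
	remap_file_pages(p, 2 * page, 0, 1, 0);
}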
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 89ef8347900c..d58d6da73f98 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -674,34 +674,28 @@ static inline void __free_one_page(struct page *page,
unsigned long combined_idx;
unsigned long uninitialized_var(buddy_idx);
struct page *buddy;
- unsigned int max_order = MAX_ORDER;
+ unsigned int max_order;
+
+ max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
VM_BUG_ON(!zone_is_initialized(zone));
VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
VM_BUG_ON(migratetype == -1);
- if (is_migrate_isolate(migratetype)) {
- /*
- * We restrict max order of merging to prevent merge
- * between freepages on isolate pageblock and normal
- * pageblock. Without this, pageblock isolation
- * could cause incorrect freepage accounting.
- */
- max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
- } else {
+ if (likely(!is_migrate_isolate(migratetype)))
__mod_zone_freepage_state(zone, 1 << order, migratetype);
- }
- page_idx = pfn & ((1 << max_order) - 1);
+ page_idx = pfn & ((1 << MAX_ORDER) - 1);
VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
VM_BUG_ON_PAGE(bad_range(zone, page), page);
+continue_merging:
while (order < max_order - 1) {
buddy_idx = __find_buddy_index(page_idx, order);
buddy = page + (buddy_idx - page_idx);
if (!page_is_buddy(page, buddy, order))
- break;
+ goto done_merging;
/*
* Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
* merge with it and move up one order.
@@ -718,6 +712,32 @@ static inline void __free_one_page(struct page *page,
page_idx = combined_idx;
order++;
}
+ if (max_order < MAX_ORDER) {
+ /* If we are here, it means order is >= pageblock_order.
+ * We want to prevent merge between freepages on isolate
+ * pageblock and normal pageblock. Without this, pageblock
+ * isolation could cause incorrect freepage or CMA accounting.
+ *
+ * We don't want to hit this code for the more frequent
+ * low-order merging.
+ */
+ if (unlikely(has_isolate_pageblock(zone))) {
+ int buddy_mt;
+
+ buddy_idx = __find_buddy_index(page_idx, order);
+ buddy = page + (buddy_idx - page_idx);
+ buddy_mt = get_pageblock_migratetype(buddy);
+
+ if (migratetype != buddy_mt
+ && (is_migrate_isolate(migratetype) ||
+ is_migrate_isolate(buddy_mt)))
+ goto done_merging;
+ }
+ max_order++;
+ goto continue_merging;
+ }
+
+done_merging:
set_page_order(page, order);
/*
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 4568fd58f70a..00c96462cc36 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -283,11 +283,11 @@ struct page *alloc_migrate_target(struct page *page, unsigned long private,
* now as a simple work-around, we use the next node for destination.
*/
if (PageHuge(page)) {
- nodemask_t src = nodemask_of_node(page_to_nid(page));
- nodemask_t dst;
- nodes_complement(dst, src);
+ int node = next_online_node(page_to_nid(page));
+ if (node == MAX_NUMNODES)
+ node = first_online_node;
return alloc_huge_page_node(page_hstate(compound_head(page)),
- next_node(page_to_nid(page), dst));
+ node);
}
if (PageHighMem(page))
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index 7d3db0247983..1ba58213ad65 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -210,7 +210,9 @@ pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
VM_BUG_ON(pmd_trans_huge(*pmdp));
pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
- flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+
+ /* collapse entails shooting down ptes, not the pmd */
+ flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
return pmd;
}
#endif
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index e88d071648c2..5d453e58ddbf 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -194,7 +194,7 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
goto free_proc_pages;
}
- mm = mm_access(task, PTRACE_MODE_ATTACH);
+ mm = mm_access(task, PTRACE_MODE_ATTACH_REALCREDS);
if (!mm || IS_ERR(mm)) {
rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
/*
diff --git a/mm/shmem.c b/mm/shmem.c
index 40d1d3b0ea2f..b82e56ebabdd 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -620,8 +620,7 @@ static void shmem_evict_inode(struct inode *inode)
list_del_init(&info->swaplist);
mutex_unlock(&shmem_swaplist_mutex);
}
- } else
- kfree(info->symlink);
+ }
simple_xattrs_free(&info->xattrs);
WARN_ON(inode->i_blocks);
@@ -2462,13 +2461,12 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
info = SHMEM_I(inode);
inode->i_size = len-1;
if (len <= SHORT_SYMLINK_LEN) {
- info->symlink = kmemdup(symname, len, GFP_KERNEL);
- if (!info->symlink) {
+ inode->i_link = kmemdup(symname, len, GFP_KERNEL);
+ if (!inode->i_link) {
iput(inode);
return -ENOMEM;
}
inode->i_op = &shmem_short_symlink_operations;
- inode->i_link = info->symlink;
} else {
error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
if (error) {
@@ -3083,6 +3081,7 @@ static struct inode *shmem_alloc_inode(struct super_block *sb)
static void shmem_destroy_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
+ kfree(inode->i_link);
kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 9f15bdd9163c..fc083996e40a 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -309,7 +309,12 @@ static void free_handle(struct zs_pool *pool, unsigned long handle)
static void record_obj(unsigned long handle, unsigned long obj)
{
- *(unsigned long *)handle = obj;
+ /*
+ * The lsb of @obj represents the handle lock while the other bits
+ * represent the object value the handle points to, so the
+ * update shouldn't do store tearing.
+ */
+ WRITE_ONCE(*(unsigned long *)handle, obj);
}
/* zpool driver */
@@ -1635,6 +1640,13 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
free_obj = obj_malloc(d_page, class, handle);
zs_object_copy(free_obj, used_obj, class);
index++;
+ /*
+ * record_obj updates handle's value to free_obj and it will
+ * invalidate the lock bit (i.e., HANDLE_PIN_BIT) of the handle, which
+ * breaks synchronization using pin_tag() (e.g., in zs_free) so
+ * let's keep the lock bit.
+ */
+ free_obj |= BIT(HANDLE_PIN_BIT);
record_obj(handle, free_obj);
unpin_tag(handle);
obj_free(pool, class, used_obj);
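/*
 * A condensed sketch of the race the two changes above close; the
 * function is illustrative, the identifiers follow zsmalloc. A
 * concurrent zs_free() spins on the handle's pin bit, so migration
 * must publish the new object value with the pin bit still set and in
 * a single untearable store:
 */
static void example_publish(unsigned long handle, unsigned long free_obj)
{
	/* keep the lock bit held across the publish */
	free_obj |= BIT(HANDLE_PIN_BIT);

	/* one full-word store; a plain assignment may be torn by the
	 * compiler, briefly exposing a cleared pin bit to the spinner */
	WRITE_ONCE(*(unsigned long *)handle, free_obj);

	unpin_tag(handle);	/* now drop the lock for real */
}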
diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c
index b563a3f5f2a8..2fa3be965101 100644
--- a/net/ax25/ax25_ip.c
+++ b/net/ax25/ax25_ip.c
@@ -228,8 +228,23 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
}
#endif
+static bool ax25_validate_header(const char *header, unsigned int len)
+{
+ ax25_digi digi;
+
+ if (!len)
+ return false;
+
+ if (header[0])
+ return true;
+
+ return ax25_addr_parse(header + 1, len - 1, NULL, NULL, &digi, NULL,
+ NULL);
+}
+
const struct header_ops ax25_header_ops = {
.create = ax25_hard_header,
+ .validate = ax25_validate_header,
};
EXPORT_SYMBOL(ax25_header_ops);
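/*
 * A simplified sketch of the consumer side, assuming the
 * dev_validate_header() helper from the same fix series; the exact
 * branch order and fallback here are illustrative. Raw af_packet
 * sends pass the user-built link-layer header through the device's
 * validate callback before a truncated AX.25 address field can reach
 * the driver path:
 */
static bool example_validate_header(const struct net_device *dev,
				    char *ll_header, int len)
{
	if (likely(len >= dev->hard_header_len))
		return true;
	if (dev->header_ops && dev->header_ops->validate)
		return dev->header_ops->validate(ll_header, len);
	return false;
}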
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 191a70290dca..f5d2fe5e31cc 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -127,21 +127,17 @@ batadv_backbone_gw_free_ref(struct batadv_bla_backbone_gw *backbone_gw)
}
/* finally deinitialize the claim */
-static void batadv_claim_free_rcu(struct rcu_head *rcu)
+static void batadv_claim_release(struct batadv_bla_claim *claim)
{
- struct batadv_bla_claim *claim;
-
- claim = container_of(rcu, struct batadv_bla_claim, rcu);
-
batadv_backbone_gw_free_ref(claim->backbone_gw);
- kfree(claim);
+ kfree_rcu(claim, rcu);
}
/* free a claim, call claim_free_rcu if its the last reference */
static void batadv_claim_free_ref(struct batadv_bla_claim *claim)
{
if (atomic_dec_and_test(&claim->refcount))
- call_rcu(&claim->rcu, batadv_claim_free_rcu);
+ batadv_claim_release(claim);
}
/**
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index 5a31420513e1..7b12ea8ea29d 100644
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -75,18 +75,6 @@ batadv_hardif_free_ref(struct batadv_hard_iface *hard_iface)
call_rcu(&hard_iface->rcu, batadv_hardif_free_rcu);
}
-/**
- * batadv_hardif_free_ref_now - decrement the hard interface refcounter and
- * possibly free it (without rcu callback)
- * @hard_iface: the hard interface to free
- */
-static inline void
-batadv_hardif_free_ref_now(struct batadv_hard_iface *hard_iface)
-{
- if (atomic_dec_and_test(&hard_iface->refcount))
- batadv_hardif_free_rcu(&hard_iface->rcu);
-}
-
static inline struct batadv_hard_iface *
batadv_primary_if_get_selected(struct batadv_priv *bat_priv)
{
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index f5276be2c77c..d0956f726547 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -203,28 +203,25 @@ void batadv_nc_init_orig(struct batadv_orig_node *orig_node)
}
/**
- * batadv_nc_node_free_rcu - rcu callback to free an nc node and remove
- * its refcount on the orig_node
- * @rcu: rcu pointer of the nc node
+ * batadv_nc_node_release - release nc_node from lists and queue for free after
+ * rcu grace period
+ * @nc_node: the nc node to free
*/
-static void batadv_nc_node_free_rcu(struct rcu_head *rcu)
+static void batadv_nc_node_release(struct batadv_nc_node *nc_node)
{
- struct batadv_nc_node *nc_node;
-
- nc_node = container_of(rcu, struct batadv_nc_node, rcu);
batadv_orig_node_free_ref(nc_node->orig_node);
- kfree(nc_node);
+ kfree_rcu(nc_node, rcu);
}
/**
- * batadv_nc_node_free_ref - decrements the nc node refcounter and possibly
- * frees it
+ * batadv_nc_node_free_ref - decrement the nc node refcounter and possibly
+ * release it
* @nc_node: the nc node to free
*/
static void batadv_nc_node_free_ref(struct batadv_nc_node *nc_node)
{
if (atomic_dec_and_test(&nc_node->refcount))
- call_rcu(&nc_node->rcu, batadv_nc_node_free_rcu);
+ batadv_nc_node_release(nc_node);
}
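/*
 * All of the batman-adv conversions in this series follow one pattern;
 * a generic sketch with illustrative types (example_obj, child_put).
 * The dedicated call_rcu() callback goes away: the last put drops
 * sub-references synchronously, and only the final free is deferred
 * past the grace period via kfree_rcu().
 */
struct example_obj {
	atomic_t refcount;
	struct rcu_head rcu;
	struct example_child *child;
};

static void example_release(struct example_obj *obj)
{
	child_put(obj->child);		/* no longer inside an RCU callback */
	kfree_rcu(obj, rcu);		/* memory freed after the grace period */
}

static void example_put(struct example_obj *obj)
{
	if (atomic_dec_and_test(&obj->refcount))
		example_release(obj);
}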
/**
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 7486df9ed48d..17851d3aaf22 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -163,92 +163,66 @@ err:
}
/**
- * batadv_neigh_ifinfo_free_rcu - free the neigh_ifinfo object
- * @rcu: rcu pointer of the neigh_ifinfo object
- */
-static void batadv_neigh_ifinfo_free_rcu(struct rcu_head *rcu)
-{
- struct batadv_neigh_ifinfo *neigh_ifinfo;
-
- neigh_ifinfo = container_of(rcu, struct batadv_neigh_ifinfo, rcu);
-
- if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
- batadv_hardif_free_ref_now(neigh_ifinfo->if_outgoing);
-
- kfree(neigh_ifinfo);
-}
-
-/**
- * batadv_neigh_ifinfo_free_now - decrement the refcounter and possibly free
- * the neigh_ifinfo (without rcu callback)
+ * batadv_neigh_ifinfo_release - release neigh_ifinfo from lists and queue for
+ * free after rcu grace period
* @neigh_ifinfo: the neigh_ifinfo object to release
*/
static void
-batadv_neigh_ifinfo_free_ref_now(struct batadv_neigh_ifinfo *neigh_ifinfo)
+batadv_neigh_ifinfo_release(struct batadv_neigh_ifinfo *neigh_ifinfo)
{
- if (atomic_dec_and_test(&neigh_ifinfo->refcount))
- batadv_neigh_ifinfo_free_rcu(&neigh_ifinfo->rcu);
+ if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
+ batadv_hardif_free_ref(neigh_ifinfo->if_outgoing);
+
+ kfree_rcu(neigh_ifinfo, rcu);
}
/**
- * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly free
+ * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly release
* the neigh_ifinfo
* @neigh_ifinfo: the neigh_ifinfo object to release
*/
void batadv_neigh_ifinfo_free_ref(struct batadv_neigh_ifinfo *neigh_ifinfo)
{
if (atomic_dec_and_test(&neigh_ifinfo->refcount))
- call_rcu(&neigh_ifinfo->rcu, batadv_neigh_ifinfo_free_rcu);
+ batadv_neigh_ifinfo_release(neigh_ifinfo);
}
/**
- * batadv_neigh_node_free_rcu - free the neigh_node
- * @rcu: rcu pointer of the neigh_node
+ * batadv_neigh_node_release - release neigh_node from lists and queue for
+ * free after rcu grace period
+ * @neigh_node: neigh neighbor to free
*/
-static void batadv_neigh_node_free_rcu(struct rcu_head *rcu)
+static void batadv_neigh_node_release(struct batadv_neigh_node *neigh_node)
{
struct hlist_node *node_tmp;
- struct batadv_neigh_node *neigh_node;
struct batadv_neigh_ifinfo *neigh_ifinfo;
struct batadv_algo_ops *bao;
- neigh_node = container_of(rcu, struct batadv_neigh_node, rcu);
bao = neigh_node->orig_node->bat_priv->bat_algo_ops;
hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
&neigh_node->ifinfo_list, list) {
- batadv_neigh_ifinfo_free_ref_now(neigh_ifinfo);
+ batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
}
if (bao->bat_neigh_free)
bao->bat_neigh_free(neigh_node);
- batadv_hardif_free_ref_now(neigh_node->if_incoming);
+ batadv_hardif_free_ref(neigh_node->if_incoming);
- kfree(neigh_node);
-}
-
-/**
- * batadv_neigh_node_free_ref_now - decrement the neighbors refcounter
- * and possibly free it (without rcu callback)
- * @neigh_node: neigh neighbor to free
- */
-static void
-batadv_neigh_node_free_ref_now(struct batadv_neigh_node *neigh_node)
-{
- if (atomic_dec_and_test(&neigh_node->refcount))
- batadv_neigh_node_free_rcu(&neigh_node->rcu);
+ kfree_rcu(neigh_node, rcu);
}
/**
* batadv_neigh_node_free_ref - decrement the neighbors refcounter
- * and possibly free it
+ * and possibly release it
* @neigh_node: neigh neighbor to free
*/
void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
{
if (atomic_dec_and_test(&neigh_node->refcount))
- call_rcu(&neigh_node->rcu, batadv_neigh_node_free_rcu);
+ batadv_neigh_node_release(neigh_node);
}
/**
@@ -532,108 +506,99 @@ out:
}
/**
- * batadv_orig_ifinfo_free_rcu - free the orig_ifinfo object
- * @rcu: rcu pointer of the orig_ifinfo object
+ * batadv_orig_ifinfo_release - release orig_ifinfo from lists and queue for
+ * free after rcu grace period
+ * @orig_ifinfo: the orig_ifinfo object to release
*/
-static void batadv_orig_ifinfo_free_rcu(struct rcu_head *rcu)
+static void batadv_orig_ifinfo_release(struct batadv_orig_ifinfo *orig_ifinfo)
{
- struct batadv_orig_ifinfo *orig_ifinfo;
struct batadv_neigh_node *router;
- orig_ifinfo = container_of(rcu, struct batadv_orig_ifinfo, rcu);
-
if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
- batadv_hardif_free_ref_now(orig_ifinfo->if_outgoing);
+ batadv_hardif_free_ref(orig_ifinfo->if_outgoing);
/* this is the last reference to this object */
router = rcu_dereference_protected(orig_ifinfo->router, true);
if (router)
- batadv_neigh_node_free_ref_now(router);
- kfree(orig_ifinfo);
+ batadv_neigh_node_free_ref(router);
+
+ kfree_rcu(orig_ifinfo, rcu);
}
/**
- * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly free
- * the orig_ifinfo (without rcu callback)
+ * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly release
+ * the orig_ifinfo
* @orig_ifinfo: the orig_ifinfo object to release
*/
-static void
-batadv_orig_ifinfo_free_ref_now(struct batadv_orig_ifinfo *orig_ifinfo)
+void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo)
{
if (atomic_dec_and_test(&orig_ifinfo->refcount))
- batadv_orig_ifinfo_free_rcu(&orig_ifinfo->rcu);
+ batadv_orig_ifinfo_release(orig_ifinfo);
}
/**
- * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly free
- * the orig_ifinfo
- * @orig_ifinfo: the orig_ifinfo object to release
+ * batadv_orig_node_free_rcu - free the orig_node
+ * @rcu: rcu pointer of the orig_node
*/
-void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo)
+static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
{
- if (atomic_dec_and_test(&orig_ifinfo->refcount))
- call_rcu(&orig_ifinfo->rcu, batadv_orig_ifinfo_free_rcu);
+ struct batadv_orig_node *orig_node;
+
+ orig_node = container_of(rcu, struct batadv_orig_node, rcu);
+
+ batadv_mcast_purge_orig(orig_node);
+
+ batadv_frag_purge_orig(orig_node, NULL);
+
+ if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
+ orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);
+
+ kfree(orig_node->tt_buff);
+ kfree(orig_node);
}
-static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
+/**
+ * batadv_orig_node_release - release orig_node from lists and queue for
+ * free after rcu grace period
+ * @orig_node: the orig node to free
+ */
+static void batadv_orig_node_release(struct batadv_orig_node *orig_node)
{
struct hlist_node *node_tmp;
struct batadv_neigh_node *neigh_node;
- struct batadv_orig_node *orig_node;
struct batadv_orig_ifinfo *orig_ifinfo;
- orig_node = container_of(rcu, struct batadv_orig_node, rcu);
-
spin_lock_bh(&orig_node->neigh_list_lock);
/* for all neighbors towards this originator ... */
hlist_for_each_entry_safe(neigh_node, node_tmp,
&orig_node->neigh_list, list) {
hlist_del_rcu(&neigh_node->list);
- batadv_neigh_node_free_ref_now(neigh_node);
+ batadv_neigh_node_free_ref(neigh_node);
}
hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
&orig_node->ifinfo_list, list) {
hlist_del_rcu(&orig_ifinfo->list);
- batadv_orig_ifinfo_free_ref_now(orig_ifinfo);
+ batadv_orig_ifinfo_free_ref(orig_ifinfo);
}
spin_unlock_bh(&orig_node->neigh_list_lock);
- batadv_mcast_purge_orig(orig_node);
-
/* Free nc_nodes */
batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
- batadv_frag_purge_orig(orig_node, NULL);
-
- if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
- orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);
-
- kfree(orig_node->tt_buff);
- kfree(orig_node);
+ call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
}
/**
* batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
- * schedule an rcu callback for freeing it
+ * release it
* @orig_node: the orig node to free
*/
void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
{
if (atomic_dec_and_test(&orig_node->refcount))
- call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
-}
-
-/**
- * batadv_orig_node_free_ref_now - decrement the orig node refcounter and
- * possibly free it (without rcu callback)
- * @orig_node: the orig node to free
- */
-void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node)
-{
- if (atomic_dec_and_test(&orig_node->refcount))
- batadv_orig_node_free_rcu(&orig_node->rcu);
+ batadv_orig_node_release(orig_node);
}
void batadv_originator_free(struct batadv_priv *bat_priv)
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index fa18f9bf266b..a5c37882b409 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -38,7 +38,6 @@ int batadv_originator_init(struct batadv_priv *bat_priv);
void batadv_originator_free(struct batadv_priv *bat_priv);
void batadv_purge_orig_ref(struct batadv_priv *bat_priv);
void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node);
-void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node);
struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
const u8 *addr);
struct batadv_neigh_node *
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 76f19ba62462..83b0ca27a45e 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -240,20 +240,6 @@ int batadv_tt_global_hash_count(struct batadv_priv *bat_priv,
return count;
}
-static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
-{
- struct batadv_tt_orig_list_entry *orig_entry;
-
- orig_entry = container_of(rcu, struct batadv_tt_orig_list_entry, rcu);
-
- /* We are in an rcu callback here, therefore we cannot use
- * batadv_orig_node_free_ref() and its call_rcu():
- * An rcu_barrier() wouldn't wait for that to finish
- */
- batadv_orig_node_free_ref_now(orig_entry->orig_node);
- kfree(orig_entry);
-}
-
/**
* batadv_tt_local_size_mod - change the size by v of the local table identified
* by vid
@@ -349,13 +335,25 @@ static void batadv_tt_global_size_dec(struct batadv_orig_node *orig_node,
batadv_tt_global_size_mod(orig_node, vid, -1);
}
+/**
+ * batadv_tt_orig_list_entry_release - release tt orig entry from lists and
+ * queue for free after rcu grace period
+ * @orig_entry: tt orig entry to be free'd
+ */
+static void
+batadv_tt_orig_list_entry_release(struct batadv_tt_orig_list_entry *orig_entry)
+{
+ batadv_orig_node_free_ref(orig_entry->orig_node);
+ kfree_rcu(orig_entry, rcu);
+}
+
static void
batadv_tt_orig_list_entry_free_ref(struct batadv_tt_orig_list_entry *orig_entry)
{
if (!atomic_dec_and_test(&orig_entry->refcount))
return;
- call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu);
+ batadv_tt_orig_list_entry_release(orig_entry);
}
/**
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 9e9cca3689a0..795ddd8b2f77 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -307,6 +307,9 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
/* check that it's our buffer */
if (lowpan_is_ipv6(*skb_network_header(skb))) {
+ /* Pull off the 1-byte 6lowpan header. */
+ skb_pull(skb, 1);
+
/* Copy the packet so that the IPv6 header is
* properly aligned.
*/
@@ -317,6 +320,7 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
local_skb->protocol = htons(ETH_P_IPV6);
local_skb->pkt_type = PACKET_HOST;
+ local_skb->dev = dev;
skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));
@@ -335,6 +339,8 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
if (!local_skb)
goto drop;
+ local_skb->dev = dev;
+
ret = iphc_decompress(local_skb, dev, chan);
if (ret < 0) {
kfree_skb(local_skb);
@@ -343,7 +349,6 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
local_skb->protocol = htons(ETH_P_IPV6);
local_skb->pkt_type = PACKET_HOST;
- local_skb->dev = dev;
if (give_skb_to_upper(local_skb, dev)
!= NET_RX_SUCCESS) {
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 85b82f7adbd2..24e9410923d0 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -722,8 +722,12 @@ static void hci_req_add_le_create_conn(struct hci_request *req,
if (hci_update_random_address(req, false, &own_addr_type))
return;
+ /* Set window to be the same value as the interval to enable
+ * continuous scanning.
+ */
cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
- cp.scan_window = cpu_to_le16(hdev->le_scan_window);
+ cp.scan_window = cp.scan_interval;
+
bacpy(&cp.peer_addr, &conn->dst);
cp.peer_addr_type = conn->dst_type;
cp.own_address_type = own_addr_type;
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index 981f8a202c27..02778c5bc149 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -175,21 +175,29 @@ static u8 update_white_list(struct hci_request *req)
* command to remove it from the controller.
*/
list_for_each_entry(b, &hdev->le_white_list, list) {
- struct hci_cp_le_del_from_white_list cp;
+ /* If the device is neither in pend_le_conns nor
+ * pend_le_reports then remove it from the whitelist.
+ */
+ if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
+ &b->bdaddr, b->bdaddr_type) &&
+ !hci_pend_le_action_lookup(&hdev->pend_le_reports,
+ &b->bdaddr, b->bdaddr_type)) {
+ struct hci_cp_le_del_from_white_list cp;
+
+ cp.bdaddr_type = b->bdaddr_type;
+ bacpy(&cp.bdaddr, &b->bdaddr);
- if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
- &b->bdaddr, b->bdaddr_type) ||
- hci_pend_le_action_lookup(&hdev->pend_le_reports,
- &b->bdaddr, b->bdaddr_type)) {
- white_list_entries++;
+ hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
+ sizeof(cp), &cp);
continue;
}
- cp.bdaddr_type = b->bdaddr_type;
- bacpy(&cp.bdaddr, &b->bdaddr);
+ if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
+ /* White list can not be used with RPAs */
+ return 0x00;
+ }
- hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
- sizeof(cp), &cp);
+ white_list_entries++;
}
/* Since all no longer valid white list entries have been
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 7f22119276f3..b1b0a1c0bd8d 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -7155,6 +7155,10 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
status);
+ if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
+ MGMT_STATUS_INVALID_PARAMS);
+
flags = __le32_to_cpu(cp->flags);
timeout = __le16_to_cpu(cp->timeout);
duration = __le16_to_cpu(cp->duration);
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index ffed8a1d4f27..4b175df35184 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -1072,22 +1072,6 @@ static void smp_notify_keys(struct l2cap_conn *conn)
hcon->dst_type = smp->remote_irk->addr_type;
queue_work(hdev->workqueue, &conn->id_addr_update_work);
}
-
- /* When receiving an indentity resolving key for
- * a remote device that does not use a resolvable
- * private address, just remove the key so that
- * it is possible to use the controller white
- * list for scanning.
- *
- * Userspace will have been told to not store
- * this key at this point. So it is safe to
- * just remove it.
- */
- if (!bacmp(&smp->remote_irk->rpa, BDADDR_ANY)) {
- list_del_rcu(&smp->remote_irk->list);
- kfree_rcu(smp->remote_irk, rcu);
- smp->remote_irk = NULL;
- }
}
if (smp->csrk) {
diff --git a/net/bridge/br.c b/net/bridge/br.c
index a1abe4936fe1..3addc05b9a16 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -121,6 +121,7 @@ static struct notifier_block br_device_notifier = {
.notifier_call = br_device_event
};
+/* called with RTNL */
static int br_switchdev_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
@@ -130,7 +131,6 @@ static int br_switchdev_event(struct notifier_block *unused,
struct switchdev_notifier_fdb_info *fdb_info;
int err = NOTIFY_DONE;
- rtnl_lock();
p = br_port_get_rtnl(dev);
if (!p)
goto out;
@@ -155,7 +155,6 @@ static int br_switchdev_event(struct notifier_block *unused,
}
out:
- rtnl_unlock();
return err;
}
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 689b8412c58e..0346c215ff6a 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -28,6 +28,8 @@
const struct nf_br_ops __rcu *nf_br_ops __read_mostly;
EXPORT_SYMBOL_GPL(nf_br_ops);
+static struct lock_class_key bridge_netdev_addr_lock_key;
+
/* net device transmit always called with BH disabled */
netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
@@ -88,6 +90,11 @@ out:
return NETDEV_TX_OK;
}
+static void br_set_lockdep_class(struct net_device *dev)
+{
+ lockdep_set_class(&dev->addr_list_lock, &bridge_netdev_addr_lock_key);
+}
+
static int br_dev_init(struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
@@ -100,6 +107,7 @@ static int br_dev_init(struct net_device *dev)
err = br_vlan_init(br);
if (err)
free_percpu(br->stats);
+ br_set_lockdep_class(dev);
return err;
}
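/*
 * A short sketch of what the class key above buys, with a hypothetical
 * key. Lockdep classifies locks by static key, so by default every
 * net_device's addr_list_lock shares one class; when the bridge takes
 * its own lock and then a port device's during address syncing,
 * lockdep sees same-class nesting and reports false recursion. A
 * separate key for bridge devices splits the class:
 */
static struct lock_class_key example_addr_lock_key;

static void example_set_class(struct net_device *bridge_dev)
{
	/* bridge locks now form their own lockdep class */
	lockdep_set_class(&bridge_dev->addr_list_lock,
			  &example_addr_lock_key);
}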
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index 5f3f64553179..eff69cb270d2 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -567,6 +567,14 @@ int br_set_max_age(struct net_bridge *br, unsigned long val)
}
+/* Set the time interval that dynamic forwarding entries live.
+ * For a pure software bridge, allow values outside the 802.1
+ * standard specification for special cases:
+ *  0 - entry never ages (all permanent)
+ *  1 - entry disappears (no persistence)
+ *
+ * Offloaded switch entries may be more restrictive.
+ */
int br_set_ageing_time(struct net_bridge *br, u32 ageing_time)
{
struct switchdev_attr attr = {
@@ -577,11 +585,8 @@ int br_set_ageing_time(struct net_bridge *br, u32 ageing_time)
unsigned long t = clock_t_to_jiffies(ageing_time);
int err;
- if (t < BR_MIN_AGEING_TIME || t > BR_MAX_AGEING_TIME)
- return -ERANGE;
-
err = switchdev_port_attr_set(br->dev, &attr);
- if (err)
+ if (err && err != -EOPNOTSUPP)
return err;
br->ageing_time = t;
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 9981039ef4ff..63ae5dd24fc5 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -672,6 +672,8 @@ static void reset_connection(struct ceph_connection *con)
}
con->in_seq = 0;
con->in_seq_acked = 0;
+
+ con->out_skip = 0;
}
/*
@@ -771,6 +773,8 @@ static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
static void con_out_kvec_reset(struct ceph_connection *con)
{
+ BUG_ON(con->out_skip);
+
con->out_kvec_left = 0;
con->out_kvec_bytes = 0;
con->out_kvec_cur = &con->out_kvec[0];
@@ -779,9 +783,9 @@ static void con_out_kvec_reset(struct ceph_connection *con)
static void con_out_kvec_add(struct ceph_connection *con,
size_t size, void *data)
{
- int index;
+ int index = con->out_kvec_left;
- index = con->out_kvec_left;
+ BUG_ON(con->out_skip);
BUG_ON(index >= ARRAY_SIZE(con->out_kvec));
con->out_kvec[index].iov_len = size;
@@ -790,6 +794,27 @@ static void con_out_kvec_add(struct ceph_connection *con,
con->out_kvec_bytes += size;
}
+/*
+ * Chop off a kvec from the end. Return residual number of bytes for
+ * that kvec, i.e. how many bytes would have been written if the kvec
+ * hadn't been nuked.
+ */
+static int con_out_kvec_skip(struct ceph_connection *con)
+{
+ int off = con->out_kvec_cur - con->out_kvec;
+ int skip = 0;
+
+ if (con->out_kvec_bytes > 0) {
+ skip = con->out_kvec[off + con->out_kvec_left - 1].iov_len;
+ BUG_ON(con->out_kvec_bytes < skip);
+ BUG_ON(!con->out_kvec_left);
+ con->out_kvec_bytes -= skip;
+ con->out_kvec_left--;
+ }
+
+ return skip;
+}
+
#ifdef CONFIG_BLOCK
/*
@@ -1175,6 +1200,13 @@ static bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
return new_piece;
}
+static size_t sizeof_footer(struct ceph_connection *con)
+{
+ return (con->peer_features & CEPH_FEATURE_MSG_AUTH) ?
+ sizeof(struct ceph_msg_footer) :
+ sizeof(struct ceph_msg_footer_old);
+}
+
static void prepare_message_data(struct ceph_msg *msg, u32 data_len)
{
BUG_ON(!msg);
@@ -1197,7 +1229,6 @@ static void prepare_write_message_footer(struct ceph_connection *con)
m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE;
dout("prepare_write_message_footer %p\n", con);
- con->out_kvec_is_msg = true;
con->out_kvec[v].iov_base = &m->footer;
if (con->peer_features & CEPH_FEATURE_MSG_AUTH) {
if (con->ops->sign_message)
@@ -1225,7 +1256,6 @@ static void prepare_write_message(struct ceph_connection *con)
u32 crc;
con_out_kvec_reset(con);
- con->out_kvec_is_msg = true;
con->out_msg_done = false;
/* Sneak an ack in there first? If we can get it into the same
@@ -1265,18 +1295,19 @@ static void prepare_write_message(struct ceph_connection *con)
/* tag + hdr + front + middle */
con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
- con_out_kvec_add(con, sizeof (m->hdr), &m->hdr);
+ con_out_kvec_add(con, sizeof(con->out_hdr), &con->out_hdr);
con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);
if (m->middle)
con_out_kvec_add(con, m->middle->vec.iov_len,
m->middle->vec.iov_base);
- /* fill in crc (except data pages), footer */
+ /* fill in hdr crc and finalize hdr */
crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
con->out_msg->hdr.crc = cpu_to_le32(crc);
- con->out_msg->footer.flags = 0;
+ memcpy(&con->out_hdr, &con->out_msg->hdr, sizeof(con->out_hdr));
+ /* fill in front and middle crc, footer */
crc = crc32c(0, m->front.iov_base, m->front.iov_len);
con->out_msg->footer.front_crc = cpu_to_le32(crc);
if (m->middle) {
@@ -1288,6 +1319,7 @@ static void prepare_write_message(struct ceph_connection *con)
dout("%s front_crc %u middle_crc %u\n", __func__,
le32_to_cpu(con->out_msg->footer.front_crc),
le32_to_cpu(con->out_msg->footer.middle_crc));
+ con->out_msg->footer.flags = 0;
/* is there a data payload? */
con->out_msg->footer.data_crc = 0;
@@ -1492,7 +1524,6 @@ static int write_partial_kvec(struct ceph_connection *con)
}
}
con->out_kvec_left = 0;
- con->out_kvec_is_msg = false;
ret = 1;
out:
dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
@@ -1584,6 +1615,7 @@ static int write_partial_skip(struct ceph_connection *con)
{
int ret;
+ dout("%s %p %d left\n", __func__, con, con->out_skip);
while (con->out_skip > 0) {
size_t size = min(con->out_skip, (int) PAGE_CACHE_SIZE);
@@ -2313,9 +2345,9 @@ static int read_partial_message(struct ceph_connection *con)
ceph_pr_addr(&con->peer_addr.in_addr),
seq, con->in_seq + 1);
con->in_base_pos = -front_len - middle_len - data_len -
- sizeof(m->footer);
+ sizeof_footer(con);
con->in_tag = CEPH_MSGR_TAG_READY;
- return 0;
+ return 1;
} else if ((s64)seq - (s64)con->in_seq > 1) {
pr_err("read_partial_message bad seq %lld expected %lld\n",
seq, con->in_seq + 1);
@@ -2338,10 +2370,10 @@ static int read_partial_message(struct ceph_connection *con)
/* skip this message */
dout("alloc_msg said skip message\n");
con->in_base_pos = -front_len - middle_len - data_len -
- sizeof(m->footer);
+ sizeof_footer(con);
con->in_tag = CEPH_MSGR_TAG_READY;
con->in_seq++;
- return 0;
+ return 1;
}
BUG_ON(!con->in_msg);
@@ -2506,13 +2538,13 @@ more:
more_kvec:
/* kvec data queued? */
- if (con->out_skip) {
- ret = write_partial_skip(con);
+ if (con->out_kvec_left) {
+ ret = write_partial_kvec(con);
if (ret <= 0)
goto out;
}
- if (con->out_kvec_left) {
- ret = write_partial_kvec(con);
+ if (con->out_skip) {
+ ret = write_partial_skip(con);
if (ret <= 0)
goto out;
}
@@ -3050,16 +3082,31 @@ void ceph_msg_revoke(struct ceph_msg *msg)
ceph_msg_put(msg);
}
if (con->out_msg == msg) {
- dout("%s %p msg %p - was sending\n", __func__, con, msg);
- con->out_msg = NULL;
- if (con->out_kvec_is_msg) {
- con->out_skip = con->out_kvec_bytes;
- con->out_kvec_is_msg = false;
+ BUG_ON(con->out_skip);
+ /* footer */
+ if (con->out_msg_done) {
+ con->out_skip += con_out_kvec_skip(con);
+ } else {
+ BUG_ON(!msg->data_length);
+ if (con->peer_features & CEPH_FEATURE_MSG_AUTH)
+ con->out_skip += sizeof(msg->footer);
+ else
+ con->out_skip += sizeof(msg->old_footer);
}
+ /* data, middle, front */
+ if (msg->data_length)
+ con->out_skip += msg->cursor.total_resid;
+ if (msg->middle)
+ con->out_skip += con_out_kvec_skip(con);
+ con->out_skip += con_out_kvec_skip(con);
+
+ dout("%s %p msg %p - was sending, will write %d skip %d\n",
+ __func__, con, msg, con->out_kvec_bytes, con->out_skip);
msg->hdr.seq = 0;
-
+ con->out_msg = NULL;
ceph_msg_put(msg);
}
+
mutex_unlock(&con->mutex);
}
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index f8f235930d88..a28e47ff1b1b 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -2843,8 +2843,8 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
mutex_lock(&osdc->request_mutex);
req = __lookup_request(osdc, tid);
if (!req) {
- pr_warn("%s osd%d tid %llu unknown, skipping\n",
- __func__, osd->o_osd, tid);
+ dout("%s osd%d tid %llu unknown, skipping\n", __func__,
+ osd->o_osd, tid);
m = NULL;
*skip = 1;
goto out;
diff --git a/net/core/dev.c b/net/core/dev.c
index ae00b894e675..9efbdb3ff78a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2542,6 +2542,8 @@ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
*
* It may return NULL if the skb requires no segmentation. This is
* only possible when GSO is used for verifying header integrity.
+ *
+ * Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb.
*/
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
netdev_features_t features, bool tx_path)
@@ -2556,6 +2558,9 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
return ERR_PTR(err);
}
+ BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
+ sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
+
SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
SKB_GSO_CB(skb)->encap_level = 0;
@@ -4140,6 +4145,7 @@ static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
diffs |= p->vlan_tci ^ skb->vlan_tci;
+ diffs |= skb_metadata_dst_cmp(p, skb);
if (maclen == ETH_HLEN)
diffs |= compare_ether_header(skb_mac_header(p),
skb_mac_header(skb));
@@ -4337,10 +4343,12 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
break;
case GRO_MERGED_FREE:
- if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
+ if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) {
+ skb_dst_drop(skb);
kmem_cache_free(skbuff_head_cache, skb);
- else
+ } else {
__kfree_skb(skb);
+ }
break;
case GRO_HELD:
@@ -7120,8 +7128,10 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
setup(dev);
- if (!dev->tx_queue_len)
+ if (!dev->tx_queue_len) {
dev->priv_flags |= IFF_NO_QUEUE;
+ dev->tx_queue_len = 1;
+ }
dev->num_tx_queues = txqs;
dev->real_num_tx_queues = txqs;
diff --git a/net/core/filter.c b/net/core/filter.c
index 672eefbfbe99..f393a22b9d50 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -777,6 +777,11 @@ static int bpf_check_classic(const struct sock_filter *filter,
if (ftest->k == 0)
return -EINVAL;
break;
+ case BPF_ALU | BPF_LSH | BPF_K:
+ case BPF_ALU | BPF_RSH | BPF_K:
+ if (ftest->k >= 32)
+ return -EINVAL;
+ break;
case BPF_LD | BPF_MEM:
case BPF_LDX | BPF_MEM:
case BPF_ST:
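
The check added above closes an undefined-behavior hole: classic BPF constant shifts of 32 or more bits on the 32-bit accumulator are now rejected at load time. A hedged userspace illustration (structures per the classic BPF UAPI; on a kernel carrying this check the attach fails with EINVAL):

    #include <linux/filter.h>
    #include <sys/socket.h>
    #include <stdio.h>

    #ifndef SO_ATTACH_FILTER
    #define SO_ATTACH_FILTER 26
    #endif

    int main(void)
    {
            struct sock_filter insns[] = {
                    { BPF_LD  | BPF_IMM,         0, 0, 1 },
                    { BPF_ALU | BPF_LSH | BPF_K, 0, 0, 32 }, /* k >= 32: undefined */
                    { BPF_RET | BPF_K,           0, 0, 0xffff },
            };
            struct sock_fprog prog = { .len = 3, .filter = insns };
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            if (fd < 0)
                    return 1;
            if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog)) < 0)
                    perror("SO_ATTACH_FILTER"); /* expect EINVAL here */
            return 0;
    }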
@@ -1134,7 +1139,8 @@ void bpf_prog_destroy(struct bpf_prog *fp)
}
EXPORT_SYMBOL_GPL(bpf_prog_destroy);
-static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
+static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk,
+ bool locked)
{
struct sk_filter *fp, *old_fp;
@@ -1150,10 +1156,8 @@ static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
return -ENOMEM;
}
- old_fp = rcu_dereference_protected(sk->sk_filter,
- sock_owned_by_user(sk));
+ old_fp = rcu_dereference_protected(sk->sk_filter, locked);
rcu_assign_pointer(sk->sk_filter, fp);
-
if (old_fp)
sk_filter_uncharge(sk, old_fp);
@@ -1170,7 +1174,8 @@ static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
* occurs or there is insufficient memory for the filter a negative
* errno code is returned. On success the return is zero.
*/
-int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
+int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk,
+ bool locked)
{
unsigned int fsize = bpf_classic_proglen(fprog);
unsigned int bpf_fsize = bpf_prog_size(fprog->len);
@@ -1208,7 +1213,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
if (IS_ERR(prog))
return PTR_ERR(prog);
- err = __sk_attach_prog(prog, sk);
+ err = __sk_attach_prog(prog, sk, locked);
if (err < 0) {
__bpf_prog_release(prog);
return err;
@@ -1216,7 +1221,12 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
return 0;
}
-EXPORT_SYMBOL_GPL(sk_attach_filter);
+EXPORT_SYMBOL_GPL(__sk_attach_filter);
+
+int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
+{
+ return __sk_attach_filter(fprog, sk, sock_owned_by_user(sk));
+}
int sk_attach_bpf(u32 ufd, struct sock *sk)
{
@@ -1235,7 +1245,7 @@ int sk_attach_bpf(u32 ufd, struct sock *sk)
return -EINVAL;
}
- err = __sk_attach_prog(prog, sk);
+ err = __sk_attach_prog(prog, sk, sock_owned_by_user(sk));
if (err < 0) {
bpf_prog_put(prog);
return err;
@@ -1908,7 +1918,7 @@ static int __init register_sk_filter_ops(void)
}
late_initcall(register_sk_filter_ops);
-int sk_detach_filter(struct sock *sk)
+int __sk_detach_filter(struct sock *sk, bool locked)
{
int ret = -ENOENT;
struct sk_filter *filter;
@@ -1916,8 +1926,7 @@ int sk_detach_filter(struct sock *sk)
if (sock_flag(sk, SOCK_FILTER_LOCKED))
return -EPERM;
- filter = rcu_dereference_protected(sk->sk_filter,
- sock_owned_by_user(sk));
+ filter = rcu_dereference_protected(sk->sk_filter, locked);
if (filter) {
RCU_INIT_POINTER(sk->sk_filter, NULL);
sk_filter_uncharge(sk, filter);
@@ -1926,7 +1935,12 @@ int sk_detach_filter(struct sock *sk)
return ret;
}
-EXPORT_SYMBOL_GPL(sk_detach_filter);
+EXPORT_SYMBOL_GPL(__sk_detach_filter);
+
+int sk_detach_filter(struct sock *sk)
+{
+ return __sk_detach_filter(sk, sock_owned_by_user(sk));
+}
int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
unsigned int len)
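
A note on the refactor above: sock_owned_by_user() is no longer baked into the attach/detach helpers. The new __sk_attach_filter()/__sk_detach_filter() entry points take the lockdep condition explicitly, presumably so a caller that serializes sk->sk_filter by some lock other than the socket lock can assert that; the old names remain thin wrappers. A minimal sketch under that assumption (the caller below is hypothetical, not from the patch):

    /* assumption: the caller holds a lock of its own that protects
     * sk->sk_filter, so it passes that fact instead of relying on
     * sock_owned_by_user() */
    static int detach_with_external_locking(struct sock *sk)
    {
            return __sk_detach_filter(sk, true /* locked */);
    }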
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index d79699c9d1b9..12e700332010 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -208,7 +208,6 @@ ip:
case htons(ETH_P_IPV6): {
const struct ipv6hdr *iph;
struct ipv6hdr _iph;
- __be32 flow_label;
ipv6:
iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
@@ -230,8 +229,12 @@ ipv6:
key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
}
- flow_label = ip6_flowlabel(iph);
- if (flow_label) {
+ if ((dissector_uses_key(flow_dissector,
+ FLOW_DISSECTOR_KEY_FLOW_LABEL) ||
+ (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)) &&
+ ip6_flowlabel(iph)) {
+ __be32 flow_label = ip6_flowlabel(iph);
+
if (dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
key_tags = skb_flow_dissector_target(flow_dissector,
@@ -396,6 +399,13 @@ ip_proto_again:
goto out_bad;
proto = eth->h_proto;
nhoff += sizeof(*eth);
+
+ /* Cap headers that we access via pointers at the
+ * end of the Ethernet header as our maximum alignment
+ * at that point is only 2 bytes.
+ */
+ if (NET_IP_ALIGN)
+ hlen = nhoff;
}
key_control->flags |= FLOW_DIS_ENCAPSULATION;
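
Rationale sketch for the hlen capping above: with a nonzero NET_IP_ALIGN, headers past an inner Ethernet header are only guaranteed 2-byte alignment. Capping hlen at nhoff forces __skb_header_pointer() onto its copy path, so later dissection reads from an aligned on-stack buffer rather than through a direct, possibly misaligned pointer:

    /* with hlen == nhoff, (hlen - offset) can never cover sizeof(_iph),
     * so the header is copied into _iph instead of being returned as a
     * raw pointer into the 2-byte-aligned packet data */
    iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);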
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index de8d5cc5eb24..4da4d51a2ccf 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2787,7 +2787,9 @@ static struct sk_buff *pktgen_alloc_skb(struct net_device *dev,
} else {
skb = __netdev_alloc_skb(dev, size, GFP_NOWAIT);
}
- skb_reserve(skb, LL_RESERVED_SPACE(dev));
+
+ if (likely(skb))
+ skb_reserve(skb, LL_RESERVED_SPACE(dev));
return skb;
}
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 34ba7a08876d..ca966f7de351 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -905,6 +905,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
+ rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
+ nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
+ nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
+ + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
+ nla_total_size(1); /* IFLA_PROTO_DOWN */
}
diff --git a/net/core/scm.c b/net/core/scm.c
index 8a1741b14302..dce0acb929f1 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -87,6 +87,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
*fplp = fpl;
fpl->count = 0;
fpl->max = SCM_MAX_FD;
+ fpl->user = NULL;
}
fpp = &fpl->fp[fpl->count];
@@ -107,6 +108,10 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
*fpp++ = file;
fpl->count++;
}
+
+ if (!fpl->user)
+ fpl->user = get_uid(current_user());
+
return num;
}
@@ -119,6 +124,7 @@ void __scm_destroy(struct scm_cookie *scm)
scm->fp = NULL;
for (i=fpl->count-1; i>=0; i--)
fput(fpl->fp[i]);
+ free_uid(fpl->user);
kfree(fpl);
}
}
@@ -336,6 +342,7 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl)
for (i = 0; i < fpl->count; i++)
get_file(fpl->fp[i]);
new_fpl->max = new_fpl->count;
+ new_fpl->user = get_uid(fpl->user);
}
return new_fpl;
}
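
The scm.c hunks above pin the sending user for the lifetime of each in-flight fd list; read together, the reference pairing is:

    /* scm_fp_copy()   : fpl->user     = get_uid(current_user());
     * scm_fp_dup()    : new_fpl->user = get_uid(fpl->user);
     * __scm_destroy() : free_uid(fpl->user);
     *
     * every scm_fp_list owns exactly one uid reference, so any per-user
     * accounting hung off fpl->user cannot outlive the uid itself */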
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index b2df375ec9c2..8616d1147c93 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -79,6 +79,8 @@
struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;
+int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
+EXPORT_SYMBOL(sysctl_max_skb_frags);
/**
* skb_panic - private function for out-of-line support
@@ -2946,6 +2948,24 @@ int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
EXPORT_SYMBOL_GPL(skb_append_pagefrags);
/**
+ * skb_push_rcsum - push skb and update receive checksum
+ * @skb: buffer to update
+ * @len: length of data pulled
+ *
+ * This function performs an skb_push on the packet and updates
+ * the CHECKSUM_COMPLETE checksum. It should be used on
+ * receive path processing instead of skb_push unless you know
+ * that the checksum difference is zero (e.g., a valid IP header)
+ * or you are setting ip_summed to CHECKSUM_NONE.
+ */
+static unsigned char *skb_push_rcsum(struct sk_buff *skb, unsigned len)
+{
+ skb_push(skb, len);
+ skb_postpush_rcsum(skb, skb->data, len);
+ return skb->data;
+}
+
+/**
* skb_pull_rcsum - pull skb and update receive checksum
* @skb: buffer to update
* @len: length of data pulled
@@ -4082,9 +4102,9 @@ struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
if (!pskb_may_pull(skb_chk, offset))
goto err;
- __skb_pull(skb_chk, offset);
+ skb_pull_rcsum(skb_chk, offset);
ret = skb_chkf(skb_chk);
- __skb_push(skb_chk, offset);
+ skb_push_rcsum(skb_chk, offset);
if (ret)
goto err;
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 95b6139d710c..a6beb7b6ae55 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -26,6 +26,7 @@ static int zero = 0;
static int one = 1;
static int min_sndbuf = SOCK_MIN_SNDBUF;
static int min_rcvbuf = SOCK_MIN_RCVBUF;
+static int max_skb_frags = MAX_SKB_FRAGS;
static int net_msg_warn; /* Unused, but still a sysctl */
@@ -392,6 +393,15 @@ static struct ctl_table net_core_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec
},
+ {
+ .procname = "max_skb_frags",
+ .data = &sysctl_max_skb_frags,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &one,
+ .extra2 = &max_skb_frags,
+ },
{ }
};
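
The new knob surfaces as /proc/sys/net/core/max_skb_frags, clamped by proc_dointvec_minmax() to 1..MAX_SKB_FRAGS; the net/ipv4/tcp.c hunks below make TCP start a new segment once an skb under construction holds that many page fragments and coalescing is not possible. A hedged userspace sketch, assuming a kernel with this sysctl:

    #include <stdio.h>

    int main(void)
    {
            /* needs root / CAP_NET_ADMIN; e.g. to ease drivers with
             * small DMA descriptor rings */
            FILE *f = fopen("/proc/sys/net/core/max_skb_frags", "w");

            if (!f)
                    return 1;
            fprintf(f, "8\n");      /* accepted range: 1..MAX_SKB_FRAGS */
            return fclose(f) ? 1 : 0;
    }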
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 5684e14932bd..8be8f27bfacc 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -204,8 +204,6 @@ void dccp_req_err(struct sock *sk, u64 seq)
* ICMPs are not backlogged, hence we cannot get an established
* socket here.
*/
- WARN_ON(req->sk);
-
if (!between48(seq, dccp_rsk(req)->dreq_iss, dccp_rsk(req)->dreq_gss)) {
NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
} else {
@@ -824,26 +822,26 @@ lookup:
if (sk->sk_state == DCCP_NEW_SYN_RECV) {
struct request_sock *req = inet_reqsk(sk);
- struct sock *nsk = NULL;
+ struct sock *nsk;
sk = req->rsk_listener;
- if (likely(sk->sk_state == DCCP_LISTEN)) {
- nsk = dccp_check_req(sk, skb, req);
- } else {
+ if (unlikely(sk->sk_state != DCCP_LISTEN)) {
inet_csk_reqsk_queue_drop_and_put(sk, req);
goto lookup;
}
+ sock_hold(sk);
+ nsk = dccp_check_req(sk, skb, req);
if (!nsk) {
reqsk_put(req);
- goto discard_it;
+ goto discard_and_relse;
}
if (nsk == sk) {
- sock_hold(sk);
reqsk_put(req);
} else if (dccp_child_process(sk, nsk, skb)) {
dccp_v4_ctl_send_reset(sk, skb);
- goto discard_it;
+ goto discard_and_relse;
} else {
+ sock_put(sk);
return 0;
}
}
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 9c6d0508e63a..b8608b71a66d 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -691,26 +691,26 @@ lookup:
if (sk->sk_state == DCCP_NEW_SYN_RECV) {
struct request_sock *req = inet_reqsk(sk);
- struct sock *nsk = NULL;
+ struct sock *nsk;
sk = req->rsk_listener;
- if (likely(sk->sk_state == DCCP_LISTEN)) {
- nsk = dccp_check_req(sk, skb, req);
- } else {
+ if (unlikely(sk->sk_state != DCCP_LISTEN)) {
inet_csk_reqsk_queue_drop_and_put(sk, req);
goto lookup;
}
+ sock_hold(sk);
+ nsk = dccp_check_req(sk, skb, req);
if (!nsk) {
reqsk_put(req);
- goto discard_it;
+ goto discard_and_relse;
}
if (nsk == sk) {
- sock_hold(sk);
reqsk_put(req);
} else if (dccp_child_process(sk, nsk, skb)) {
dccp_v6_ctl_send_reset(sk, skb);
- goto discard_it;
+ goto discard_and_relse;
} else {
+ sock_put(sk);
return 0;
}
}
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index cebd9d31e65a..0212591b0077 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -334,6 +334,9 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
ASSERT_RTNL();
+ if (in_dev->dead)
+ goto no_promotions;
+
/* 1. Deleting primary ifaddr forces deletion of all secondaries
* unless alias promotion is set
**/
@@ -380,6 +383,7 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
fib_del_ifaddr(ifa, ifa1);
}
+no_promotions:
/* 2. Unlink it */
*ifap = ifa1->ifa_next;
@@ -1847,7 +1851,7 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb,
if (err < 0)
goto errout;
- err = EINVAL;
+ err = -EINVAL;
if (!tb[NETCONFA_IFINDEX])
goto errout;
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index e10edb5e78b0..f97ae9d93ee9 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -280,7 +280,6 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
struct in_device *in_dev;
struct fib_result res;
struct rtable *rt;
- struct flowi4 fl4;
struct net *net;
int scope;
@@ -296,14 +295,13 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
scope = RT_SCOPE_UNIVERSE;
if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) {
- fl4.flowi4_oif = 0;
- fl4.flowi4_iif = LOOPBACK_IFINDEX;
- fl4.daddr = ip_hdr(skb)->saddr;
- fl4.saddr = 0;
- fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
- fl4.flowi4_scope = scope;
- fl4.flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0;
- fl4.flowi4_tun_key.tun_id = 0;
+ struct flowi4 fl4 = {
+ .flowi4_iif = LOOPBACK_IFINDEX,
+ .daddr = ip_hdr(skb)->saddr,
+ .flowi4_tos = RT_TOS(ip_hdr(skb)->tos),
+ .flowi4_scope = scope,
+ .flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0,
+ };
if (!fib_lookup(net, &fl4, &res, 0))
return FIB_RES_PREFSRC(net, res);
} else {
@@ -923,6 +921,9 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
subnet = 1;
}
+ if (in_dev->dead)
+ goto no_promotions;
+
/* Deletion is more complicated than add.
* We should take care not to delete too much :-)
*
@@ -998,6 +999,7 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
}
}
+no_promotions:
if (!(ok & BRD_OK))
fib_magic(RTM_DELROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, prim);
if (subnet && ifa->ifa_prefixlen < 31) {
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 05e4cba14162..b3086cf27027 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -356,9 +356,8 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
skb_dst_set(skb, &rt->dst);
skb->dev = dev;
- skb->reserved_tailroom = skb_end_offset(skb) -
- min(mtu, skb_end_offset(skb));
skb_reserve(skb, hlen);
+ skb_tailroom_reserve(skb, mtu, tlen);
skb_reset_network_header(skb);
pip = ip_hdr(skb);
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 728414dcea3b..030cd09dd2a2 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -789,14 +789,16 @@ static void inet_child_forget(struct sock *sk, struct request_sock *req,
reqsk_put(req);
}
-void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
- struct sock *child)
+struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
+ struct request_sock *req,
+ struct sock *child)
{
struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
spin_lock(&queue->rskq_lock);
if (unlikely(sk->sk_state != TCP_LISTEN)) {
inet_child_forget(sk, req, child);
+ child = NULL;
} else {
req->sk = child;
req->dl_next = NULL;
@@ -808,6 +810,7 @@ void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
sk_acceptq_added(sk);
}
spin_unlock(&queue->rskq_lock);
+ return child;
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_add);
@@ -817,11 +820,8 @@ struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
if (own_req) {
inet_csk_reqsk_queue_drop(sk, req);
reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
- inet_csk_reqsk_queue_add(sk, req, child);
- /* Warning: caller must not call reqsk_put(req);
- * child stole last reference on it.
- */
- return child;
+ if (inet_csk_reqsk_queue_add(sk, req, child))
+ return child;
}
/* Too bad, another child took ownership of the request, undo. */
bh_unlock_sock(child);
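
Contract change worth spelling out: inet_csk_reqsk_queue_add() now reports whether the child actually made it onto the accept queue. In sketch form, from the caller's side:

    if (inet_csk_reqsk_queue_add(sk, req, child))
            return child;   /* queued; the child holds the last req ref */
    /* NULL: the listener left TCP_LISTEN and inet_child_forget()
     * already disposed of the child under the accept-queue lock, so
     * the caller falls through to its undo path */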
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 1fe55ae81781..b8a0607dab96 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -661,6 +661,7 @@ int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
struct ipq *qp;
IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
+ skb_orphan(skb);
/* Lookup (or create) queue header */
qp = ip_find(net, ip_hdr(skb), user, vif);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 33bef2763c72..dbf7f7ee2958 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -240,6 +240,7 @@ static int ip_finish_output_gso(struct net *net, struct sock *sk,
* from host network stack.
*/
features = netif_skb_features(skb);
+ BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET);
segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
if (IS_ERR_OR_NULL(segs)) {
kfree_skb(skb);
@@ -921,7 +922,7 @@ static int __ip_append_data(struct sock *sk,
if (((length > mtu) || (skb && skb_is_gso(skb))) &&
(sk->sk_protocol == IPPROTO_UDP) &&
(rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
- (sk->sk_type == SOCK_DGRAM)) {
+ (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
err = ip_ufo_append_data(sk, queue, getfrag, from, length,
hh_len, fragheaderlen, transhdrlen,
maxfraglen, flags);
@@ -1236,13 +1237,16 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
if (!skb)
return -EINVAL;
- cork->length += size;
if ((size + skb->len > mtu) &&
(sk->sk_protocol == IPPROTO_UDP) &&
(rt->dst.dev->features & NETIF_F_UFO)) {
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return -EOPNOTSUPP;
+
skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
}
+ cork->length += size;
while (size > 0) {
if (skb_is_gso(skb)) {
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 5f73a7c03e27..a50124260f5a 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -249,6 +249,8 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc,
switch (cmsg->cmsg_type) {
case IP_RETOPTS:
err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
+
+ /* Our caller is responsible for freeing ipc->opt */
err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg),
err < 40 ? err : 40);
if (err)
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index cbb51f3fac06..ce30c8b72457 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -663,6 +663,8 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
connected = (tunnel->parms.iph.daddr != 0);
+ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+
dst = tnl_params->daddr;
if (dst == 0) {
/* NBMA tunnel */
@@ -760,7 +762,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
tunnel->err_count--;
- memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
dst_link_failure(skb);
} else
tunnel->err_count = 0;
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index 6fb869f646bf..a04dee536b8e 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -27,8 +27,6 @@ static int nf_ct_ipv4_gather_frags(struct net *net, struct sk_buff *skb,
{
int err;
- skb_orphan(skb);
-
local_bh_disable();
err = ip_defrag(net, skb, user);
local_bh_enable();
diff --git a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
index c6eb42100e9a..ea91058b5f6f 100644
--- a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
+++ b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
@@ -108,10 +108,18 @@ static int masq_inet_event(struct notifier_block *this,
unsigned long event,
void *ptr)
{
- struct net_device *dev = ((struct in_ifaddr *)ptr)->ifa_dev->dev;
+ struct in_device *idev = ((struct in_ifaddr *)ptr)->ifa_dev;
struct netdev_notifier_info info;
- netdev_notifier_info_init(&info, dev);
+ /* The masq_dev_notifier will catch the case of the device going
+ * down. So if the inetdev is dead and being destroyed we have
+ * no work to do. Otherwise this is an individual address removal
+ * and we have to perform the flush.
+ */
+ if (idev->dead)
+ return NOTIFY_DONE;
+
+ netdev_notifier_info_init(&info, idev->dev);
return masq_device_event(this, event, &info);
}
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index b27e98010dea..0d5278ca4777 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -746,8 +746,10 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
if (msg->msg_controllen) {
err = ip_cmsg_send(sock_net(sk), msg, &ipc, false);
- if (err)
+ if (unlikely(err)) {
+ kfree(ipc.opt);
return err;
+ }
if (ipc.opt)
free = 1;
}
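
The same leak fix recurs in raw_sendmsg() and udp_sendmsg() below: ip_cmsg_send() can allocate ipc.opt while handling IP_RETOPTS and then fail on a later cmsg, so the caller now releases it on the error path. The shared pattern, as a sketch:

    err = ip_cmsg_send(sock_net(sk), msg, &ipc, false);
    if (unlikely(err)) {
            kfree(ipc.opt);   /* may be NULL; kfree(NULL) is a no-op */
            return err;
    }
    if (ipc.opt)
            free = 1;         /* the success path frees it at the end */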
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 865895d3fb27..a9b479a1c4a0 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -547,8 +547,10 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
if (msg->msg_controllen) {
err = ip_cmsg_send(net, msg, &ipc, false);
- if (err)
+ if (unlikely(err)) {
+ kfree(ipc.opt);
goto out;
+ }
if (ipc.opt)
free = 1;
}
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index a0d842f4e9cf..79a957ea6545 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -129,6 +129,7 @@ static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly = 256;
+static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
/*
* Interface to generic destination cache.
*/
@@ -757,7 +758,7 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
struct fib_nh *nh = &FIB_RES_NH(res);
update_or_create_fnhe(nh, fl4->daddr, new_gw,
- 0, 0);
+ 0, jiffies + ip_rt_gc_timeout);
}
if (kill_route)
rt->dst.obsolete = DST_OBSOLETE_KILL;
@@ -1558,6 +1559,36 @@ static void ip_handle_martian_source(struct net_device *dev,
#endif
}
+static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
+{
+ struct fnhe_hash_bucket *hash;
+ struct fib_nh_exception *fnhe, __rcu **fnhe_p;
+ u32 hval = fnhe_hashfun(daddr);
+
+ spin_lock_bh(&fnhe_lock);
+
+ hash = rcu_dereference_protected(nh->nh_exceptions,
+ lockdep_is_held(&fnhe_lock));
+ hash += hval;
+
+ fnhe_p = &hash->chain;
+ fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
+ while (fnhe) {
+ if (fnhe->fnhe_daddr == daddr) {
+ rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
+ fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
+ fnhe_flush_routes(fnhe);
+ kfree_rcu(fnhe, rcu);
+ break;
+ }
+ fnhe_p = &fnhe->fnhe_next;
+ fnhe = rcu_dereference_protected(fnhe->fnhe_next,
+ lockdep_is_held(&fnhe_lock));
+ }
+
+ spin_unlock_bh(&fnhe_lock);
+}
+
/* called in rcu_read_lock() section */
static int __mkroute_input(struct sk_buff *skb,
const struct fib_result *res,
@@ -1611,11 +1642,20 @@ static int __mkroute_input(struct sk_buff *skb,
fnhe = find_exception(&FIB_RES_NH(*res), daddr);
if (do_cache) {
- if (fnhe)
+ if (fnhe) {
rth = rcu_dereference(fnhe->fnhe_rth_input);
- else
- rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
+ if (rth && rth->dst.expires &&
+ time_after(jiffies, rth->dst.expires)) {
+ ip_del_fnhe(&FIB_RES_NH(*res), daddr);
+ fnhe = NULL;
+ } else {
+ goto rt_cache;
+ }
+ }
+
+ rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
+rt_cache:
if (rt_cache_valid(rth)) {
skb_dst_set_noref(skb, &rth->dst);
goto out;
@@ -2016,19 +2056,29 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
struct fib_nh *nh = &FIB_RES_NH(*res);
fnhe = find_exception(nh, fl4->daddr);
- if (fnhe)
+ if (fnhe) {
prth = &fnhe->fnhe_rth_output;
- else {
- if (unlikely(fl4->flowi4_flags &
- FLOWI_FLAG_KNOWN_NH &&
- !(nh->nh_gw &&
- nh->nh_scope == RT_SCOPE_LINK))) {
- do_cache = false;
- goto add;
+ rth = rcu_dereference(*prth);
+ if (rth && rth->dst.expires &&
+ time_after(jiffies, rth->dst.expires)) {
+ ip_del_fnhe(nh, fl4->daddr);
+ fnhe = NULL;
+ } else {
+ goto rt_cache;
}
- prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
}
+
+ if (unlikely(fl4->flowi4_flags &
+ FLOWI_FLAG_KNOWN_NH &&
+ !(nh->nh_gw &&
+ nh->nh_scope == RT_SCOPE_LINK))) {
+ do_cache = false;
+ goto add;
+ }
+ prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
rth = rcu_dereference(*prth);
+
+rt_cache:
if (rt_cache_valid(rth)) {
dst_hold(&rth->dst);
return rth;
@@ -2582,7 +2632,6 @@ void ip_rt_multicast_event(struct in_device *in_dev)
}
#ifdef CONFIG_SYSCTL
-static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
static int ip_rt_gc_interval __read_mostly = 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
static int ip_rt_gc_elasticity __read_mostly = 8;
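
Tying the route.c hunks together: redirects now create exceptions with a real expiry (jiffies + ip_rt_gc_timeout, which is why that variable moved out of the CONFIG_SYSCTL block), and both __mkroute_input() and __mkroute_output() apply the same eviction rule before trusting a cached exception route. In sketch form:

    rth = rcu_dereference(fnhe->fnhe_rth_input);   /* or _output */
    if (rth && rth->dst.expires && time_after(jiffies, rth->dst.expires)) {
            /* unlink the exception and flush its routes under
             * fnhe_lock, then fall back to the per-cpu cached route */
            ip_del_fnhe(nh, daddr);
            fnhe = NULL;
    }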
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index a1d5c67e251d..6ecfc9de599c 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -279,6 +279,7 @@
#include <asm/uaccess.h>
#include <asm/ioctls.h>
+#include <asm/unaligned.h>
#include <net/busy_poll.h>
int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
@@ -938,7 +939,7 @@ new_segment:
i = skb_shinfo(skb)->nr_frags;
can_coalesce = skb_can_coalesce(skb, i, page, offset);
- if (!can_coalesce && i >= MAX_SKB_FRAGS) {
+ if (!can_coalesce && i >= sysctl_max_skb_frags) {
tcp_mark_push(tp, skb);
goto new_segment;
}
@@ -1211,7 +1212,7 @@ new_segment:
if (!skb_can_coalesce(skb, i, pfrag->page,
pfrag->offset)) {
- if (i == MAX_SKB_FRAGS || !sg) {
+ if (i == sysctl_max_skb_frags || !sg) {
tcp_mark_push(tp, skb);
goto new_segment;
}
@@ -2637,6 +2638,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
const struct inet_connection_sock *icsk = inet_csk(sk);
u32 now = tcp_time_stamp;
unsigned int start;
+ u64 rate64;
u32 rate;
memset(info, 0, sizeof(*info));
@@ -2702,15 +2704,17 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
info->tcpi_total_retrans = tp->total_retrans;
rate = READ_ONCE(sk->sk_pacing_rate);
- info->tcpi_pacing_rate = rate != ~0U ? rate : ~0ULL;
+ rate64 = rate != ~0U ? rate : ~0ULL;
+ put_unaligned(rate64, &info->tcpi_pacing_rate);
rate = READ_ONCE(sk->sk_max_pacing_rate);
- info->tcpi_max_pacing_rate = rate != ~0U ? rate : ~0ULL;
+ rate64 = rate != ~0U ? rate : ~0ULL;
+ put_unaligned(rate64, &info->tcpi_max_pacing_rate);
do {
start = u64_stats_fetch_begin_irq(&tp->syncp);
- info->tcpi_bytes_acked = tp->bytes_acked;
- info->tcpi_bytes_received = tp->bytes_received;
+ put_unaligned(tp->bytes_acked, &info->tcpi_bytes_acked);
+ put_unaligned(tp->bytes_received, &info->tcpi_bytes_received);
} while (u64_stats_fetch_retry_irq(&tp->syncp, start));
info->tcpi_segs_out = tp->segs_out;
info->tcpi_segs_in = tp->segs_in;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 205e6745393f..7decaa439360 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -312,7 +312,7 @@ static void do_redirect(struct sk_buff *skb, struct sock *sk)
/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
-void tcp_req_err(struct sock *sk, u32 seq)
+void tcp_req_err(struct sock *sk, u32 seq, bool abort)
{
struct request_sock *req = inet_reqsk(sk);
struct net *net = sock_net(sk);
@@ -320,11 +320,9 @@ void tcp_req_err(struct sock *sk, u32 seq)
/* ICMPs are not backlogged, hence we cannot get
* an established socket here.
*/
- WARN_ON(req->sk);
-
if (seq != tcp_rsk(req)->snt_isn) {
NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
- } else {
+ } else if (abort) {
/*
* Still in SYN_RECV, just remove it silently.
* There is no good way to pass the error to the newly
@@ -384,7 +382,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
}
seq = ntohl(th->seq);
if (sk->sk_state == TCP_NEW_SYN_RECV)
- return tcp_req_err(sk, seq);
+ return tcp_req_err(sk, seq,
+ type == ICMP_PARAMETERPROB ||
+ type == ICMP_TIME_EXCEEDED ||
+ (type == ICMP_DEST_UNREACH &&
+ (code == ICMP_NET_UNREACH ||
+ code == ICMP_HOST_UNREACH)));
bh_lock_sock(sk);
/* If too many ICMPs get dropped on busy
@@ -705,7 +708,8 @@ release_sk1:
outside socket context is ugly, certainly. What can I do?
*/
-static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
+static void tcp_v4_send_ack(struct net *net,
+ struct sk_buff *skb, u32 seq, u32 ack,
u32 win, u32 tsval, u32 tsecr, int oif,
struct tcp_md5sig_key *key,
int reply_flags, u8 tos)
@@ -720,7 +724,6 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
];
} rep;
struct ip_reply_arg arg;
- struct net *net = dev_net(skb_dst(skb)->dev);
memset(&rep.th, 0, sizeof(struct tcphdr));
memset(&arg, 0, sizeof(arg));
@@ -782,7 +785,8 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
struct inet_timewait_sock *tw = inet_twsk(sk);
struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
- tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
+ tcp_v4_send_ack(sock_net(sk), skb,
+ tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
tcp_time_stamp + tcptw->tw_ts_offset,
tcptw->tw_ts_recent,
@@ -801,8 +805,10 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
* sk->sk_state == TCP_SYN_RECV -> for Fast Open.
*/
- tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
- tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
+ u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
+ tcp_sk(sk)->snd_nxt;
+
+ tcp_v4_send_ack(sock_net(sk), skb, seq,
tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
tcp_time_stamp,
req->ts_recent,
@@ -1586,28 +1592,30 @@ process:
if (sk->sk_state == TCP_NEW_SYN_RECV) {
struct request_sock *req = inet_reqsk(sk);
- struct sock *nsk = NULL;
+ struct sock *nsk;
sk = req->rsk_listener;
- if (tcp_v4_inbound_md5_hash(sk, skb))
- goto discard_and_relse;
- if (likely(sk->sk_state == TCP_LISTEN)) {
- nsk = tcp_check_req(sk, skb, req, false);
- } else {
+ if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
+ reqsk_put(req);
+ goto discard_it;
+ }
+ if (unlikely(sk->sk_state != TCP_LISTEN)) {
inet_csk_reqsk_queue_drop_and_put(sk, req);
goto lookup;
}
+ sock_hold(sk);
+ nsk = tcp_check_req(sk, skb, req, false);
if (!nsk) {
reqsk_put(req);
- goto discard_it;
+ goto discard_and_relse;
}
if (nsk == sk) {
- sock_hold(sk);
reqsk_put(req);
} else if (tcp_child_process(sk, nsk, skb)) {
tcp_v4_send_reset(nsk, skb);
- goto discard_it;
+ goto discard_and_relse;
} else {
+ sock_put(sk);
return 0;
}
}
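
The TCP_NEW_SYN_RECV rework above follows the same shape as the dccp_v4_rcv()/dccp_v6_rcv() hunks earlier and the tcp_v6_rcv() hunk below: the listener is pinned with sock_hold() before *_check_req() can consume the request's listener reference, so every failure exit can uniformly go through discard_and_relse, which performs the matching sock_put(). Reduced to its skeleton:

    sk = req->rsk_listener;
    if (unlikely(sk->sk_state != TCP_LISTEN)) {
            inet_csk_reqsk_queue_drop_and_put(sk, req);
            goto lookup;                    /* req held the only ref */
    }
    sock_hold(sk);                          /* pin for this function */
    nsk = tcp_check_req(sk, skb, req, false);
    if (!nsk) {
            reqsk_put(req);
            goto discard_and_relse;         /* drops the pinned ref */
    }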
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index c8cbc2b4b792..a726d7853ce5 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -550,7 +550,7 @@ reset:
*/
if (crtt > tp->srtt_us) {
/* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
- crtt /= 8 * USEC_PER_MSEC;
+ crtt /= 8 * USEC_PER_SEC / HZ;
inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
} else if (tp->srtt_us == 0) {
/* RFC6298: 5.7 We've failed to get a valid RTT sample from
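
Unit check for the one-liner above, assuming crtt is cached in the same scale as tp->srtt_us (microseconds left-shifted by 3, which the comparison above implies) while icsk_rto is in jiffies:

    /*   crtt / (8 * USEC_PER_SEC / HZ)
     *     = (rtt_us << 3) / (8 * usec_per_jiffy)
     *     = rtt_us / usec_per_jiffy            -> RTT in jiffies
     *
     * the old divisor, 8 * USEC_PER_MSEC, produced milliseconds,
     * which only equals jiffies when HZ == 1000 */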
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index ac6b1961ffeb..9475a2748a9a 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -458,7 +458,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
newtp->rcv_wup = newtp->copied_seq =
newtp->rcv_nxt = treq->rcv_isn + 1;
- newtp->segs_in = 0;
+ newtp->segs_in = 1;
newtp->snd_sml = newtp->snd_una =
newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;
@@ -818,6 +818,7 @@ int tcp_child_process(struct sock *parent, struct sock *child,
int ret = 0;
int state = child->sk_state;
+ tcp_sk(child)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
if (!sock_owned_by_user(child)) {
ret = tcp_rcv_state_process(child, skb);
/* Wakeup parent, send SIGIO */
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index 17d35662930d..3e6a472e6b88 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -219,7 +219,7 @@ static u32 tcp_yeah_ssthresh(struct sock *sk)
yeah->fast_count = 0;
yeah->reno_count = max(yeah->reno_count>>1, 2U);
- return tp->snd_cwnd - reduction;
+ return max_t(int, tp->snd_cwnd - reduction, 2);
}
static struct tcp_congestion_ops tcp_yeah __read_mostly = {
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index d8946929813e..2ef43a102951 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -966,8 +966,10 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
if (msg->msg_controllen) {
err = ip_cmsg_send(sock_net(sk), msg, &ipc,
sk->sk_family == AF_INET6);
- if (err)
+ if (unlikely(err)) {
+ kfree(ipc.opt);
return err;
+ }
if (ipc.opt)
free = 1;
connected = 0;
@@ -1989,10 +1991,14 @@ void udp_v4_early_demux(struct sk_buff *skb)
if (!in_dev)
return;
- ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
- iph->protocol);
- if (!ours)
- return;
+ /* we are supposed to accept bcast packets */
+ if (skb->pkt_type == PACKET_MULTICAST) {
+ ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
+ iph->protocol);
+ if (!ours)
+ return;
+ }
+
sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
uh->source, iph->saddr, dif);
} else if (skb->pkt_type == PACKET_HOST) {
diff --git a/net/ipv4/udp_tunnel.c b/net/ipv4/udp_tunnel.c
index aba428626b52..280a9bdeddee 100644
--- a/net/ipv4/udp_tunnel.c
+++ b/net/ipv4/udp_tunnel.c
@@ -89,6 +89,8 @@ int udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
uh->source = src_port;
uh->len = htons(skb->len);
+ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+
udp_set_csum(nocheck, skb, src, dst, skb->len);
return iptunnel_xmit(sk, rt, skb, src, dst, IPPROTO_UDP,
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 4f7d6e0189cc..3cdf59161a7e 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -585,7 +585,7 @@ static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
if (err < 0)
goto errout;
- err = EINVAL;
+ err = -EINVAL;
if (!tb[NETCONFA_IFINDEX])
goto errout;
@@ -3533,6 +3533,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
{
struct inet6_dev *idev = ifp->idev;
struct net_device *dev = idev->dev;
+ bool notify = false;
addrconf_join_solict(dev, &ifp->addr);
@@ -3578,7 +3579,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
/* Because optimistic nodes can use this address,
* notify listeners. If DAD fails, RTM_DELADDR is sent.
*/
- ipv6_ifa_notify(RTM_NEWADDR, ifp);
+ notify = true;
}
}
@@ -3586,6 +3587,8 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
out:
spin_unlock(&ifp->lock);
read_unlock_bh(&idev->lock);
+ if (notify)
+ ipv6_ifa_notify(RTM_NEWADDR, ifp);
}
static void addrconf_dad_start(struct inet6_ifaddr *ifp)
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 0743a5f4c533..183ff87dacf3 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -163,6 +163,9 @@ ipv4_connected:
fl6.fl6_sport = inet->inet_sport;
fl6.flowi6_uid = sock_i_uid(sk);
+ if (!fl6.flowi6_oif)
+ fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
+
if (!fl6.flowi6_oif && (addr_type&IPV6_ADDR_MULTICAST))
fl6.flowi6_oif = np->mcast_oif;
diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c
index 835ec57c233b..840a4388f860 100644
--- a/net/ipv6/exthdrs_core.c
+++ b/net/ipv6/exthdrs_core.c
@@ -260,7 +260,11 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
return -EINVAL;
}
}
- return -ENOENT;
+ if (!found)
+ return -ENOENT;
+ if (fragoff)
+ *fragoff = _frag_off;
+ break;
}
hdrlen = 8;
} else if (nexthdr == NEXTHDR_AUTH) {
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 1f9ebe3cbb4a..dc2db4f7b182 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -540,12 +540,13 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
}
spin_lock_bh(&ip6_sk_fl_lock);
for (sflp = &np->ipv6_fl_list;
- (sfl = rcu_dereference(*sflp)) != NULL;
+ (sfl = rcu_dereference_protected(*sflp,
+ lockdep_is_held(&ip6_sk_fl_lock))) != NULL;
sflp = &sfl->next) {
if (sfl->fl->label == freq.flr_label) {
if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
np->flow_label &= ~IPV6_FLOWLABEL_MASK;
- *sflp = rcu_dereference(sfl->next);
+ *sflp = sfl->next;
spin_unlock_bh(&ip6_sk_fl_lock);
fl_release(sfl->fl);
kfree_rcu(sfl, rcu);
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index e5ea177d34c6..4650c6824783 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -778,6 +778,8 @@ static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
__u32 mtu;
int err;
+ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+
if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
encap_limit = t->parms.encap_limit;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index e6a7bd15b9b7..a175152d3e46 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -909,6 +909,7 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
struct rt6_info *rt;
#endif
int err;
+ int flags = 0;
/* The correct way to handle this would be to do
* ip6_route_get_saddr, and then ip6_route_output; however,
@@ -940,10 +941,13 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
dst_release(*dst);
*dst = NULL;
}
+
+ if (fl6->flowi6_oif)
+ flags |= RT6_LOOKUP_F_IFACE;
}
if (!*dst)
- *dst = ip6_route_output(net, sk, fl6);
+ *dst = ip6_route_output_flags(net, sk, fl6, flags);
err = (*dst)->error;
if (err)
@@ -1087,8 +1091,8 @@ static inline int ip6_ufo_append_data(struct sock *sk,
int getfrag(void *from, char *to, int offset, int len,
int odd, struct sk_buff *skb),
void *from, int length, int hh_len, int fragheaderlen,
- int transhdrlen, int mtu, unsigned int flags,
- const struct flowi6 *fl6)
+ int exthdrlen, int transhdrlen, int mtu,
+ unsigned int flags, const struct flowi6 *fl6)
{
struct sk_buff *skb;
@@ -1113,7 +1117,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
skb_put(skb, fragheaderlen + transhdrlen);
/* initialize network header pointer */
- skb_reset_network_header(skb);
+ skb_set_network_header(skb, exthdrlen);
/* initialize protocol header pointer */
skb->transport_header = skb->network_header + fragheaderlen;
@@ -1353,9 +1357,9 @@ emsgsize:
(skb && skb_is_gso(skb))) &&
(sk->sk_protocol == IPPROTO_UDP) &&
(rt->dst.dev->features & NETIF_F_UFO) &&
- (sk->sk_type == SOCK_DGRAM)) {
+ (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
- hh_len, fragheaderlen,
+ hh_len, fragheaderlen, exthdrlen,
transhdrlen, mtu, flags, fl6);
if (err)
goto error;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 137fca42aaa6..3991b21e24ad 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -343,12 +343,12 @@ static int ip6_tnl_create2(struct net_device *dev)
t = netdev_priv(dev);
+ dev->rtnl_link_ops = &ip6_link_ops;
err = register_netdevice(dev);
if (err < 0)
goto out;
strcpy(t->parms.name, dev->name);
- dev->rtnl_link_ops = &ip6_link_ops;
dev_hold(dev);
ip6_tnl_link(ip6n, t);
@@ -1180,6 +1180,8 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
u8 tproto;
int err;
+ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+
tproto = ACCESS_ONCE(t->parms.proto);
if (tproto != IPPROTO_IPIP && tproto != 0)
return -1;
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 5ee56d0a8699..d64ee7e83664 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1574,9 +1574,8 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
return NULL;
skb->priority = TC_PRIO_CONTROL;
- skb->reserved_tailroom = skb_end_offset(skb) -
- min(mtu, skb_end_offset(skb));
skb_reserve(skb, hlen);
+ skb_tailroom_reserve(skb, mtu, tlen);
if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) {
/* <draft-ietf-magma-mld-source-05.txt>:
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 02ba70201e05..dd76806358fe 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1170,11 +1170,10 @@ static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table
return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
}
-struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
- struct flowi6 *fl6)
+struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
+ struct flowi6 *fl6, int flags)
{
struct dst_entry *dst;
- int flags = 0;
bool any_src;
dst = l3mdev_rt6_dst_by_oif(net, fl6);
@@ -1195,7 +1194,7 @@ struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
}
-EXPORT_SYMBOL(ip6_route_output);
+EXPORT_SYMBOL_GPL(ip6_route_output_flags);
struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index e6b044480333..f85b4c44c00d 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -329,6 +329,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
struct tcp_sock *tp;
__u32 seq, snd_una;
struct sock *sk;
+ bool fatal;
int err;
sk = __inet6_lookup_established(net, &tcp_hashinfo,
@@ -347,8 +348,9 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
return;
}
seq = ntohl(th->seq);
+ fatal = icmpv6_err_convert(type, code, &err);
if (sk->sk_state == TCP_NEW_SYN_RECV)
- return tcp_req_err(sk, seq);
+ return tcp_req_err(sk, seq, fatal);
bh_lock_sock(sk);
if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
@@ -402,7 +404,6 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
goto out;
}
- icmpv6_err_convert(type, code, &err);
/* Might be for an request_sock */
switch (sk->sk_state) {
@@ -463,8 +464,10 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
if (np->repflow && ireq->pktopts)
fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
+ rcu_read_lock();
err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
np->tclass);
+ rcu_read_unlock();
err = net_xmit_eval(err);
}
@@ -1386,7 +1389,7 @@ process:
if (sk->sk_state == TCP_NEW_SYN_RECV) {
struct request_sock *req = inet_reqsk(sk);
- struct sock *nsk = NULL;
+ struct sock *nsk;
sk = req->rsk_listener;
tcp_v6_fill_cb(skb, hdr, th);
@@ -1394,24 +1397,24 @@ process:
reqsk_put(req);
goto discard_it;
}
- if (likely(sk->sk_state == TCP_LISTEN)) {
- nsk = tcp_check_req(sk, skb, req, false);
- } else {
+ if (unlikely(sk->sk_state != TCP_LISTEN)) {
inet_csk_reqsk_queue_drop_and_put(sk, req);
goto lookup;
}
+ sock_hold(sk);
+ nsk = tcp_check_req(sk, skb, req, false);
if (!nsk) {
reqsk_put(req);
- goto discard_it;
+ goto discard_and_relse;
}
if (nsk == sk) {
- sock_hold(sk);
reqsk_put(req);
tcp_v6_restore_cb(skb);
} else if (tcp_child_process(sk, nsk, skb)) {
tcp_v6_send_reset(nsk, skb);
- goto discard_it;
+ goto discard_and_relse;
} else {
+ sock_put(sk);
return 0;
}
}
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index a1b6adc20e1e..9cb0ff304336 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -837,8 +837,8 @@ start_lookup:
flush_stack(stack, count, skb, count - 1);
} else {
if (!inner_flushed)
- UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
- proto == IPPROTO_UDPLITE);
+ UDP6_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
+ proto == IPPROTO_UDPLITE);
consume_skb(skb);
}
return 0;
@@ -916,11 +916,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
ret = udpv6_queue_rcv_skb(sk, skb);
sock_put(sk);
- /* a return value > 0 means to resubmit the input, but
- * it wants the return to be -protocol, or 0
- */
+ /* a return value > 0 means to resubmit the input */
if (ret > 0)
- return -ret;
+ return ret;
return 0;
}
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index f7fbdbabe50e..372855eeaf42 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -23,7 +23,7 @@ static inline void ipip6_ecn_decapsulate(struct sk_buff *skb)
struct ipv6hdr *inner_iph = ipipv6_hdr(skb);
if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
- IP6_ECN_set_ce(inner_iph);
+ IP6_ECN_set_ce(skb, inner_iph);
}
/* Add encapsulation header.
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 435608c4306d..20ab7b2ec463 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -708,6 +708,9 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
if (!addr || addr->sa_family != AF_IUCV)
return -EINVAL;
+ if (addr_len < sizeof(struct sockaddr_iucv))
+ return -EINVAL;
+
lock_sock(sk);
if (sk->sk_state != IUCV_OPEN) {
err = -EBADFD;
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index ec22078b0914..42de4ccd159f 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -123,12 +123,11 @@ static int l2tp_ip_recv(struct sk_buff *skb)
struct l2tp_tunnel *tunnel = NULL;
int length;
- /* Point to L2TP header */
- optr = ptr = skb->data;
-
if (!pskb_may_pull(skb, 4))
goto discard;
+ /* Point to L2TP header */
+ optr = ptr = skb->data;
session_id = ntohl(*((__be32 *) ptr));
ptr += 4;
@@ -156,6 +155,9 @@ static int l2tp_ip_recv(struct sk_buff *skb)
if (!pskb_may_pull(skb, length))
goto discard;
+ /* Point to L2TP header */
+ optr = ptr = skb->data;
+ ptr += 4;
pr_debug("%s: ip recv\n", tunnel->name);
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
}
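
The l2tp_ip (and, below, l2tp_ip6) reordering is the classic pskb_may_pull() discipline: pulling may reallocate the skb head, so any cached skb->data pointer is only valid if taken, or re-taken, after the pull succeeds. The safe pattern:

    if (!pskb_may_pull(skb, 4))
            goto discard;
    optr = ptr = skb->data;   /* valid only after the pull */
    /* ... parse session_id, look up the tunnel ... */
    if (!pskb_may_pull(skb, length))
            goto discard;
    optr = ptr = skb->data;   /* head may have moved; reload */
    ptr += 4;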
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index a2c8747d2936..9ee4ddb6b397 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -135,12 +135,11 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
struct l2tp_tunnel *tunnel = NULL;
int length;
- /* Point to L2TP header */
- optr = ptr = skb->data;
-
if (!pskb_may_pull(skb, 4))
goto discard;
+ /* Point to L2TP header */
+ optr = ptr = skb->data;
session_id = ntohl(*((__be32 *) ptr));
ptr += 4;
@@ -168,6 +167,9 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
if (!pskb_may_pull(skb, length))
goto discard;
+ /* Point to L2TP header */
+ optr = ptr = skb->data;
+ ptr += 4;
pr_debug("%s: ip recv\n", tunnel->name);
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
}
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index f93c5be612a7..2caaa84ce92d 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -124,8 +124,13 @@ static int l2tp_tunnel_notify(struct genl_family *family,
ret = l2tp_nl_tunnel_send(msg, info->snd_portid, info->snd_seq,
NLM_F_ACK, tunnel, cmd);
- if (ret >= 0)
- return genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
+ if (ret >= 0) {
+ ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
+ /* We don't care if no one is listening */
+ if (ret == -ESRCH)
+ ret = 0;
+ return ret;
+ }
nlmsg_free(msg);
@@ -147,8 +152,13 @@ static int l2tp_session_notify(struct genl_family *family,
ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq,
NLM_F_ACK, session, cmd);
- if (ret >= 0)
- return genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
+ if (ret >= 0) {
+ ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
+ /* We don't care if no one is listening */
+ if (ret == -ESRCH)
+ ret = 0;
+ return ret;
+ }
nlmsg_free(msg);
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 10ad4ac1fa0b..367784be5df2 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -291,7 +291,7 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
}
/* prepare A-MPDU MLME for Rx aggregation */
- tid_agg_rx = kmalloc(sizeof(struct tid_ampdu_rx), GFP_KERNEL);
+ tid_agg_rx = kzalloc(sizeof(*tid_agg_rx), GFP_KERNEL);
if (!tid_agg_rx)
goto end;
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 337bb5d78003..980e9e9b6684 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -7,6 +7,7 @@
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2009, Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -1484,14 +1485,21 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
sdata_info(sdata, "Trigger new scan to find an IBSS to join\n");
- num = ieee80211_ibss_setup_scan_channels(local->hw.wiphy,
- &ifibss->chandef,
- channels,
- ARRAY_SIZE(channels));
scan_width = cfg80211_chandef_to_scan_width(&ifibss->chandef);
- ieee80211_request_ibss_scan(sdata, ifibss->ssid,
- ifibss->ssid_len, channels, num,
- scan_width);
+
+ if (ifibss->fixed_channel) {
+ num = ieee80211_ibss_setup_scan_channels(local->hw.wiphy,
+ &ifibss->chandef,
+ channels,
+ ARRAY_SIZE(channels));
+ ieee80211_request_ibss_scan(sdata, ifibss->ssid,
+ ifibss->ssid_len, channels,
+ num, scan_width);
+ } else {
+ ieee80211_request_ibss_scan(sdata, ifibss->ssid,
+ ifibss->ssid_len, NULL,
+ 0, scan_width);
+ }
} else {
int interval = IEEE80211_SCAN_INTERVAL;
@@ -1732,7 +1740,6 @@ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local)
if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
continue;
sdata->u.ibss.last_scan_completed = jiffies;
- ieee80211_queue_work(&local->hw, &sdata->work);
}
mutex_unlock(&local->iflist_mtx);
}
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 5322b4c71630..6837a46ca4a2 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -92,7 +92,7 @@ struct ieee80211_fragment_entry {
u16 extra_len;
u16 last_frag;
u8 rx_queue;
- bool ccmp; /* Whether fragments were encrypted with CCMP */
+ bool check_sequential_pn; /* needed for CCMP/GCMP */
u8 last_pn[6]; /* PN of the last fragment if CCMP was used */
};
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index c9e325d2e120..7a2b7915093b 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -977,7 +977,10 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
if (sdata->vif.txq) {
struct txq_info *txqi = to_txq_info(sdata->vif.txq);
+ spin_lock_bh(&txqi->queue.lock);
ieee80211_purge_tx_queue(&local->hw, &txqi->queue);
+ spin_unlock_bh(&txqi->queue.lock);
+
atomic_set(&sdata->txqs_len[txqi->txq.ac], 0);
}
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index fa28500f28fd..6f85b6ab8e51 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -1370,17 +1370,6 @@ out:
sdata_unlock(sdata);
}
-void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local)
-{
- struct ieee80211_sub_if_data *sdata;
-
- rcu_read_lock();
- list_for_each_entry_rcu(sdata, &local->interfaces, list)
- if (ieee80211_vif_is_mesh(&sdata->vif) &&
- ieee80211_sdata_running(sdata))
- ieee80211_queue_work(&local->hw, &sdata->work);
- rcu_read_unlock();
-}
void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
{
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index a1596344c3ba..4a8019f79fb2 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -362,14 +362,10 @@ static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
return sdata->u.mesh.mesh_pp_id == IEEE80211_PATH_PROTOCOL_HWMP;
}
-void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local);
-
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata);
void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata);
void ieee80211s_stop(void);
#else
-static inline void
-ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) {}
static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
{ return false; }
static inline void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 3aa04344942b..83097c3832d1 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -4003,8 +4003,6 @@ static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
if (!ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR))
ieee80211_queue_work(&sdata->local->hw,
&sdata->u.mgd.monitor_work);
- /* and do all the other regular work too */
- ieee80211_queue_work(&sdata->local->hw, &sdata->work);
}
}
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 3ece7d1034c8..b54f398cda5d 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -711,7 +711,7 @@ static u32 minstrel_get_expected_throughput(void *priv_sta)
* computing cur_tp
*/
tmp_mrs = &mi->r[idx].stats;
- tmp_cur_tp = minstrel_get_tp_avg(&mi->r[idx], tmp_mrs->prob_ewma);
+ tmp_cur_tp = minstrel_get_tp_avg(&mi->r[idx], tmp_mrs->prob_ewma) * 10;
tmp_cur_tp = tmp_cur_tp * 1200 * 8 / 1024;
return tmp_cur_tp;
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 3928dbd24e25..239ed6e92b89 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -691,7 +691,7 @@ minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb)
if (likely(sta->ampdu_mlme.tid_tx[tid]))
return;
- ieee80211_start_tx_ba_session(pubsta, tid, 5000);
+ ieee80211_start_tx_ba_session(pubsta, tid, 0);
}
static void
@@ -871,7 +871,7 @@ minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
* - if station is in dynamic SMPS (and streams > 1)
* - for fallback rates, to increase chances of getting through
*/
- if (offset > 0 &&
+ if (offset > 0 ||
(mi->sta->smps_mode == IEEE80211_SMPS_DYNAMIC &&
group->streams > 1)) {
ratetbl->rate[offset].count = ratetbl->rate[offset].count_rts;
@@ -1334,7 +1334,8 @@ static u32 minstrel_ht_get_expected_throughput(void *priv_sta)
prob = mi->groups[i].rates[j].prob_ewma;
/* convert tp_avg from pkt per second to kbps */
- tp_avg = minstrel_ht_get_tp_avg(mi, i, j, prob) * AVG_PKT_SIZE * 8 / 1024;
+ tp_avg = minstrel_ht_get_tp_avg(mi, i, j, prob) * 10;
+ tp_avg = tp_avg * AVG_PKT_SIZE * 8 / 1024;
return tp_avg;
}
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 82af407fea7a..a3bb8f7f5fc5 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1754,7 +1754,7 @@ ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
entry->seq = seq;
entry->rx_queue = rx_queue;
entry->last_frag = frag;
- entry->ccmp = 0;
+ entry->check_sequential_pn = false;
entry->extra_len = 0;
return entry;
@@ -1850,15 +1850,27 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
rx->seqno_idx, &(rx->skb));
if (rx->key &&
(rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP ||
- rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256) &&
+ rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 ||
+ rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP ||
+ rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) &&
ieee80211_has_protected(fc)) {
int queue = rx->security_idx;
- /* Store CCMP PN so that we can verify that the next
- * fragment has a sequential PN value. */
- entry->ccmp = 1;
+
+ /* Store CCMP/GCMP PN so that we can verify that the
+ * next fragment has a sequential PN value.
+ */
+ entry->check_sequential_pn = true;
memcpy(entry->last_pn,
rx->key->u.ccmp.rx_pn[queue],
IEEE80211_CCMP_PN_LEN);
+ BUILD_BUG_ON(offsetof(struct ieee80211_key,
+ u.ccmp.rx_pn) !=
+ offsetof(struct ieee80211_key,
+ u.gcmp.rx_pn));
+ BUILD_BUG_ON(sizeof(rx->key->u.ccmp.rx_pn[queue]) !=
+ sizeof(rx->key->u.gcmp.rx_pn[queue]));
+ BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN !=
+ IEEE80211_GCMP_PN_LEN);
}
return RX_QUEUED;
}
@@ -1873,15 +1885,21 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
return RX_DROP_MONITOR;
}
- /* Verify that MPDUs within one MSDU have sequential PN values.
- * (IEEE 802.11i, 8.3.3.4.5) */
- if (entry->ccmp) {
+ /* "The receiver shall discard MSDUs and MMPDUs whose constituent
+ * MPDU PN values are not incrementing in steps of 1."
+ * see IEEE P802.11-REVmc/D5.0, 12.5.3.4.4, item d (for CCMP)
+ * and IEEE P802.11-REVmc/D5.0, 12.5.5.4.4, item d (for GCMP)
+ */
+ if (entry->check_sequential_pn) {
int i;
u8 pn[IEEE80211_CCMP_PN_LEN], *rpn;
int queue;
+
if (!rx->key ||
(rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP &&
- rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256))
+ rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256 &&
+ rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP &&
+ rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP_256))
return RX_DROP_UNUSABLE;
memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN);
for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) {
@@ -2232,7 +2250,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
struct ieee80211_local *local = rx->local;
struct ieee80211_sub_if_data *sdata = rx->sdata;
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
- u16 q, hdrlen;
+ u16 ac, q, hdrlen;
hdr = (struct ieee80211_hdr *) skb->data;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
@@ -2301,7 +2319,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
ether_addr_equal(sdata->vif.addr, hdr->addr3))
return RX_CONTINUE;
- q = ieee80211_select_queue_80211(sdata, skb, hdr);
+ ac = ieee80211_select_queue_80211(sdata, skb, hdr);
+ q = sdata->vif.hw_queue[ac];
if (ieee80211_queue_stopped(&local->hw, q)) {
IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
return RX_DROP_MONITOR;
@@ -3367,6 +3386,7 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
return false;
/* ignore action frames to TDLS-peers */
if (ieee80211_is_action(hdr->frame_control) &&
+ !is_broadcast_ether_addr(bssid) &&
!ether_addr_equal(bssid, hdr->addr1))
return false;
}
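
The defragmentation hunks above generalize the CCMP-only "sequential packet number" rule to GCMP, renaming entry->ccmp to the cipher-neutral check_sequential_pn. A minimal userspace sketch of the check itself, mirroring the increment-with-carry loop in the hunk; PN_LEN and the helper name are illustrative, assuming the 6-byte big-endian PN layout that IEEE80211_CCMP_PN_LEN implies:

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    #define PN_LEN 6    /* mirrors IEEE80211_CCMP_PN_LEN == IEEE80211_GCMP_PN_LEN */

    /* Return true iff rpn == last_pn + 1, treating the PN as a
     * big-endian multi-byte counter, exactly like the loop above. */
    static bool pn_is_sequential(const uint8_t *last_pn, const uint8_t *rpn)
    {
        uint8_t pn[PN_LEN];
        int i;

        memcpy(pn, last_pn, PN_LEN);
        for (i = PN_LEN - 1; i >= 0; i--) {
            pn[i]++;
            if (pn[i])    /* stop once a byte did not wrap to 0 */
                break;
        }
        return memcmp(pn, rpn, PN_LEN) == 0;
    }
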
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index a413e52f7691..acbe182b75d1 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -314,6 +314,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
bool was_scanning = local->scanning;
struct cfg80211_scan_request *scan_req;
struct ieee80211_sub_if_data *scan_sdata;
+ struct ieee80211_sub_if_data *sdata;
lockdep_assert_held(&local->mtx);
@@ -373,7 +374,16 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
ieee80211_mlme_notify_scan_completed(local);
ieee80211_ibss_notify_scan_completed(local);
- ieee80211_mesh_notify_scan_completed(local);
+
+ /* Requeue all the work that might have been ignored while
+ * the scan was in progress; if there was none this will
+ * just be a no-op for the particular interface.
+ */
+ list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+ if (ieee80211_sdata_running(sdata))
+ ieee80211_queue_work(&sdata->local->hw, &sdata->work);
+ }
+
if (was_scanning)
ieee80211_start_next_roc(local);
}
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index f91d1873218c..67066d048e6f 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -256,11 +256,11 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
}
/* Caller must hold local->sta_mtx */
-static void sta_info_hash_add(struct ieee80211_local *local,
- struct sta_info *sta)
+static int sta_info_hash_add(struct ieee80211_local *local,
+ struct sta_info *sta)
{
- rhashtable_insert_fast(&local->sta_hash, &sta->hash_node,
- sta_rht_params);
+ return rhashtable_insert_fast(&local->sta_hash, &sta->hash_node,
+ sta_rht_params);
}
static void sta_deliver_ps_frames(struct work_struct *wk)
@@ -484,11 +484,17 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
{
struct ieee80211_local *local = sta->local;
struct ieee80211_sub_if_data *sdata = sta->sdata;
- struct station_info sinfo;
+ struct station_info *sinfo;
int err = 0;
lockdep_assert_held(&local->sta_mtx);
+ sinfo = kzalloc(sizeof(struct station_info), GFP_KERNEL);
+ if (!sinfo) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
/* check if STA exists already */
if (sta_info_get_bss(sdata, sta->sta.addr)) {
err = -EEXIST;
@@ -503,7 +509,9 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
set_sta_flag(sta, WLAN_STA_BLOCK_BA);
/* make the station visible */
- sta_info_hash_add(local, sta);
+ err = sta_info_hash_add(local, sta);
+ if (err)
+ goto out_drop_sta;
list_add_tail_rcu(&sta->list, &local->sta_list);
@@ -520,10 +528,9 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
ieee80211_sta_debugfs_add(sta);
rate_control_add_sta_debugfs(sta);
- memset(&sinfo, 0, sizeof(sinfo));
- sinfo.filled = 0;
- sinfo.generation = local->sta_generation;
- cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
+ sinfo->generation = local->sta_generation;
+ cfg80211_new_sta(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL);
+ kfree(sinfo);
sta_dbg(sdata, "Inserted STA %pM\n", sta->sta.addr);
@@ -538,6 +545,7 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
out_remove:
sta_info_hash_del(local, sta);
list_del_rcu(&sta->list);
+ out_drop_sta:
local->num_sta--;
synchronize_net();
__cleanup_single_sta(sta);
@@ -882,7 +890,7 @@ static void __sta_info_destroy_part2(struct sta_info *sta)
{
struct ieee80211_local *local = sta->local;
struct ieee80211_sub_if_data *sdata = sta->sdata;
- struct station_info sinfo = {};
+ struct station_info *sinfo;
int ret;
/*
@@ -920,8 +928,11 @@ static void __sta_info_destroy_part2(struct sta_info *sta)
sta_dbg(sdata, "Removed STA %pM\n", sta->sta.addr);
- sta_set_sinfo(sta, &sinfo);
- cfg80211_del_sta_sinfo(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
+ sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL);
+ if (sinfo)
+ sta_set_sinfo(sta, sinfo);
+ cfg80211_del_sta_sinfo(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL);
+ kfree(sinfo);
rate_control_remove_sta_debugfs(sta);
ieee80211_sta_debugfs_remove(sta);
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index c32fc411a911..881bc2072809 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -518,6 +518,9 @@ static struct net_device *find_outdev(struct net *net,
if (!dev)
return ERR_PTR(-ENODEV);
+ if (IS_ERR(dev))
+ return dev;
+
/* The caller is holding rtnl anyways, so release the dev reference */
dev_put(dev);
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 91a8b004dc51..deadfdab1bc3 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -336,12 +336,10 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
unsigned short gso_type = skb_shinfo(skb)->gso_type;
struct sw_flow_key later_key;
struct sk_buff *segs, *nskb;
- struct ovs_skb_cb ovs_cb;
int err;
- ovs_cb = *OVS_CB(skb);
+ BUILD_BUG_ON(sizeof(*OVS_CB(skb)) > SKB_SGO_CB_OFFSET);
segs = __skb_gso_segment(skb, NETIF_F_SG, false);
- *OVS_CB(skb) = ovs_cb;
if (IS_ERR(segs))
return PTR_ERR(segs);
if (segs == NULL)
@@ -359,7 +357,6 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
/* Queue all of the segments. */
skb = segs;
do {
- *OVS_CB(skb) = ovs_cb;
if (gso_type & SKB_GSO_UDP && skb != segs)
key = &later_key;
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
index 1605691d9414..d933cb89efac 100644
--- a/net/openvswitch/vport-vxlan.c
+++ b/net/openvswitch/vport-vxlan.c
@@ -90,7 +90,7 @@ static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
int err;
struct vxlan_config conf = {
.no_share = true,
- .flags = VXLAN_F_COLLECT_METADATA,
+ .flags = VXLAN_F_COLLECT_METADATA | VXLAN_F_UDP_ZERO_CSUM6_RX,
};
if (!options) {
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 992396aa635c..da1ae0e13cb5 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1916,6 +1916,10 @@ retry:
goto retry;
}
+ if (!dev_validate_header(dev, skb->data, len)) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
!packet_extra_vlan_len_allowed(dev, skb)) {
err = -EMSGSIZE;
@@ -2326,18 +2330,6 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
sock_wfree(skb);
}
-static bool ll_header_truncated(const struct net_device *dev, int len)
-{
- /* net device doesn't like empty head */
- if (unlikely(len < dev->hard_header_len)) {
- net_warn_ratelimited("%s: packet size is too short (%d < %d)\n",
- current->comm, len, dev->hard_header_len);
- return true;
- }
-
- return false;
-}
-
static void tpacket_set_protocol(const struct net_device *dev,
struct sk_buff *skb)
{
@@ -2420,19 +2412,19 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
if (unlikely(err < 0))
return -EINVAL;
} else if (dev->hard_header_len) {
- if (ll_header_truncated(dev, tp_len))
- return -EINVAL;
+ int hdrlen = min_t(int, dev->hard_header_len, tp_len);
skb_push(skb, dev->hard_header_len);
- err = skb_store_bits(skb, 0, data,
- dev->hard_header_len);
+ err = skb_store_bits(skb, 0, data, hdrlen);
if (unlikely(err))
return err;
+ if (!dev_validate_header(dev, skb->data, hdrlen))
+ return -EINVAL;
if (!skb->protocol)
tpacket_set_protocol(dev, skb);
- data += dev->hard_header_len;
- to_write -= dev->hard_header_len;
+ data += hdrlen;
+ to_write -= hdrlen;
}
offset = offset_in_page(data);
@@ -2763,9 +2755,6 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
if (unlikely(offset < 0))
goto out_free;
- } else {
- if (ll_header_truncated(dev, len))
- goto out_free;
}
/* Returns -EFAULT on error */
@@ -2773,6 +2762,12 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
if (err)
goto out_free;
+ if (sock->type == SOCK_RAW &&
+ !dev_validate_header(dev, skb->data, len)) {
+ err = -EINVAL;
+ goto out_free;
+ }
+
sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
if (!gso_type && (len > dev->mtu + reserve + extra_len) &&
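
The af_packet hunks replace the old length-only ll_header_truncated() test with dev_validate_header(), which can also accept short headers from privileged senders. A hedged model of that kind of check follows; the zero-padding rule and the capability hook here are assumptions for illustration, not the kernel's exact logic:

    #include <stdbool.h>
    #include <string.h>

    /* Simplified model: a header shorter than the device's hard header
     * length is only acceptable for a privileged caller, and is then
     * zero-padded so the driver never reads bytes userspace didn't supply. */
    static bool validate_header(char *ll_header, int len,
                                int hard_header_len, bool privileged)
    {
        if (len >= hard_header_len)
            return true;
        if (!privileged)
            return false;
        memset(ll_header + len, 0, hard_header_len - len);
        return true;
    }
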
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index 10d42f3220ab..f925753668a7 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -377,6 +377,10 @@ static int phonet_rcv(struct sk_buff *skb, struct net_device *dev,
struct sockaddr_pn sa;
u16 len;
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
+ return NET_RX_DROP;
+
/* check we have at least a full Phonet header */
if (!pskb_pull(skb, sizeof(struct phonethdr)))
goto out;
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 9f843bbe8c10..d778d99326df 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -1097,17 +1097,6 @@ static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait)
return res;
}
-static bool rfkill_readable(struct rfkill_data *data)
-{
- bool r;
-
- mutex_lock(&data->mtx);
- r = !list_empty(&data->events);
- mutex_unlock(&data->mtx);
-
- return r;
-}
-
static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
size_t count, loff_t *pos)
{
@@ -1124,8 +1113,11 @@ static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
goto out;
}
mutex_unlock(&data->mtx);
+ /* since we re-check and it just compares pointers,
+ * using !list_empty() without locking isn't a problem
+ */
ret = wait_event_interruptible(data->read_wait,
- rfkill_readable(data));
+ !list_empty(&data->events));
mutex_lock(&data->mtx);
if (ret)
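
The rfkill hunk drops a lock/unlock pair from the wakeup predicate: !list_empty() only compares pointers, so it can be evaluated locklessly as long as the result is re-validated under the mutex before anything is consumed. A hedged pthread/C11 analogue of that wait-then-recheck shape:

    #include <pthread.h>
    #include <stdatomic.h>

    static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
    static atomic_int nevents;    /* stands in for the event list head */

    static int read_one_event(void)
    {
        /* cheap lockless poll, like !list_empty() in the hunk ... */
        while (atomic_load(&nevents) == 0)
            ;    /* a real program would sleep on a wait queue here */

        /* ... but the actual consumption re-checks under the lock */
        pthread_mutex_lock(&mtx);
        int got = 0;
        if (atomic_load(&nevents) > 0) {
            atomic_fetch_sub(&nevents, 1);
            got = 1;
        }
        pthread_mutex_unlock(&mtx);
        return got;
    }
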
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 57692947ebbe..95b021243233 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -252,23 +252,28 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
sizeof(key->eth.src));
+
fl_set_key_val(tb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
&mask->basic.n_proto, TCA_FLOWER_UNSPEC,
sizeof(key->basic.n_proto));
+
if (key->basic.n_proto == htons(ETH_P_IP) ||
key->basic.n_proto == htons(ETH_P_IPV6)) {
fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
&mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
sizeof(key->basic.ip_proto));
}
- if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+
+ if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
+ key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
&mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
sizeof(key->ipv4.src));
fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
&mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
sizeof(key->ipv4.dst));
- } else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ } else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
+ key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
&mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
sizeof(key->ipv6.src));
@@ -276,6 +281,7 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
&mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
sizeof(key->ipv6.dst));
}
+
if (key->basic.ip_proto == IPPROTO_TCP) {
fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
&mask->tp.src, TCA_FLOWER_UNSPEC,
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index b5c2cf2aa6d4..af1acf009866 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1852,6 +1852,7 @@ reset:
}
tp = old_tp;
+ protocol = tc_skb_protocol(skb);
goto reclassify;
#endif
}
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index ec529121f38a..ce46f1c7f133 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -526,6 +526,8 @@ static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
}
return 0;
}
+ if (addr1->v6.sin6_port != addr2->v6.sin6_port)
+ return 0;
if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr))
return 0;
/* If this is a linklocal address, compare the scope_id. */
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 3d9ea9a48289..8b4ff315695e 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -60,6 +60,8 @@
#include <net/inet_common.h>
#include <net/inet_ecn.h>
+#define MAX_SCTP_PORT_HASH_ENTRIES (64 * 1024)
+
/* Global data structures. */
struct sctp_globals sctp_globals __read_mostly;
@@ -1352,6 +1354,8 @@ static __init int sctp_init(void)
unsigned long limit;
int max_share;
int order;
+ int num_entries;
+ int max_entry_order;
sock_skb_cb_check_size(sizeof(struct sctp_ulpevent));
@@ -1404,14 +1408,24 @@ static __init int sctp_init(void)
/* Size and allocate the association hash table.
* The methodology is similar to that of the tcp hash tables.
+ * Though not identical. Start by getting a goal size
*/
if (totalram_pages >= (128 * 1024))
goal = totalram_pages >> (22 - PAGE_SHIFT);
else
goal = totalram_pages >> (24 - PAGE_SHIFT);
- for (order = 0; (1UL << order) < goal; order++)
- ;
+ /* Then compute the page order for said goal */
+ order = get_order(goal);
+
+ /* Now compute the required page order for the maximum sized table we
+ * want to create
+ */
+ max_entry_order = get_order(MAX_SCTP_PORT_HASH_ENTRIES *
+ sizeof(struct sctp_bind_hashbucket));
+
+ /* Limit the page order by that maximum hash table size */
+ order = min(order, max_entry_order);
do {
sctp_assoc_hashsize = (1UL << order) * PAGE_SIZE /
@@ -1445,20 +1459,35 @@ static __init int sctp_init(void)
INIT_HLIST_HEAD(&sctp_ep_hashtable[i].chain);
}
- /* Allocate and initialize the SCTP port hash table. */
+ /* Allocate and initialize the SCTP port hash table.
+ * Note that order is initialized to start at the max sized
+ * table we want to support. If we can't get that many pages
+ * reduce the order and try again
+ */
do {
- sctp_port_hashsize = (1UL << order) * PAGE_SIZE /
- sizeof(struct sctp_bind_hashbucket);
- if ((sctp_port_hashsize > (64 * 1024)) && order > 0)
- continue;
sctp_port_hashtable = (struct sctp_bind_hashbucket *)
__get_free_pages(GFP_ATOMIC|__GFP_NOWARN, order);
} while (!sctp_port_hashtable && --order > 0);
+
if (!sctp_port_hashtable) {
pr_err("Failed bind hash alloc\n");
status = -ENOMEM;
goto err_bhash_alloc;
}
+
+ /* Now compute the number of entries that will fit in the
+ * port hash space we allocated
+ */
+ num_entries = (1UL << order) * PAGE_SIZE /
+ sizeof(struct sctp_bind_hashbucket);
+
+ /* And finish by rounding it down to the nearest power of two
+ * this wastes some memory of course, but it's needed because
+ * the hash function operates based on the assumption
+ * that the number of entries is a power of two
+ */
+ sctp_port_hashsize = rounddown_pow_of_two(num_entries);
+
for (i = 0; i < sctp_port_hashsize; i++) {
spin_lock_init(&sctp_port_hashtable[i].lock);
INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain);
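
The protocol.c hunks restructure the bind-hash sizing: compute a goal order, clamp it to the largest table supported, allocate with decreasing orders until something fits, and finally round the usable entry count down to a power of two because the hash function assumes one. A hedged userspace sketch of the arithmetic; PAGE_SIZE and the bucket footprint are illustrative assumptions:

    #include <stdio.h>

    #define PAGE_SIZE   4096UL
    #define BUCKET_SIZE 32UL             /* assumed bucket footprint */
    #define MAX_ENTRIES (64UL * 1024)

    static unsigned long rounddown_pow_of_two(unsigned long n)
    {
        unsigned long p = 1;

        while (p <= n / 2)
            p *= 2;
        return p;
    }

    static int order_for_bytes(unsigned long bytes)    /* get_order() analogue */
    {
        unsigned long pages = (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
        int order = 0;

        while ((1UL << order) < pages)
            order++;
        return order;
    }

    int main(void)
    {
        int order = order_for_bytes(MAX_ENTRIES * BUCKET_SIZE);
        unsigned long entries = (1UL << order) * PAGE_SIZE / BUCKET_SIZE;

        printf("order %d holds %lu buckets; hash uses %lu of them\n",
               order, entries, rounddown_pow_of_two(entries));
        return 0;
    }
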
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index ef1d90fdc773..be1489fc3234 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -5542,6 +5542,7 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
struct sctp_hmac_algo_param *hmacs;
__u16 data_len = 0;
u32 num_idents;
+ int i;
if (!ep->auth_enable)
return -EACCES;
@@ -5559,8 +5560,12 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
return -EFAULT;
if (put_user(num_idents, &p->shmac_num_idents))
return -EFAULT;
- if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len))
- return -EFAULT;
+ for (i = 0; i < num_idents; i++) {
+ __u16 hmacid = ntohs(hmacs->hmac_ids[i]);
+
+ if (copy_to_user(&p->shmac_idents[i], &hmacid, sizeof(__u16)))
+ return -EFAULT;
+ }
return 0;
}
@@ -6640,6 +6645,7 @@ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
if (cmsgs->srinfo->sinfo_flags &
~(SCTP_UNORDERED | SCTP_ADDR_OVER |
+ SCTP_SACK_IMMEDIATELY |
SCTP_ABORT | SCTP_EOF))
return -EINVAL;
break;
@@ -6663,6 +6669,7 @@ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
if (cmsgs->sinfo->snd_flags &
~(SCTP_UNORDERED | SCTP_ADDR_OVER |
+ SCTP_SACK_IMMEDIATELY |
SCTP_ABORT | SCTP_EOF))
return -EINVAL;
break;
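
The getsockopt hunk above stops copying the raw hmac_ids array (stored in network byte order) to userspace and instead converts each identifier with ntohs() first. A hedged userspace analogue, with memcpy() standing in for copy_to_user():

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <string.h>

    static int export_hmac_ids(uint16_t *dst, const uint16_t *net_order_ids,
                               unsigned int n)
    {
        unsigned int i;

        for (i = 0; i < n; i++) {
            uint16_t id = ntohs(net_order_ids[i]);  /* host byte order */

            memcpy(&dst[i], &id, sizeof(id));       /* copy_to_user() stand-in */
        }
        return 0;
    }
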
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 26d50c565f54..3e0fc5127225 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -320,7 +320,7 @@ static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write,
struct ctl_table tbl;
bool changed = false;
char *none = "none";
- char tmp[8];
+ char tmp[8] = {0};
int ret;
memset(&tbl, 0, sizeof(struct ctl_table));
diff --git a/net/socket.c b/net/socket.c
index d730ef9dfbf0..263b334ec5e4 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2238,31 +2238,31 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
break;
}
-out_put:
- fput_light(sock->file, fput_needed);
-
if (err == 0)
- return datagrams;
+ goto out_put;
- if (datagrams != 0) {
+ if (datagrams == 0) {
+ datagrams = err;
+ goto out_put;
+ }
+
+ /*
+ * We may return less entries than requested (vlen) if the
+ * sock is non block and there aren't enough datagrams...
+ */
+ if (err != -EAGAIN) {
/*
- * We may return less entries than requested (vlen) if the
- * sock is non block and there aren't enough datagrams...
+ * ... or if recvmsg returns an error after we
+ * received some datagrams, where we record the
+ * error to return on the next call or if the
+ * app asks about it using getsockopt(SO_ERROR).
*/
- if (err != -EAGAIN) {
- /*
- * ... or if recvmsg returns an error after we
- * received some datagrams, where we record the
- * error to return on the next call or if the
- * app asks about it using getsockopt(SO_ERROR).
- */
- sock->sk->sk_err = -err;
- }
-
- return datagrams;
+ sock->sk->sk_err = -err;
}
+out_put:
+ fput_light(sock->file, fput_needed);
- return err;
+ return datagrams;
}
SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg,
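
The restructured exit path above encodes recvmmsg()'s contract: if any datagrams were received, return that count and stash a non-EAGAIN error in sk_err for the next call (or SO_ERROR); only when nothing was received does the error itself come back. A hedged sketch of the same convention:

    #include <errno.h>

    struct sock { int sk_err; };

    /* err is the last recvmsg() result, datagrams the count received so far. */
    static int finish_recvmmsg(struct sock *sk, int err, int datagrams)
    {
        if (err == 0 || datagrams == 0)
            return err == 0 ? datagrams : err;

        if (err != -EAGAIN)         /* EAGAIN only means "no more right now" */
            sk->sk_err = -err;      /* reported on the next call or SO_ERROR */
        return datagrams;
    }
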
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 5e4f815c2b34..21e20353178e 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -1225,7 +1225,7 @@ int qword_get(char **bpp, char *dest, int bufsize)
if (bp[0] == '\\' && bp[1] == 'x') {
/* HEX STRING */
bp += 2;
- while (len < bufsize) {
+ while (len < bufsize - 1) {
int h, l;
h = hex_to_bin(bp[0]);
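
The one-character change above is an off-by-one fix: the decode loop must stop at bufsize - 1 so the terminating NUL written afterwards still fits inside the buffer. A minimal illustration:

    #include <stdio.h>

    static int decode_into(char *dest, int bufsize, const char *src)
    {
        int len = 0;

        while (*src && len < bufsize - 1)   /* reserve room for '\0' */
            dest[len++] = *src++;
        dest[len] = '\0';
        return len;
    }

    int main(void)
    {
        char buf[4];

        printf("%d '%s'\n", decode_into(buf, sizeof(buf), "abcdef"), buf);
        return 0;
    }
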
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 2ffaf6a79499..027c9ef8a263 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -398,7 +398,6 @@ static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen,
if (unlikely(!sock))
return -ENOTSOCK;
- clear_bit(SOCKWQ_ASYNC_NOSPACE, &sock->flags);
if (base != 0) {
addr = NULL;
addrlen = 0;
@@ -442,7 +441,6 @@ static void xs_nospace_callback(struct rpc_task *task)
struct sock_xprt *transport = container_of(task->tk_rqstp->rq_xprt, struct sock_xprt, xprt);
transport->inet->sk_write_pending--;
- clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags);
}
/**
@@ -467,20 +465,11 @@ static int xs_nospace(struct rpc_task *task)
/* Don't race with disconnect */
if (xprt_connected(xprt)) {
- if (test_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags)) {
- /*
- * Notify TCP that we're limited by the application
- * window size
- */
- set_bit(SOCK_NOSPACE, &transport->sock->flags);
- sk->sk_write_pending++;
- /* ...and wait for more buffer space */
- xprt_wait_for_buffer_space(task, xs_nospace_callback);
- }
- } else {
- clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags);
+ /* wait for more buffer space */
+ sk->sk_write_pending++;
+ xprt_wait_for_buffer_space(task, xs_nospace_callback);
+ } else
ret = -ENOTCONN;
- }
spin_unlock_bh(&xprt->transport_lock);
@@ -616,9 +605,6 @@ process_status:
case -EAGAIN:
status = xs_nospace(task);
break;
- default:
- dprintk("RPC: sendmsg returned unrecognized error %d\n",
- -status);
case -ENETUNREACH:
case -ENOBUFS:
case -EPIPE:
@@ -626,7 +612,10 @@ process_status:
case -EPERM:
/* When the server has died, an ICMP port unreachable message
* prompts ECONNREFUSED. */
- clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags);
+ break;
+ default:
+ dprintk("RPC: sendmsg returned unrecognized error %d\n",
+ -status);
}
return status;
@@ -706,16 +695,16 @@ static int xs_tcp_send_request(struct rpc_task *task)
case -EAGAIN:
status = xs_nospace(task);
break;
- default:
- dprintk("RPC: sendmsg returned unrecognized error %d\n",
- -status);
case -ECONNRESET:
case -ECONNREFUSED:
case -ENOTCONN:
case -EADDRINUSE:
case -ENOBUFS:
case -EPIPE:
- clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags);
+ break;
+ default:
+ dprintk("RPC: sendmsg returned unrecognized error %d\n",
+ -status);
}
return status;
@@ -1609,19 +1598,23 @@ static void xs_tcp_state_change(struct sock *sk)
static void xs_write_space(struct sock *sk)
{
- struct socket *sock;
+ struct socket_wq *wq;
struct rpc_xprt *xprt;
- if (unlikely(!(sock = sk->sk_socket)))
+ if (!sk->sk_socket)
return;
- clear_bit(SOCK_NOSPACE, &sock->flags);
+ clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
if (unlikely(!(xprt = xprt_from_sock(sk))))
return;
- if (test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sock->flags) == 0)
- return;
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (!wq || test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags) == 0)
+ goto out;
xprt_write_space(xprt);
+out:
+ rcu_read_unlock();
}
/**
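
The xs_write_space() rework above stops caching the struct socket pointer and instead dereferences sk->sk_wq under RCU, tolerating a NULL wq while the socket is being torn down. A hedged C11-atomics analogue of that publish/consume shape; the names are illustrative:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct wq { atomic_ulong flags; };

    #define NOSPACE_BIT 0x1UL

    static _Atomic(struct wq *) sk_wq;  /* published/retired elsewhere */

    static bool write_space(void)
    {
        struct wq *wq = atomic_load_explicit(&sk_wq, memory_order_acquire);

        if (!wq)                /* socket already detached */
            return false;
        /* test_and_clear_bit() analogue: only report if the bit was set */
        return atomic_fetch_and(&wq->flags, ~NOSPACE_BIT) & NOSPACE_BIT;
    }
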
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index f34e535e93bd..d5d7132ac847 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -20,6 +20,7 @@
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
+#include <linux/rtnetlink.h>
#include <net/ip_fib.h>
#include <net/switchdev.h>
@@ -565,7 +566,6 @@ int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj,
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_dump);
-static DEFINE_MUTEX(switchdev_mutex);
static RAW_NOTIFIER_HEAD(switchdev_notif_chain);
/**
@@ -580,9 +580,9 @@ int register_switchdev_notifier(struct notifier_block *nb)
{
int err;
- mutex_lock(&switchdev_mutex);
+ rtnl_lock();
err = raw_notifier_chain_register(&switchdev_notif_chain, nb);
- mutex_unlock(&switchdev_mutex);
+ rtnl_unlock();
return err;
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);
@@ -598,9 +598,9 @@ int unregister_switchdev_notifier(struct notifier_block *nb)
{
int err;
- mutex_lock(&switchdev_mutex);
+ rtnl_lock();
err = raw_notifier_chain_unregister(&switchdev_notif_chain, nb);
- mutex_unlock(&switchdev_mutex);
+ rtnl_unlock();
return err;
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
@@ -614,16 +614,17 @@ EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
* Call all network notifier blocks. This should be called by driver
* when it needs to propagate hardware event.
* Return values are same as for atomic_notifier_call_chain().
+ * rtnl_lock must be held.
*/
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
struct switchdev_notifier_info *info)
{
int err;
+ ASSERT_RTNL();
+
info->dev = dev;
- mutex_lock(&switchdev_mutex);
err = raw_notifier_call_chain(&switchdev_notif_chain, val, info);
- mutex_unlock(&switchdev_mutex);
return err;
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
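
The switchdev hunks retire the private switchdev_mutex and piggyback on RTNL, turning the locking into a caller contract that the notifier entry point asserts rather than takes. A hedged pthread analogue of that pattern:

    #include <assert.h>
    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;
    static bool rtnl_held;              /* only written under the mutex */

    static void rtnl_lock(void)   { pthread_mutex_lock(&rtnl); rtnl_held = true; }
    static void rtnl_unlock(void) { rtnl_held = false; pthread_mutex_unlock(&rtnl); }

    static int call_notifiers(int event)
    {
        assert(rtnl_held);  /* ASSERT_RTNL() analogue: assert, don't lock */
        /* ... walk the raw (lockless) notifier chain ... */
        (void)event;
        return 0;
    }
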
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 9dc239dfe192..92e367a0a5ce 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -399,8 +399,10 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
NLM_F_MULTI, TIPC_NL_LINK_GET);
- if (!hdr)
+ if (!hdr) {
+ tipc_bcast_unlock(net);
return -EMSGSIZE;
+ }
attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
if (!attrs)
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 20cddec0a43c..3926b561f873 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -168,12 +168,6 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
skb_queue_head_init(&n_ptr->bc_entry.inputq1);
__skb_queue_head_init(&n_ptr->bc_entry.arrvq);
skb_queue_head_init(&n_ptr->bc_entry.inputq2);
- hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]);
- list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
- if (n_ptr->addr < temp_node->addr)
- break;
- }
- list_add_tail_rcu(&n_ptr->list, &temp_node->list);
n_ptr->state = SELF_DOWN_PEER_LEAVING;
n_ptr->signature = INVALID_NODE_SIG;
n_ptr->active_links[0] = INVALID_BEARER_ID;
@@ -193,6 +187,12 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
tipc_node_get(n_ptr);
setup_timer(&n_ptr->timer, tipc_node_timeout, (unsigned long)n_ptr);
n_ptr->keepalive_intv = U32_MAX;
+ hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]);
+ list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
+ if (n_ptr->addr < temp_node->addr)
+ break;
+ }
+ list_add_tail_rcu(&n_ptr->list, &temp_node->list);
exit:
spin_unlock_bh(&tn->node_list_lock);
return n_ptr;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index b53246fb0412..e53003cf7703 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -673,7 +673,7 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
struct tipc_sock *tsk = tipc_sk(sk);
struct net *net = sock_net(sk);
struct tipc_msg *mhdr = &tsk->phdr;
- struct sk_buff_head *pktchain = &sk->sk_write_queue;
+ struct sk_buff_head pktchain;
struct iov_iter save = msg->msg_iter;
uint mtu;
int rc;
@@ -687,14 +687,16 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
msg_set_nameupper(mhdr, seq->upper);
msg_set_hdr_sz(mhdr, MCAST_H_SIZE);
+ skb_queue_head_init(&pktchain);
+
new_mtu:
mtu = tipc_bcast_get_mtu(net);
- rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, pktchain);
+ rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, &pktchain);
if (unlikely(rc < 0))
return rc;
do {
- rc = tipc_bcast_xmit(net, pktchain);
+ rc = tipc_bcast_xmit(net, &pktchain);
if (likely(!rc))
return dsz;
@@ -704,7 +706,7 @@ new_mtu:
if (!rc)
continue;
}
- __skb_queue_purge(pktchain);
+ __skb_queue_purge(&pktchain);
if (rc == -EMSGSIZE) {
msg->msg_iter = save;
goto new_mtu;
@@ -863,7 +865,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz)
struct net *net = sock_net(sk);
struct tipc_msg *mhdr = &tsk->phdr;
u32 dnode, dport;
- struct sk_buff_head *pktchain = &sk->sk_write_queue;
+ struct sk_buff_head pktchain;
struct sk_buff *skb;
struct tipc_name_seq *seq;
struct iov_iter save;
@@ -924,17 +926,18 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz)
msg_set_hdr_sz(mhdr, BASIC_H_SIZE);
}
+ skb_queue_head_init(&pktchain);
save = m->msg_iter;
new_mtu:
mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
- rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, pktchain);
+ rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, &pktchain);
if (rc < 0)
return rc;
do {
- skb = skb_peek(pktchain);
+ skb = skb_peek(&pktchain);
TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong;
- rc = tipc_node_xmit(net, pktchain, dnode, tsk->portid);
+ rc = tipc_node_xmit(net, &pktchain, dnode, tsk->portid);
if (likely(!rc)) {
if (sock->state != SS_READY)
sock->state = SS_CONNECTING;
@@ -946,7 +949,7 @@ new_mtu:
if (!rc)
continue;
}
- __skb_queue_purge(pktchain);
+ __skb_queue_purge(&pktchain);
if (rc == -EMSGSIZE) {
m->msg_iter = save;
goto new_mtu;
@@ -1016,7 +1019,7 @@ static int __tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz)
struct net *net = sock_net(sk);
struct tipc_sock *tsk = tipc_sk(sk);
struct tipc_msg *mhdr = &tsk->phdr;
- struct sk_buff_head *pktchain = &sk->sk_write_queue;
+ struct sk_buff_head pktchain;
DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
u32 portid = tsk->portid;
int rc = -EINVAL;
@@ -1044,17 +1047,19 @@ static int __tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz)
timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
dnode = tsk_peer_node(tsk);
+ skb_queue_head_init(&pktchain);
next:
save = m->msg_iter;
mtu = tsk->max_pkt;
send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE);
- rc = tipc_msg_build(mhdr, m, sent, send, mtu, pktchain);
+ rc = tipc_msg_build(mhdr, m, sent, send, mtu, &pktchain);
if (unlikely(rc < 0))
return rc;
+
do {
if (likely(!tsk_conn_cong(tsk))) {
- rc = tipc_node_xmit(net, pktchain, dnode, portid);
+ rc = tipc_node_xmit(net, &pktchain, dnode, portid);
if (likely(!rc)) {
tsk->sent_unacked++;
sent += send;
@@ -1063,7 +1068,7 @@ next:
goto next;
}
if (rc == -EMSGSIZE) {
- __skb_queue_purge(pktchain);
+ __skb_queue_purge(&pktchain);
tsk->max_pkt = tipc_node_get_mtu(net, dnode,
portid);
m->msg_iter = save;
@@ -1077,7 +1082,7 @@ next:
rc = tipc_wait_for_sndpkt(sock, &timeo);
} while (!rc);
- __skb_queue_purge(pktchain);
+ __skb_queue_purge(&pktchain);
return sent ? sent : rc;
}
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 350cca33ee0a..69ee2eeef968 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -289,15 +289,14 @@ static void tipc_subscrb_rcv_cb(struct net *net, int conid,
struct sockaddr_tipc *addr, void *usr_data,
void *buf, size_t len)
{
- struct tipc_subscriber *subscriber = usr_data;
+ struct tipc_subscriber *subscrb = usr_data;
struct tipc_subscription *sub = NULL;
struct tipc_net *tn = net_generic(net, tipc_net_id);
- tipc_subscrp_create(net, (struct tipc_subscr *)buf, subscriber, &sub);
- if (sub)
- tipc_nametbl_subscribe(sub);
- else
- tipc_conn_terminate(tn->topsrv, subscriber->conid);
+ if (tipc_subscrp_create(net, (struct tipc_subscr *)buf, subscrb, &sub))
+ return tipc_conn_terminate(tn->topsrv, subscrb->conid);
+
+ tipc_nametbl_subscribe(sub);
}
/* Handle one request to establish a new subscriber */
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index ef05cd9403d4..898a53a562b8 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1496,7 +1496,7 @@ static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
UNIXCB(skb).fp = NULL;
for (i = scm->fp->count-1; i >= 0; i--)
- unix_notinflight(scm->fp->fp[i]);
+ unix_notinflight(scm->fp->user, scm->fp->fp[i]);
}
static void unix_destruct_scm(struct sk_buff *skb)
@@ -1513,6 +1513,21 @@ static void unix_destruct_scm(struct sk_buff *skb)
sock_wfree(skb);
}
+/*
+ * The "user->unix_inflight" variable is protected by the garbage
+ * collection lock, and we just read it locklessly here. If you go
+ * over the limit, there might be a tiny race in actually noticing
+ * it across threads. Tough.
+ */
+static inline bool too_many_unix_fds(struct task_struct *p)
+{
+ struct user_struct *user = current_user();
+
+ if (unlikely(user->unix_inflight > task_rlimit(p, RLIMIT_NOFILE)))
+ return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
+ return false;
+}
+
#define MAX_RECURSION_LEVEL 4
static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
@@ -1521,6 +1536,9 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
unsigned char max_level = 0;
int unix_sock_count = 0;
+ if (too_many_unix_fds(current))
+ return -ETOOMANYREFS;
+
for (i = scm->fp->count - 1; i >= 0; i--) {
struct sock *sk = unix_get_socket(scm->fp->fp[i]);
@@ -1542,10 +1560,8 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
if (!UNIXCB(skb).fp)
return -ENOMEM;
- if (unix_sock_count) {
- for (i = scm->fp->count - 1; i >= 0; i--)
- unix_inflight(scm->fp->fp[i]);
- }
+ for (i = scm->fp->count - 1; i >= 0; i--)
+ unix_inflight(scm->fp->user, scm->fp->fp[i]);
return max_level;
}
@@ -1765,7 +1781,12 @@ restart_locked:
goto out_unlock;
}
- if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
+ /* other == sk && unix_peer(other) != sk if
+ * - unix_peer(sk) == NULL, destination address bound to sk
+ * - unix_peer(sk) == sk by time of get but disconnected before lock
+ */
+ if (other != sk &&
+ unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
if (timeo) {
timeo = unix_wait_for_peer(other, timeo);
@@ -2254,13 +2275,15 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
size_t size = state->size;
unsigned int last_len;
- err = -EINVAL;
- if (sk->sk_state != TCP_ESTABLISHED)
+ if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
+ err = -EINVAL;
goto out;
+ }
- err = -EOPNOTSUPP;
- if (flags & MSG_OOB)
+ if (unlikely(flags & MSG_OOB)) {
+ err = -EOPNOTSUPP;
goto out;
+ }
target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
timeo = sock_rcvtimeo(sk, noblock);
@@ -2306,9 +2329,11 @@ again:
goto unlock;
unix_state_unlock(sk);
- err = -EAGAIN;
- if (!timeo)
+ if (!timeo) {
+ err = -EAGAIN;
break;
+ }
+
mutex_unlock(&u->readlock);
timeo = unix_stream_data_wait(sk, timeo, last,
@@ -2316,6 +2341,7 @@ again:
if (signal_pending(current)) {
err = sock_intr_errno(timeo);
+ scm_destroy(&scm);
goto out;
}
diff --git a/net/unix/diag.c b/net/unix/diag.c
index c512f64d5287..4d9679701a6d 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -220,7 +220,7 @@ done:
return skb->len;
}
-static struct sock *unix_lookup_by_ino(int ino)
+static struct sock *unix_lookup_by_ino(unsigned int ino)
{
int i;
struct sock *sk;
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index a73a226f2d33..6a0d48525fcf 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -116,15 +116,15 @@ struct sock *unix_get_socket(struct file *filp)
* descriptor if it is for an AF_UNIX socket.
*/
-void unix_inflight(struct file *fp)
+void unix_inflight(struct user_struct *user, struct file *fp)
{
struct sock *s = unix_get_socket(fp);
+ spin_lock(&unix_gc_lock);
+
if (s) {
struct unix_sock *u = unix_sk(s);
- spin_lock(&unix_gc_lock);
-
if (atomic_long_inc_return(&u->inflight) == 1) {
BUG_ON(!list_empty(&u->link));
list_add_tail(&u->link, &gc_inflight_list);
@@ -132,25 +132,28 @@ void unix_inflight(struct file *fp)
BUG_ON(list_empty(&u->link));
}
unix_tot_inflight++;
- spin_unlock(&unix_gc_lock);
}
+ user->unix_inflight++;
+ spin_unlock(&unix_gc_lock);
}
-void unix_notinflight(struct file *fp)
+void unix_notinflight(struct user_struct *user, struct file *fp)
{
struct sock *s = unix_get_socket(fp);
+ spin_lock(&unix_gc_lock);
+
if (s) {
struct unix_sock *u = unix_sk(s);
- spin_lock(&unix_gc_lock);
BUG_ON(list_empty(&u->link));
if (atomic_long_dec_and_test(&u->inflight))
list_del_init(&u->link);
unix_tot_inflight--;
- spin_unlock(&unix_gc_lock);
}
+ user->unix_inflight--;
+ spin_unlock(&unix_gc_lock);
}
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
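
The garbage.c hunks widen the unix_gc_lock critical section so the new per-user in-flight counter is updated for every passed file, socket or not, under the same lock as the gc list. A hedged sketch of the accounting shape:

    #include <pthread.h>

    struct user_struct { unsigned long unix_inflight; };

    static pthread_mutex_t unix_gc_lock = PTHREAD_MUTEX_INITIALIZER;

    static void inflight(struct user_struct *user, int is_unix_socket)
    {
        pthread_mutex_lock(&unix_gc_lock);
        if (is_unix_socket) {
            /* ... bump the socket's inflight count, queue it for gc ... */
        }
        user->unix_inflight++;  /* counted for *every* file in flight */
        pthread_mutex_unlock(&unix_gc_lock);
    }
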
diff --git a/net/wireless/core.c b/net/wireless/core.c
index b0915515640e..8f0bac7e03c4 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -1147,6 +1147,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
return NOTIFY_DONE;
}
+ wireless_nlevent_flush();
+
return NOTIFY_OK;
}
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index c8717c1d082e..b50ee5d622e1 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -342,6 +342,40 @@ static const int compat_event_type_size[] = {
/* IW event code */
+void wireless_nlevent_flush(void)
+{
+ struct sk_buff *skb;
+ struct net *net;
+
+ ASSERT_RTNL();
+
+ for_each_net(net) {
+ while ((skb = skb_dequeue(&net->wext_nlevents)))
+ rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL,
+ GFP_KERNEL);
+ }
+}
+EXPORT_SYMBOL_GPL(wireless_nlevent_flush);
+
+static int wext_netdev_notifier_call(struct notifier_block *nb,
+ unsigned long state, void *ptr)
+{
+ /*
+ * When a netdev changes state in any way, flush all pending messages
+ * to avoid them going out in a strange order, e.g. RTM_NEWLINK after
+ * RTM_DELLINK, or with IFF_UP after without IFF_UP during dev_close()
+ * or similar - all of which could otherwise happen due to delays from
+ * schedule_work().
+ */
+ wireless_nlevent_flush();
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block wext_netdev_notifier = {
+ .notifier_call = wext_netdev_notifier_call,
+};
+
static int __net_init wext_pernet_init(struct net *net)
{
skb_queue_head_init(&net->wext_nlevents);
@@ -360,7 +394,12 @@ static struct pernet_operations wext_pernet_ops = {
static int __init wireless_nlevent_init(void)
{
- return register_pernet_subsys(&wext_pernet_ops);
+ int err = register_pernet_subsys(&wext_pernet_ops);
+
+ if (err)
+ return err;
+
+ return register_netdevice_notifier(&wext_netdev_notifier);
}
subsys_initcall(wireless_nlevent_init);
@@ -368,17 +407,8 @@ subsys_initcall(wireless_nlevent_init);
/* Process events generated by the wireless layer or the driver. */
static void wireless_nlevent_process(struct work_struct *work)
{
- struct sk_buff *skb;
- struct net *net;
-
rtnl_lock();
-
- for_each_net(net) {
- while ((skb = skb_dequeue(&net->wext_nlevents)))
- rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL,
- GFP_KERNEL);
- }
-
+ wireless_nlevent_flush();
rtnl_unlock();
}
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index ad7f5b3f9b61..1c4ad477ce93 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -292,12 +292,15 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;
skb_dst_force(skb);
+ dev_hold(skb->dev);
nexthdr = x->type->input(x, skb);
if (nexthdr == -EINPROGRESS)
return 0;
resume:
+ dev_put(skb->dev);
+
spin_lock(&x->lock);
if (nexthdr <= 0) {
if (nexthdr == -EBADMSG) {
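
The xfrm_input hunk pins the input device across the asynchronous transform: dev_hold() before handing the skb to the crypto layer, dev_put() on the resume path, so the device cannot vanish while the crypto runs. A hedged refcount sketch of why the two sides must pair up:

    #include <errno.h>
    #include <stdatomic.h>

    struct net_device { atomic_int refcnt; };

    static void dev_hold_(struct net_device *dev) { atomic_fetch_add(&dev->refcnt, 1); }
    static void dev_put_(struct net_device *dev)  { atomic_fetch_sub(&dev->refcnt, 1); }

    static int transform(struct net_device *dev)
    {
        (void)dev;
        return 0;       /* pretend the transform completed synchronously */
    }

    static int input_packet(struct net_device *dev)
    {
        dev_hold_(dev);         /* keep dev alive while the crypto runs */

        int nexthdr = transform(dev);
        if (nexthdr == -EINPROGRESS)
            return 0;           /* async: the resume path does dev_put_() */

        dev_put_(dev);          /* synchronous completion releases here */
        return nexthdr;
    }
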
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index cc3676eb6239..ff4a91fcab9f 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -167,6 +167,8 @@ static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb
{
struct sk_buff *segs;
+ BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET);
+ BUILD_BUG_ON(sizeof(*IP6CB(skb)) > SKB_SGO_CB_OFFSET);
segs = skb_gso_segment(skb, 0);
kfree_skb(skb);
if (IS_ERR(segs))
diff --git a/scripts/bloat-o-meter b/scripts/bloat-o-meter
index 23e78dcd12bf..38b64f487315 100755
--- a/scripts/bloat-o-meter
+++ b/scripts/bloat-o-meter
@@ -58,8 +58,8 @@ for name in common:
delta.sort()
delta.reverse()
-print "add/remove: %s/%s grow/shrink: %s/%s up/down: %s/%s (%s)" % \
- (add, remove, grow, shrink, up, -down, up-down)
-print "%-40s %7s %7s %+7s" % ("function", "old", "new", "delta")
+print("add/remove: %s/%s grow/shrink: %s/%s up/down: %s/%s (%s)" % \
+ (add, remove, grow, shrink, up, -down, up-down))
+print("%-40s %7s %7s %+7s" % ("function", "old", "new", "delta"))
for d, n in delta:
- if d: print "%-40s %7s %7s %+7d" % (n, old.get(n,"-"), new.get(n,"-"), d)
+ if d: print("%-40s %7s %7s %+7d" % (n, old.get(n,"-"), new.get(n,"-"), d))
diff --git a/scripts/coccinelle/iterators/use_after_iter.cocci b/scripts/coccinelle/iterators/use_after_iter.cocci
index f085f5968c52..ce8cc9c006e5 100644
--- a/scripts/coccinelle/iterators/use_after_iter.cocci
+++ b/scripts/coccinelle/iterators/use_after_iter.cocci
@@ -123,7 +123,7 @@ list_remove_head(x,c,...)
|
sizeof(<+...c...+>)
|
-&c->member
+ &c->member
|
c = E
|
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index d79cba4ce3eb..ebced77deb9c 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -96,13 +96,15 @@ savedefconfig: $(obj)/conf
defconfig: $(obj)/conf
ifeq ($(KBUILD_DEFCONFIG),)
$< $(silent) --defconfig $(Kconfig)
-else ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/configs/$(KBUILD_DEFCONFIG)),)
+else
+ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/configs/$(KBUILD_DEFCONFIG)),)
@$(kecho) "*** Default configuration is based on '$(KBUILD_DEFCONFIG)'"
$(Q)$< $(silent) --defconfig=arch/$(SRCARCH)/configs/$(KBUILD_DEFCONFIG) $(Kconfig)
else
@$(kecho) "*** Default configuration is based on target '$(KBUILD_DEFCONFIG)'"
$(Q)$(MAKE) -f $(srctree)/Makefile $(KBUILD_DEFCONFIG)
endif
+endif
%_defconfig: $(obj)/conf
$(Q)$< $(silent) --defconfig=arch/$(SRCARCH)/configs/$@ $(Kconfig)
diff --git a/scripts/ld-version.sh b/scripts/ld-version.sh
index 198580d245e0..1659b409ef10 100755
--- a/scripts/ld-version.sh
+++ b/scripts/ld-version.sh
@@ -1,7 +1,7 @@
#!/usr/bin/awk -f
# extract linker version number from stdin and turn into single number
{
- gsub(".*)", "");
+ gsub(".*\\)", "");
split($1,a, ".");
print a[1]*10000000 + a[2]*100000 + a[3]*10000 + a[4]*100 + a[5];
exit
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index dacf71a43ad4..ba6c34ea5429 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -62,7 +62,7 @@ vmlinux_link()
-Wl,--start-group \
${KBUILD_VMLINUX_MAIN} \
-Wl,--end-group \
- -lutil -lrt ${1}
+ -lutil -lrt -lpthread ${1}
rm -f linux
fi
}
diff --git a/scripts/package/mkspec b/scripts/package/mkspec
index 71004daefe31..fe44d68e9344 100755
--- a/scripts/package/mkspec
+++ b/scripts/package/mkspec
@@ -131,11 +131,11 @@ echo 'rm -rf $RPM_BUILD_ROOT'
echo ""
echo "%post"
echo "if [ -x /sbin/installkernel -a -r /boot/vmlinuz-$KERNELRELEASE -a -r /boot/System.map-$KERNELRELEASE ]; then"
-echo "cp /boot/vmlinuz-$KERNELRELEASE /boot/vmlinuz-$KERNELRELEASE-rpm"
-echo "cp /boot/System.map-$KERNELRELEASE /boot/System.map-$KERNELRELEASE-rpm"
+echo "cp /boot/vmlinuz-$KERNELRELEASE /boot/.vmlinuz-$KERNELRELEASE-rpm"
+echo "cp /boot/System.map-$KERNELRELEASE /boot/.System.map-$KERNELRELEASE-rpm"
echo "rm -f /boot/vmlinuz-$KERNELRELEASE /boot/System.map-$KERNELRELEASE"
-echo "/sbin/installkernel $KERNELRELEASE /boot/vmlinuz-$KERNELRELEASE-rpm /boot/System.map-$KERNELRELEASE-rpm"
-echo "rm -f /boot/vmlinuz-$KERNELRELEASE-rpm /boot/System.map-$KERNELRELEASE-rpm"
+echo "/sbin/installkernel $KERNELRELEASE /boot/.vmlinuz-$KERNELRELEASE-rpm /boot/.System.map-$KERNELRELEASE-rpm"
+echo "rm -f /boot/.vmlinuz-$KERNELRELEASE-rpm /boot/.System.map-$KERNELRELEASE-rpm"
echo "fi"
echo ""
echo "%files"
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index 826470d7f000..96e2486a6fc4 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -263,7 +263,8 @@ if ($arch eq "x86_64") {
} elsif ($arch eq "powerpc") {
$local_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\.?\\S+)";
- $function_regex = "^([0-9a-fA-F]+)\\s+<(\\.?.*?)>:";
+ # See comment in the sparc64 section for why we use '\w'.
+ $function_regex = "^([0-9a-fA-F]+)\\s+<(\\.?\\w*?)>:";
$mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s\\.?_mcount\$";
if ($bits == 64) {
diff --git a/security/commoncap.c b/security/commoncap.c
index f035b84b3601..7fa251aea32f 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -148,12 +148,17 @@ int cap_ptrace_access_check(struct task_struct *child, unsigned int mode)
{
int ret = 0;
const struct cred *cred, *child_cred;
+ const kernel_cap_t *caller_caps;
rcu_read_lock();
cred = current_cred();
child_cred = __task_cred(child);
+ if (mode & PTRACE_MODE_FSCREDS)
+ caller_caps = &cred->cap_effective;
+ else
+ caller_caps = &cred->cap_permitted;
if (cred->user_ns == child_cred->user_ns &&
- cap_issubset(child_cred->cap_permitted, cred->cap_permitted))
+ cap_issubset(child_cred->cap_permitted, *caller_caps))
goto out;
if (ns_capable(child_cred->user_ns, CAP_SYS_PTRACE))
goto out;
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
index 1334e02ae8f4..3d145a3ffccf 100644
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -23,6 +23,7 @@
#include <linux/integrity.h>
#include <linux/evm.h>
#include <crypto/hash.h>
+#include <crypto/algapi.h>
#include "evm.h"
int evm_initialized;
@@ -148,7 +149,7 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,
xattr_value_len, calc.digest);
if (rc)
break;
- rc = memcmp(xattr_data->digest, calc.digest,
+ rc = crypto_memneq(xattr_data->digest, calc.digest,
sizeof(calc.digest));
if (rc)
rc = -EINVAL;
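
Replacing memcmp() with crypto_memneq() above closes a timing side channel: the HMAC comparison now takes the same time regardless of where the digests first differ, so an attacker cannot learn a correct prefix byte by byte. A userspace model of the constant-time idiom:

    #include <stddef.h>

    static int ct_memneq(const void *a, const void *b, size_t n)
    {
        const unsigned char *pa = a, *pb = b;
        unsigned char diff = 0;
        size_t i;

        for (i = 0; i < n; i++)     /* no early exit on mismatch */
            diff |= pa[i] ^ pb[i];
        return diff;                /* 0 iff equal */
    }
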
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index ff81026f6ddb..7c57c7fcf5a2 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -398,12 +398,10 @@ static int smk_copy_relabel(struct list_head *nhead, struct list_head *ohead,
*/
static inline unsigned int smk_ptrace_mode(unsigned int mode)
{
- switch (mode) {
- case PTRACE_MODE_READ:
- return MAY_READ;
- case PTRACE_MODE_ATTACH:
+ if (mode & PTRACE_MODE_ATTACH)
return MAY_READWRITE;
- }
+ if (mode & PTRACE_MODE_READ)
+ return MAY_READ;
return 0;
}
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index d3c19c970a06..cb6ed10816d4 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -281,7 +281,7 @@ static int yama_ptrace_access_check(struct task_struct *child,
int rc = 0;
/* require ptrace target be a child of ptracer on attach */
- if (mode == PTRACE_MODE_ATTACH) {
+ if (mode & PTRACE_MODE_ATTACH) {
switch (ptrace_scope) {
case YAMA_SCOPE_DISABLED:
/* No additional restrictions. */
@@ -307,7 +307,7 @@ static int yama_ptrace_access_check(struct task_struct *child,
}
}
- if (rc) {
+ if (rc && (mode & PTRACE_MODE_NOAUDIT) == 0) {
printk_ratelimited(KERN_NOTICE
"ptrace of pid %d was attempted by: %s (pid %d)\n",
child->pid, current->comm, current->pid);
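
Both the Smack and Yama hunks adapt to ptrace access modes being bit masks (PTRACE_MODE_READ/ATTACH combined with FSCREDS, REALCREDS, or NOAUDIT modifiers), so the mode must be tested with &, never compared with ==, and NOAUDIT suppresses the denial log. A hedged sketch with illustrative flag values:

    #include <stdio.h>

    #define MODE_READ    0x01
    #define MODE_ATTACH  0x02
    #define MODE_NOAUDIT 0x04   /* illustrative bit values, not the kernel's */

    static int required_access(unsigned int mode)
    {
        if (mode & MODE_ATTACH)     /* test bits, never 'mode == ATTACH' */
            return 2;               /* read-write, as in smk_ptrace_mode() */
        if (mode & MODE_READ)
            return 1;
        return 0;
    }

    static void report_denial(unsigned int mode)
    {
        if (!(mode & MODE_NOAUDIT)) /* quiet probes stay quiet */
            fprintf(stderr, "ptrace attempt denied\n");
    }
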
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index b123c42e7dc8..b554d7f9e3be 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -44,6 +44,13 @@
#include <sound/compress_offload.h>
#include <sound/compress_driver.h>
+/* struct snd_compr_codec_caps overflows the ioctl bit size for some
+ * architectures, so we need to disable the relevant ioctls.
+ */
+#if _IOC_SIZEBITS < 14
+#define COMPR_CODEC_CAPS_OVERFLOW
+#endif
+
/* TODO:
* - add substream support for multiple devices in case of
* SND_DYNAMIC_MINORS is not used
@@ -438,6 +445,7 @@ out:
return retval;
}
+#ifndef COMPR_CODEC_CAPS_OVERFLOW
static int
snd_compr_get_codec_caps(struct snd_compr_stream *stream, unsigned long arg)
{
@@ -461,6 +469,7 @@ out:
kfree(caps);
return retval;
}
+#endif /* !COMPR_CODEC_CAPS_OVERFLOW */
/* revisit this with snd_pcm_preallocate_xxx */
static int snd_compr_allocate_buffer(struct snd_compr_stream *stream,
@@ -799,9 +808,11 @@ static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
case _IOC_NR(SNDRV_COMPRESS_GET_CAPS):
retval = snd_compr_get_caps(stream, arg);
break;
+#ifndef COMPR_CODEC_CAPS_OVERFLOW
case _IOC_NR(SNDRV_COMPRESS_GET_CODEC_CAPS):
retval = snd_compr_get_codec_caps(stream, arg);
break;
+#endif
case _IOC_NR(SNDRV_COMPRESS_SET_PARAMS):
retval = snd_compr_set_params(stream, arg);
break;
diff --git a/sound/core/control.c b/sound/core/control.c
index 196a6fe100ca..a85d45595d02 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -1405,6 +1405,8 @@ static int snd_ctl_tlv_ioctl(struct snd_ctl_file *file,
return -EFAULT;
if (tlv.length < sizeof(unsigned int) * 2)
return -EINVAL;
+ if (!tlv.numid)
+ return -EINVAL;
down_read(&card->controls_rwsem);
kctl = snd_ctl_find_numid(card, tlv.numid);
if (kctl == NULL) {
diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c
index b9c0910fb8c4..0608f216f359 100644
--- a/sound/core/control_compat.c
+++ b/sound/core/control_compat.c
@@ -170,6 +170,19 @@ struct snd_ctl_elem_value32 {
unsigned char reserved[128];
};
+#ifdef CONFIG_X86_X32
+/* x32 has a different alignment for 64bit values from ia32 */
+struct snd_ctl_elem_value_x32 {
+ struct snd_ctl_elem_id id;
+ unsigned int indirect; /* bit-field causes misalignment */
+ union {
+ s32 integer[128];
+ unsigned char data[512];
+ s64 integer64[64];
+ } value;
+ unsigned char reserved[128];
+};
+#endif /* CONFIG_X86_X32 */
/* get the value type and count of the control */
static int get_ctl_type(struct snd_card *card, struct snd_ctl_elem_id *id,
@@ -219,9 +232,11 @@ static int get_elem_size(int type, int count)
static int copy_ctl_value_from_user(struct snd_card *card,
struct snd_ctl_elem_value *data,
- struct snd_ctl_elem_value32 __user *data32,
+ void __user *userdata,
+ void __user *valuep,
int *typep, int *countp)
{
+ struct snd_ctl_elem_value32 __user *data32 = userdata;
int i, type, size;
int uninitialized_var(count);
unsigned int indirect;
@@ -239,8 +254,9 @@ static int copy_ctl_value_from_user(struct snd_card *card,
if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN ||
type == SNDRV_CTL_ELEM_TYPE_INTEGER) {
for (i = 0; i < count; i++) {
+ s32 __user *intp = valuep;
int val;
- if (get_user(val, &data32->value.integer[i]))
+ if (get_user(val, &intp[i]))
return -EFAULT;
data->value.integer.value[i] = val;
}
@@ -250,8 +266,7 @@ static int copy_ctl_value_from_user(struct snd_card *card,
dev_err(card->dev, "snd_ioctl32_ctl_elem_value: unknown type %d\n", type);
return -EINVAL;
}
- if (copy_from_user(data->value.bytes.data,
- data32->value.data, size))
+ if (copy_from_user(data->value.bytes.data, valuep, size))
return -EFAULT;
}
@@ -261,7 +276,8 @@ static int copy_ctl_value_from_user(struct snd_card *card,
}
/* restore the value to 32bit */
-static int copy_ctl_value_to_user(struct snd_ctl_elem_value32 __user *data32,
+static int copy_ctl_value_to_user(void __user *userdata,
+ void __user *valuep,
struct snd_ctl_elem_value *data,
int type, int count)
{
@@ -270,22 +286,22 @@ static int copy_ctl_value_to_user(struct snd_ctl_elem_value32 __user *data32,
if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN ||
type == SNDRV_CTL_ELEM_TYPE_INTEGER) {
for (i = 0; i < count; i++) {
+ s32 __user *intp = valuep;
int val;
val = data->value.integer.value[i];
- if (put_user(val, &data32->value.integer[i]))
+ if (put_user(val, &intp[i]))
return -EFAULT;
}
} else {
size = get_elem_size(type, count);
- if (copy_to_user(data32->value.data,
- data->value.bytes.data, size))
+ if (copy_to_user(valuep, data->value.bytes.data, size))
return -EFAULT;
}
return 0;
}
-static int snd_ctl_elem_read_user_compat(struct snd_card *card,
- struct snd_ctl_elem_value32 __user *data32)
+static int ctl_elem_read_user(struct snd_card *card,
+ void __user *userdata, void __user *valuep)
{
struct snd_ctl_elem_value *data;
int err, type, count;
@@ -294,7 +310,9 @@ static int snd_ctl_elem_read_user_compat(struct snd_card *card,
if (data == NULL)
return -ENOMEM;
- if ((err = copy_ctl_value_from_user(card, data, data32, &type, &count)) < 0)
+ err = copy_ctl_value_from_user(card, data, userdata, valuep,
+ &type, &count);
+ if (err < 0)
goto error;
snd_power_lock(card);
@@ -303,14 +321,15 @@ static int snd_ctl_elem_read_user_compat(struct snd_card *card,
err = snd_ctl_elem_read(card, data);
snd_power_unlock(card);
if (err >= 0)
- err = copy_ctl_value_to_user(data32, data, type, count);
+ err = copy_ctl_value_to_user(userdata, valuep, data,
+ type, count);
error:
kfree(data);
return err;
}
-static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
- struct snd_ctl_elem_value32 __user *data32)
+static int ctl_elem_write_user(struct snd_ctl_file *file,
+ void __user *userdata, void __user *valuep)
{
struct snd_ctl_elem_value *data;
struct snd_card *card = file->card;
@@ -320,7 +339,9 @@ static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
if (data == NULL)
return -ENOMEM;
- if ((err = copy_ctl_value_from_user(card, data, data32, &type, &count)) < 0)
+ err = copy_ctl_value_from_user(card, data, userdata, valuep,
+ &type, &count);
+ if (err < 0)
goto error;
snd_power_lock(card);
@@ -329,12 +350,39 @@ static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
err = snd_ctl_elem_write(card, file, data);
snd_power_unlock(card);
if (err >= 0)
- err = copy_ctl_value_to_user(data32, data, type, count);
+ err = copy_ctl_value_to_user(userdata, valuep, data,
+ type, count);
error:
kfree(data);
return err;
}
+static int snd_ctl_elem_read_user_compat(struct snd_card *card,
+ struct snd_ctl_elem_value32 __user *data32)
+{
+ return ctl_elem_read_user(card, data32, &data32->value);
+}
+
+static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
+ struct snd_ctl_elem_value32 __user *data32)
+{
+ return ctl_elem_write_user(file, data32, &data32->value);
+}
+
+#ifdef CONFIG_X86_X32
+static int snd_ctl_elem_read_user_x32(struct snd_card *card,
+ struct snd_ctl_elem_value_x32 __user *data32)
+{
+ return ctl_elem_read_user(card, data32, &data32->value);
+}
+
+static int snd_ctl_elem_write_user_x32(struct snd_ctl_file *file,
+ struct snd_ctl_elem_value_x32 __user *data32)
+{
+ return ctl_elem_write_user(file, data32, &data32->value);
+}
+#endif /* CONFIG_X86_X32 */
+
/* add or replace a user control */
static int snd_ctl_elem_add_compat(struct snd_ctl_file *file,
struct snd_ctl_elem_info32 __user *data32,
@@ -393,6 +441,10 @@ enum {
SNDRV_CTL_IOCTL_ELEM_WRITE32 = _IOWR('U', 0x13, struct snd_ctl_elem_value32),
SNDRV_CTL_IOCTL_ELEM_ADD32 = _IOWR('U', 0x17, struct snd_ctl_elem_info32),
SNDRV_CTL_IOCTL_ELEM_REPLACE32 = _IOWR('U', 0x18, struct snd_ctl_elem_info32),
+#ifdef CONFIG_X86_X32
+ SNDRV_CTL_IOCTL_ELEM_READ_X32 = _IOWR('U', 0x12, struct snd_ctl_elem_value_x32),
+ SNDRV_CTL_IOCTL_ELEM_WRITE_X32 = _IOWR('U', 0x13, struct snd_ctl_elem_value_x32),
+#endif /* CONFIG_X86_X32 */
};
static inline long snd_ctl_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
@@ -431,6 +483,12 @@ static inline long snd_ctl_ioctl_compat(struct file *file, unsigned int cmd, uns
return snd_ctl_elem_add_compat(ctl, argp, 0);
case SNDRV_CTL_IOCTL_ELEM_REPLACE32:
return snd_ctl_elem_add_compat(ctl, argp, 1);
+#ifdef CONFIG_X86_X32
+ case SNDRV_CTL_IOCTL_ELEM_READ_X32:
+ return snd_ctl_elem_read_user_x32(ctl->card, argp);
+ case SNDRV_CTL_IOCTL_ELEM_WRITE_X32:
+ return snd_ctl_elem_write_user_x32(ctl, argp);
+#endif /* CONFIG_X86_X32 */
}
down_read(&snd_ioctl_rwsem);
diff --git a/sound/core/hrtimer.c b/sound/core/hrtimer.c
index f845ecf7e172..656d9a9032dc 100644
--- a/sound/core/hrtimer.c
+++ b/sound/core/hrtimer.c
@@ -90,7 +90,7 @@ static int snd_hrtimer_start(struct snd_timer *t)
struct snd_hrtimer *stime = t->private_data;
atomic_set(&stime->running, 0);
- hrtimer_cancel(&stime->hrt);
+ hrtimer_try_to_cancel(&stime->hrt);
hrtimer_start(&stime->hrt, ns_to_ktime(t->sticks * resolution),
HRTIMER_MODE_REL);
atomic_set(&stime->running, 1);
@@ -101,6 +101,7 @@ static int snd_hrtimer_stop(struct snd_timer *t)
{
struct snd_hrtimer *stime = t->private_data;
atomic_set(&stime->running, 0);
+ hrtimer_try_to_cancel(&stime->hrt);
return 0;
}
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index 58550cc93f28..33e72c809e50 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -834,7 +834,8 @@ static int choose_rate(struct snd_pcm_substream *substream,
return snd_pcm_hw_param_near(substream, params, SNDRV_PCM_HW_PARAM_RATE, best_rate, NULL);
}
-static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream)
+static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
+ bool trylock)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_pcm_hw_params *params, *sparams;
@@ -848,7 +849,10 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream)
struct snd_mask sformat_mask;
struct snd_mask mask;
- if (mutex_lock_interruptible(&runtime->oss.params_lock))
+ if (trylock) {
+ if (!(mutex_trylock(&runtime->oss.params_lock)))
+ return -EAGAIN;
+ } else if (mutex_lock_interruptible(&runtime->oss.params_lock))
return -EINTR;
sw_params = kmalloc(sizeof(*sw_params), GFP_KERNEL);
params = kmalloc(sizeof(*params), GFP_KERNEL);
@@ -1092,7 +1096,7 @@ static int snd_pcm_oss_get_active_substream(struct snd_pcm_oss_file *pcm_oss_fil
if (asubstream == NULL)
asubstream = substream;
if (substream->runtime->oss.params) {
- err = snd_pcm_oss_change_params(substream);
+ err = snd_pcm_oss_change_params(substream, false);
if (err < 0)
return err;
}
@@ -1132,7 +1136,7 @@ static int snd_pcm_oss_make_ready(struct snd_pcm_substream *substream)
return 0;
runtime = substream->runtime;
if (runtime->oss.params) {
- err = snd_pcm_oss_change_params(substream);
+ err = snd_pcm_oss_change_params(substream, false);
if (err < 0)
return err;
}
@@ -2163,7 +2167,7 @@ static int snd_pcm_oss_get_space(struct snd_pcm_oss_file *pcm_oss_file, int stre
runtime = substream->runtime;
if (runtime->oss.params &&
- (err = snd_pcm_oss_change_params(substream)) < 0)
+ (err = snd_pcm_oss_change_params(substream, false)) < 0)
return err;
info.fragsize = runtime->oss.period_bytes;
@@ -2800,7 +2804,12 @@ static int snd_pcm_oss_mmap(struct file *file, struct vm_area_struct *area)
return -EIO;
if (runtime->oss.params) {
- if ((err = snd_pcm_oss_change_params(substream)) < 0)
+ /* use mutex_trylock() for params_lock for avoiding a deadlock
+ * between mmap_sem and params_lock taken by
+ * copy_from/to_user() in snd_pcm_oss_write/read()
+ */
+ err = snd_pcm_oss_change_params(substream, true);
+ if (err < 0)
return err;
}
#ifdef CONFIG_SND_PCM_OSS_PLUGINS
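
The params_lock change above breaks an AB-BA deadlock: the mmap path already holds mmap_sem, while the read/write paths take params_lock and then fault user pages under mmap_sem. Taking params_lock with a trylock on the mmap path and failing with -EAGAIN breaks the cycle. A hedged pthread analogue:

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t params_lock = PTHREAD_MUTEX_INITIALIZER;

    static int change_params(bool trylock)
    {
        if (trylock) {
            if (pthread_mutex_trylock(&params_lock))
                return -EAGAIN;     /* caller backs off, no deadlock */
        } else {
            pthread_mutex_lock(&params_lock);   /* normal paths just block */
        }

        /* ... reprogram the hardware parameters ... */

        pthread_mutex_unlock(&params_lock);
        return 0;
    }
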
diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
index b48b434444ed..1f64ab0c2a95 100644
--- a/sound/core/pcm_compat.c
+++ b/sound/core/pcm_compat.c
@@ -183,6 +183,14 @@ static int snd_pcm_ioctl_channel_info_compat(struct snd_pcm_substream *substream
return err;
}
+#ifdef CONFIG_X86_X32
+/* X32 ABI has the same struct as x86-64 for snd_pcm_channel_info */
+static int snd_pcm_channel_info_user(struct snd_pcm_substream *substream,
+ struct snd_pcm_channel_info __user *src);
+#define snd_pcm_ioctl_channel_info_x32(s, p) \
+ snd_pcm_channel_info_user(s, p)
+#endif /* CONFIG_X86_X32 */
+
struct snd_pcm_status32 {
s32 state;
struct compat_timespec trigger_tstamp;
@@ -243,6 +251,71 @@ static int snd_pcm_status_user_compat(struct snd_pcm_substream *substream,
return err;
}
+#ifdef CONFIG_X86_X32
+/* X32 ABI has 64bit timespec and 64bit alignment */
+struct snd_pcm_status_x32 {
+ s32 state;
+ u32 rsvd; /* alignment */
+ struct timespec trigger_tstamp;
+ struct timespec tstamp;
+ u32 appl_ptr;
+ u32 hw_ptr;
+ s32 delay;
+ u32 avail;
+ u32 avail_max;
+ u32 overrange;
+ s32 suspended_state;
+ u32 audio_tstamp_data;
+ struct timespec audio_tstamp;
+ struct timespec driver_tstamp;
+ u32 audio_tstamp_accuracy;
+ unsigned char reserved[52-2*sizeof(struct timespec)];
+} __packed;
+
+#define put_timespec(src, dst) copy_to_user(dst, src, sizeof(*dst))
+
+static int snd_pcm_status_user_x32(struct snd_pcm_substream *substream,
+ struct snd_pcm_status_x32 __user *src,
+ bool ext)
+{
+ struct snd_pcm_status status;
+ int err;
+
+ memset(&status, 0, sizeof(status));
+ /*
+ * with extension, parameters are read/write,
+ * get audio_tstamp_data from user,
+ * ignore rest of status structure
+ */
+ if (ext && get_user(status.audio_tstamp_data,
+ (u32 __user *)(&src->audio_tstamp_data)))
+ return -EFAULT;
+ err = snd_pcm_status(substream, &status);
+ if (err < 0)
+ return err;
+
+ if (clear_user(src, sizeof(*src)))
+ return -EFAULT;
+ if (put_user(status.state, &src->state) ||
+ put_timespec(&status.trigger_tstamp, &src->trigger_tstamp) ||
+ put_timespec(&status.tstamp, &src->tstamp) ||
+ put_user(status.appl_ptr, &src->appl_ptr) ||
+ put_user(status.hw_ptr, &src->hw_ptr) ||
+ put_user(status.delay, &src->delay) ||
+ put_user(status.avail, &src->avail) ||
+ put_user(status.avail_max, &src->avail_max) ||
+ put_user(status.overrange, &src->overrange) ||
+ put_user(status.suspended_state, &src->suspended_state) ||
+ put_user(status.audio_tstamp_data, &src->audio_tstamp_data) ||
+ put_timespec(&status.audio_tstamp, &src->audio_tstamp) ||
+ put_timespec(&status.driver_tstamp, &src->driver_tstamp) ||
+ put_user(status.audio_tstamp_accuracy, &src->audio_tstamp_accuracy))
+ return -EFAULT;
+
+ return err;
+}
+#endif /* CONFIG_X86_X32 */
+
/* both for HW_PARAMS and HW_REFINE */
static int snd_pcm_ioctl_hw_params_compat(struct snd_pcm_substream *substream,
int refine,
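
The x32 ABI runs 32-bit userspace pointers against the 64-bit kernel, so its timespec is the kernel's native 16-byte layout and wants 8-byte alignment; that is what the u32 rsvd pad after the s32 state provides. A hypothetical compile-time check (not in the patch) states the assumed layout:

	static inline void snd_pcm_status_x32_layout_check(void)
	{
		BUILD_BUG_ON(sizeof(struct timespec) != 16);	/* 64-bit kernel */
		BUILD_BUG_ON(offsetof(struct snd_pcm_status_x32,
				      trigger_tstamp) != 8);	/* rsvd pad holds */
	}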
@@ -255,10 +328,15 @@ static int snd_pcm_ioctl_hw_params_compat(struct snd_pcm_substream *substream,
if (! (runtime = substream->runtime))
return -ENOTTY;
- /* only fifo_size is different, so just copy all */
- data = memdup_user(data32, sizeof(*data32));
- if (IS_ERR(data))
- return PTR_ERR(data);
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* only fifo_size (RO from userspace) is different, so just copy all */
+ if (copy_from_user(data, data32, sizeof(*data32))) {
+ err = -EFAULT;
+ goto error;
+ }
if (refine)
err = snd_pcm_hw_refine(substream, data);
@@ -464,6 +542,93 @@ static int snd_pcm_ioctl_sync_ptr_compat(struct snd_pcm_substream *substream,
return 0;
}
+#ifdef CONFIG_X86_X32
+/* X32 ABI has 64bit timespec and 64bit alignment */
+struct snd_pcm_mmap_status_x32 {
+ s32 state;
+ s32 pad1;
+ u32 hw_ptr;
+ u32 pad2; /* alignment */
+ struct timespec tstamp;
+ s32 suspended_state;
+ struct timespec audio_tstamp;
+} __packed;
+
+struct snd_pcm_mmap_control_x32 {
+ u32 appl_ptr;
+ u32 avail_min;
+};
+
+struct snd_pcm_sync_ptr_x32 {
+ u32 flags;
+ u32 rsvd; /* alignment */
+ union {
+ struct snd_pcm_mmap_status_x32 status;
+ unsigned char reserved[64];
+ } s;
+ union {
+ struct snd_pcm_mmap_control_x32 control;
+ unsigned char reserved[64];
+ } c;
+} __packed;
+
+static int snd_pcm_ioctl_sync_ptr_x32(struct snd_pcm_substream *substream,
+ struct snd_pcm_sync_ptr_x32 __user *src)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ volatile struct snd_pcm_mmap_status *status;
+ volatile struct snd_pcm_mmap_control *control;
+ u32 sflags;
+ struct snd_pcm_mmap_control scontrol;
+ struct snd_pcm_mmap_status sstatus;
+ snd_pcm_uframes_t boundary;
+ int err;
+
+ if (snd_BUG_ON(!runtime))
+ return -EINVAL;
+
+ if (get_user(sflags, &src->flags) ||
+ get_user(scontrol.appl_ptr, &src->c.control.appl_ptr) ||
+ get_user(scontrol.avail_min, &src->c.control.avail_min))
+ return -EFAULT;
+ if (sflags & SNDRV_PCM_SYNC_PTR_HWSYNC) {
+ err = snd_pcm_hwsync(substream);
+ if (err < 0)
+ return err;
+ }
+ status = runtime->status;
+ control = runtime->control;
+ boundary = recalculate_boundary(runtime);
+ if (!boundary)
+ boundary = 0x7fffffff;
+ snd_pcm_stream_lock_irq(substream);
+ /* FIXME: we should consider the boundary for the sync from app */
+ if (!(sflags & SNDRV_PCM_SYNC_PTR_APPL))
+ control->appl_ptr = scontrol.appl_ptr;
+ else
+ scontrol.appl_ptr = control->appl_ptr % boundary;
+ if (!(sflags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
+ control->avail_min = scontrol.avail_min;
+ else
+ scontrol.avail_min = control->avail_min;
+ sstatus.state = status->state;
+ sstatus.hw_ptr = status->hw_ptr % boundary;
+ sstatus.tstamp = status->tstamp;
+ sstatus.suspended_state = status->suspended_state;
+ sstatus.audio_tstamp = status->audio_tstamp;
+ snd_pcm_stream_unlock_irq(substream);
+ if (put_user(sstatus.state, &src->s.status.state) ||
+ put_user(sstatus.hw_ptr, &src->s.status.hw_ptr) ||
+ put_timespec(&sstatus.tstamp, &src->s.status.tstamp) ||
+ put_user(sstatus.suspended_state, &src->s.status.suspended_state) ||
+ put_timespec(&sstatus.audio_tstamp, &src->s.status.audio_tstamp) ||
+ put_user(scontrol.appl_ptr, &src->c.control.appl_ptr) ||
+ put_user(scontrol.avail_min, &src->c.control.avail_min))
+ return -EFAULT;
+
+ return 0;
+}
+#endif /* CONFIG_X86_X32 */
/*
*/
@@ -482,7 +647,12 @@ enum {
SNDRV_PCM_IOCTL_WRITEN_FRAMES32 = _IOW('A', 0x52, struct snd_xfern32),
SNDRV_PCM_IOCTL_READN_FRAMES32 = _IOR('A', 0x53, struct snd_xfern32),
SNDRV_PCM_IOCTL_SYNC_PTR32 = _IOWR('A', 0x23, struct snd_pcm_sync_ptr32),
-
+#ifdef CONFIG_X86_X32
+ SNDRV_PCM_IOCTL_CHANNEL_INFO_X32 = _IOR('A', 0x32, struct snd_pcm_channel_info),
+ SNDRV_PCM_IOCTL_STATUS_X32 = _IOR('A', 0x20, struct snd_pcm_status_x32),
+ SNDRV_PCM_IOCTL_STATUS_EXT_X32 = _IOWR('A', 0x24, struct snd_pcm_status_x32),
+ SNDRV_PCM_IOCTL_SYNC_PTR_X32 = _IOWR('A', 0x23, struct snd_pcm_sync_ptr_x32),
+#endif /* CONFIG_X86_X32 */
};
static long snd_pcm_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
@@ -554,6 +724,16 @@ static long snd_pcm_ioctl_compat(struct file *file, unsigned int cmd, unsigned l
return snd_pcm_ioctl_rewind_compat(substream, argp);
case SNDRV_PCM_IOCTL_FORWARD32:
return snd_pcm_ioctl_forward_compat(substream, argp);
+#ifdef CONFIG_X86_X32
+ case SNDRV_PCM_IOCTL_STATUS_X32:
+ return snd_pcm_status_user_x32(substream, argp, false);
+ case SNDRV_PCM_IOCTL_STATUS_EXT_X32:
+ return snd_pcm_status_user_x32(substream, argp, true);
+ case SNDRV_PCM_IOCTL_SYNC_PTR_X32:
+ return snd_pcm_ioctl_sync_ptr_x32(substream, argp);
+ case SNDRV_PCM_IOCTL_CHANNEL_INFO_X32:
+ return snd_pcm_ioctl_channel_info_x32(substream, argp);
+#endif /* CONFIG_X86_X32 */
}
return -ENOIOCTLCMD;
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 6b5a811e01a5..3a9b66c6e09c 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -322,7 +322,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
char name[16];
snd_pcm_debug_name(substream, name, sizeof(name));
pcm_err(substream->pcm,
- "BUG: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
+ "invalid position: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
name, pos, runtime->buffer_size,
runtime->period_size);
}
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index a8b27cdc2844..4ba64fd49759 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -74,6 +74,18 @@ static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream);
static DEFINE_RWLOCK(snd_pcm_link_rwlock);
static DECLARE_RWSEM(snd_pcm_link_rwsem);
+/* A writer on an rwsem may block readers even while it is still waiting
+ * in the queue, and this can deadlock when a code path takes the read
+ * semaphore twice (e.g. once in snd_pcm_action_nonatomic() and again in
+ * snd_pcm_stream_lock()). As a (suboptimal) workaround, let the writer
+ * spin until it gets the lock.
+ */
+static inline void down_write_nonblock(struct rw_semaphore *lock)
+{
+ while (!down_write_trylock(lock))
+ cond_resched();
+}
+
/**
* snd_pcm_stream_lock - Lock the PCM stream
* @substream: PCM substream
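
The hazard behind down_write_nonblock(), reduced to a two-task trace (illustrative):

	/*
	 *   task A: down_read(&sem)  ...............  down_read(&sem) again
	 *   task B:            down_write(&sem)  <- queued between A's reads
	 *
	 * On a fair rwsem the queued writer B blocks A's second down_read()
	 * while B itself waits for A's first read to end: deadlock. A
	 * down_write_trylock() loop never joins the wait queue, keeping
	 * recursive readers safe at the cost of writer busy-waiting.
	 */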
@@ -1813,7 +1825,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
res = -ENOMEM;
goto _nolock;
}
- down_write(&snd_pcm_link_rwsem);
+ down_write_nonblock(&snd_pcm_link_rwsem);
write_lock_irq(&snd_pcm_link_rwlock);
if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN ||
substream->runtime->status->state != substream1->runtime->status->state ||
@@ -1860,7 +1872,7 @@ static int snd_pcm_unlink(struct snd_pcm_substream *substream)
struct snd_pcm_substream *s;
int res = 0;
- down_write(&snd_pcm_link_rwsem);
+ down_write_nonblock(&snd_pcm_link_rwsem);
write_lock_irq(&snd_pcm_link_rwlock);
if (!snd_pcm_stream_linked(substream)) {
res = -EALREADY;
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index a7759846fbaa..795437b10082 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -942,31 +942,36 @@ static long snd_rawmidi_kernel_read1(struct snd_rawmidi_substream *substream,
unsigned long flags;
long result = 0, count1;
struct snd_rawmidi_runtime *runtime = substream->runtime;
+ unsigned long appl_ptr;
+ spin_lock_irqsave(&runtime->lock, flags);
while (count > 0 && runtime->avail) {
count1 = runtime->buffer_size - runtime->appl_ptr;
if (count1 > count)
count1 = count;
- spin_lock_irqsave(&runtime->lock, flags);
if (count1 > (int)runtime->avail)
count1 = runtime->avail;
+
+ /* update runtime->appl_ptr before unlocking for userbuf */
+ appl_ptr = runtime->appl_ptr;
+ runtime->appl_ptr += count1;
+ runtime->appl_ptr %= runtime->buffer_size;
+ runtime->avail -= count1;
+
if (kernelbuf)
- memcpy(kernelbuf + result, runtime->buffer + runtime->appl_ptr, count1);
+ memcpy(kernelbuf + result, runtime->buffer + appl_ptr, count1);
if (userbuf) {
spin_unlock_irqrestore(&runtime->lock, flags);
if (copy_to_user(userbuf + result,
- runtime->buffer + runtime->appl_ptr, count1)) {
+ runtime->buffer + appl_ptr, count1)) {
return result > 0 ? result : -EFAULT;
}
spin_lock_irqsave(&runtime->lock, flags);
}
- runtime->appl_ptr += count1;
- runtime->appl_ptr %= runtime->buffer_size;
- runtime->avail -= count1;
- spin_unlock_irqrestore(&runtime->lock, flags);
result += count1;
count -= count1;
}
+ spin_unlock_irqrestore(&runtime->lock, flags);
return result;
}
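
Moving the pointer update ahead of the copy is the usual reserve-then-copy shape: claim the byte range under the lock, then drop the lock only for the user copy. A condensed sketch with hypothetical names (struct rt stands in for the rawmidi runtime):

	struct rt {
		spinlock_t lock;
		unsigned long appl_ptr, avail, buffer_size;
		char *buffer;
	};

	static ssize_t read_reserved(struct rt *rt, char __user *ubuf,
				     unsigned long count)
	{
		unsigned long flags, pos, chunk;

		spin_lock_irqsave(&rt->lock, flags);
		chunk = min_t(unsigned long, count, rt->avail);
		pos = rt->appl_ptr;		/* reserve [pos, pos + chunk) */
		rt->appl_ptr = (pos + chunk) % rt->buffer_size;
		rt->avail -= chunk;
		spin_unlock_irqrestore(&rt->lock, flags);
		/* the range is ours: concurrent readers cannot claim it */
		if (copy_to_user(ubuf, rt->buffer + pos, chunk))
			return -EFAULT;
		return chunk;
	}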
@@ -1055,23 +1060,16 @@ int snd_rawmidi_transmit_empty(struct snd_rawmidi_substream *substream)
EXPORT_SYMBOL(snd_rawmidi_transmit_empty);
/**
- * snd_rawmidi_transmit_peek - copy data from the internal buffer
+ * __snd_rawmidi_transmit_peek - copy data from the internal buffer
* @substream: the rawmidi substream
* @buffer: the buffer pointer
* @count: data size to transfer
*
- * Copies data from the internal output buffer to the given buffer.
- *
- * Call this in the interrupt handler when the midi output is ready,
- * and call snd_rawmidi_transmit_ack() after the transmission is
- * finished.
- *
- * Return: The size of copied data, or a negative error code on failure.
+ * This is a variant of snd_rawmidi_transmit_peek() without spinlock.
*/
-int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
+int __snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
unsigned char *buffer, int count)
{
- unsigned long flags;
int result, count1;
struct snd_rawmidi_runtime *runtime = substream->runtime;
@@ -1081,7 +1079,6 @@ int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
return -EINVAL;
}
result = 0;
- spin_lock_irqsave(&runtime->lock, flags);
if (runtime->avail >= runtime->buffer_size) {
/* warning: lowlevel layer MUST trigger down the hardware */
goto __skip;
@@ -1106,25 +1103,47 @@ int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
}
}
__skip:
+ return result;
+}
+EXPORT_SYMBOL(__snd_rawmidi_transmit_peek);
+
+/**
+ * snd_rawmidi_transmit_peek - copy data from the internal buffer
+ * @substream: the rawmidi substream
+ * @buffer: the buffer pointer
+ * @count: data size to transfer
+ *
+ * Copies data from the internal output buffer to the given buffer.
+ *
+ * Call this in the interrupt handler when the midi output is ready,
+ * and call snd_rawmidi_transmit_ack() after the transmission is
+ * finished.
+ *
+ * Return: The size of copied data, or a negative error code on failure.
+ */
+int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
+ unsigned char *buffer, int count)
+{
+ struct snd_rawmidi_runtime *runtime = substream->runtime;
+ int result;
+ unsigned long flags;
+
+ spin_lock_irqsave(&runtime->lock, flags);
+ result = __snd_rawmidi_transmit_peek(substream, buffer, count);
spin_unlock_irqrestore(&runtime->lock, flags);
return result;
}
EXPORT_SYMBOL(snd_rawmidi_transmit_peek);
/**
- * snd_rawmidi_transmit_ack - acknowledge the transmission
+ * __snd_rawmidi_transmit_ack - acknowledge the transmission
* @substream: the rawmidi substream
* @count: the transferred count
*
- * Advances the hardware pointer for the internal output buffer with
- * the given size and updates the condition.
- * Call after the transmission is finished.
- *
- * Return: The advanced size if successful, or a negative error code on failure.
+ * This is a variant of snd_rawmidi_transmit_ack() without spinlock.
*/
-int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
+int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
{
- unsigned long flags;
struct snd_rawmidi_runtime *runtime = substream->runtime;
if (runtime->buffer == NULL) {
@@ -1132,7 +1151,6 @@ int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
"snd_rawmidi_transmit_ack: output is not active!!!\n");
return -EINVAL;
}
- spin_lock_irqsave(&runtime->lock, flags);
snd_BUG_ON(runtime->avail + count > runtime->buffer_size);
runtime->hw_ptr += count;
runtime->hw_ptr %= runtime->buffer_size;
@@ -1142,9 +1160,32 @@ int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
if (runtime->drain || snd_rawmidi_ready(substream))
wake_up(&runtime->sleep);
}
- spin_unlock_irqrestore(&runtime->lock, flags);
return count;
}
+EXPORT_SYMBOL(__snd_rawmidi_transmit_ack);
+
+/**
+ * snd_rawmidi_transmit_ack - acknowledge the transmission
+ * @substream: the rawmidi substream
+ * @count: the transferred count
+ *
+ * Advances the hardware pointer for the internal output buffer with
+ * the given size and updates the condition.
+ * Call after the transmission is finished.
+ *
+ * Return: The advanced size if successful, or a negative error code on failure.
+ */
+int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
+{
+ struct snd_rawmidi_runtime *runtime = substream->runtime;
+ int result;
+ unsigned long flags;
+
+ spin_lock_irqsave(&runtime->lock, flags);
+ result = __snd_rawmidi_transmit_ack(substream, count);
+ spin_unlock_irqrestore(&runtime->lock, flags);
+ return result;
+}
EXPORT_SYMBOL(snd_rawmidi_transmit_ack);
/**
@@ -1160,12 +1201,22 @@ EXPORT_SYMBOL(snd_rawmidi_transmit_ack);
int snd_rawmidi_transmit(struct snd_rawmidi_substream *substream,
unsigned char *buffer, int count)
{
+ struct snd_rawmidi_runtime *runtime = substream->runtime;
+ int result;
+ unsigned long flags;
+
+ spin_lock_irqsave(&runtime->lock, flags);
if (!substream->opened)
- return -EBADFD;
- count = snd_rawmidi_transmit_peek(substream, buffer, count);
- if (count < 0)
- return count;
- return snd_rawmidi_transmit_ack(substream, count);
+ result = -EBADFD;
+ else {
+ count = __snd_rawmidi_transmit_peek(substream, buffer, count);
+ if (count <= 0)
+ result = count;
+ else
+ result = __snd_rawmidi_transmit_ack(substream, count);
+ }
+ spin_unlock_irqrestore(&runtime->lock, flags);
+ return result;
}
EXPORT_SYMBOL(snd_rawmidi_transmit);
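
The peek/ack split follows the common locked-wrapper idiom: an unlocked __core() for callers that already hold the lock, plus a thin wrapper under the original name. That lets snd_rawmidi_transmit() hold runtime->lock once across both steps instead of taking it twice with a race window in between. The shape, with illustrative names:

	struct obj { spinlock_t lock; /* ... */ };

	static int __foo(struct obj *o)		/* caller must hold o->lock */
	{
		/* ... do the real work ... */
		return 0;
	}

	int foo(struct obj *o)			/* public, takes the lock */
	{
		unsigned long flags;
		int ret;

		spin_lock_irqsave(&o->lock, flags);
		ret = __foo(o);
		spin_unlock_irqrestore(&o->lock, flags);
		return ret;
	}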
@@ -1177,8 +1228,9 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
unsigned long flags;
long count1, result;
struct snd_rawmidi_runtime *runtime = substream->runtime;
+ unsigned long appl_ptr;
- if (snd_BUG_ON(!kernelbuf && !userbuf))
+ if (!kernelbuf && !userbuf)
return -EINVAL;
if (snd_BUG_ON(!runtime->buffer))
return -EINVAL;
@@ -1197,12 +1249,19 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
count1 = count;
if (count1 > (long)runtime->avail)
count1 = runtime->avail;
+
+ /* update runtime->appl_ptr before unlocking for userbuf */
+ appl_ptr = runtime->appl_ptr;
+ runtime->appl_ptr += count1;
+ runtime->appl_ptr %= runtime->buffer_size;
+ runtime->avail -= count1;
+
if (kernelbuf)
- memcpy(runtime->buffer + runtime->appl_ptr,
+ memcpy(runtime->buffer + appl_ptr,
kernelbuf + result, count1);
else if (userbuf) {
spin_unlock_irqrestore(&runtime->lock, flags);
- if (copy_from_user(runtime->buffer + runtime->appl_ptr,
+ if (copy_from_user(runtime->buffer + appl_ptr,
userbuf + result, count1)) {
spin_lock_irqsave(&runtime->lock, flags);
result = result > 0 ? result : -EFAULT;
@@ -1210,9 +1269,6 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
}
spin_lock_irqsave(&runtime->lock, flags);
}
- runtime->appl_ptr += count1;
- runtime->appl_ptr %= runtime->buffer_size;
- runtime->avail -= count1;
result += count1;
count -= count1;
}
diff --git a/sound/core/rawmidi_compat.c b/sound/core/rawmidi_compat.c
index 5268c1f58c25..09a89094dcf7 100644
--- a/sound/core/rawmidi_compat.c
+++ b/sound/core/rawmidi_compat.c
@@ -94,9 +94,58 @@ static int snd_rawmidi_ioctl_status_compat(struct snd_rawmidi_file *rfile,
return 0;
}
+#ifdef CONFIG_X86_X32
+/* X32 ABI has 64bit timespec and 64bit alignment */
+struct snd_rawmidi_status_x32 {
+ s32 stream;
+ u32 rsvd; /* alignment */
+ struct timespec tstamp;
+ u32 avail;
+ u32 xruns;
+ unsigned char reserved[16];
+} __attribute__((packed));
+
+#define put_timespec(src, dst) copy_to_user(dst, src, sizeof(*dst))
+
+static int snd_rawmidi_ioctl_status_x32(struct snd_rawmidi_file *rfile,
+ struct snd_rawmidi_status_x32 __user *src)
+{
+ int err;
+ struct snd_rawmidi_status status;
+
+ if (rfile->output == NULL)
+ return -EINVAL;
+ if (get_user(status.stream, &src->stream))
+ return -EFAULT;
+
+ switch (status.stream) {
+ case SNDRV_RAWMIDI_STREAM_OUTPUT:
+ err = snd_rawmidi_output_status(rfile->output, &status);
+ break;
+ case SNDRV_RAWMIDI_STREAM_INPUT:
+ err = snd_rawmidi_input_status(rfile->input, &status);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (err < 0)
+ return err;
+
+ if (put_timespec(&status.tstamp, &src->tstamp) ||
+ put_user(status.avail, &src->avail) ||
+ put_user(status.xruns, &src->xruns))
+ return -EFAULT;
+
+ return 0;
+}
+#endif /* CONFIG_X86_X32 */
+
enum {
SNDRV_RAWMIDI_IOCTL_PARAMS32 = _IOWR('W', 0x10, struct snd_rawmidi_params32),
SNDRV_RAWMIDI_IOCTL_STATUS32 = _IOWR('W', 0x20, struct snd_rawmidi_status32),
+#ifdef CONFIG_X86_X32
+ SNDRV_RAWMIDI_IOCTL_STATUS_X32 = _IOWR('W', 0x20, struct snd_rawmidi_status_x32),
+#endif /* CONFIG_X86_X32 */
};
static long snd_rawmidi_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
@@ -115,6 +164,10 @@ static long snd_rawmidi_ioctl_compat(struct file *file, unsigned int cmd, unsign
return snd_rawmidi_ioctl_params_compat(rfile, argp);
case SNDRV_RAWMIDI_IOCTL_STATUS32:
return snd_rawmidi_ioctl_status_compat(rfile, argp);
+#ifdef CONFIG_X86_X32
+ case SNDRV_RAWMIDI_IOCTL_STATUS_X32:
+ return snd_rawmidi_ioctl_status_x32(rfile, argp);
+#endif /* CONFIG_X86_X32 */
}
return -ENOIOCTLCMD;
}
diff --git a/sound/core/seq/oss/seq_oss.c b/sound/core/seq/oss/seq_oss.c
index 7354b8bed860..cb23899100ee 100644
--- a/sound/core/seq/oss/seq_oss.c
+++ b/sound/core/seq/oss/seq_oss.c
@@ -148,8 +148,6 @@ odev_release(struct inode *inode, struct file *file)
if ((dp = file->private_data) == NULL)
return 0;
- snd_seq_oss_drain_write(dp);
-
mutex_lock(&register_mutex);
snd_seq_oss_release(dp);
mutex_unlock(&register_mutex);
diff --git a/sound/core/seq/oss/seq_oss_device.h b/sound/core/seq/oss/seq_oss_device.h
index b43924325249..d7b4d016b547 100644
--- a/sound/core/seq/oss/seq_oss_device.h
+++ b/sound/core/seq/oss/seq_oss_device.h
@@ -127,7 +127,6 @@ int snd_seq_oss_write(struct seq_oss_devinfo *dp, const char __user *buf, int co
unsigned int snd_seq_oss_poll(struct seq_oss_devinfo *dp, struct file *file, poll_table * wait);
void snd_seq_oss_reset(struct seq_oss_devinfo *dp);
-void snd_seq_oss_drain_write(struct seq_oss_devinfo *dp);
/* */
void snd_seq_oss_process_queue(struct seq_oss_devinfo *dp, abstime_t time);
diff --git a/sound/core/seq/oss/seq_oss_init.c b/sound/core/seq/oss/seq_oss_init.c
index b1221b29728e..92c96a95a903 100644
--- a/sound/core/seq/oss/seq_oss_init.c
+++ b/sound/core/seq/oss/seq_oss_init.c
@@ -202,7 +202,7 @@ snd_seq_oss_open(struct file *file, int level)
dp->index = i;
if (i >= SNDRV_SEQ_OSS_MAX_CLIENTS) {
- pr_err("ALSA: seq_oss: too many applications\n");
+ pr_debug("ALSA: seq_oss: too many applications\n");
rc = -ENOMEM;
goto _error;
}
@@ -436,22 +436,6 @@ snd_seq_oss_release(struct seq_oss_devinfo *dp)
/*
- * Wait until the queue is empty (if we don't have nonblock)
- */
-void
-snd_seq_oss_drain_write(struct seq_oss_devinfo *dp)
-{
- if (! dp->timer->running)
- return;
- if (is_write_mode(dp->file_mode) && !is_nonblock_mode(dp->file_mode) &&
- dp->writeq) {
- while (snd_seq_oss_writeq_sync(dp->writeq))
- ;
- }
-}
-
-
-/*
* reset sequencer devices
*/
void
diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
index 0f3b38184fe5..b16dbef04174 100644
--- a/sound/core/seq/oss/seq_oss_synth.c
+++ b/sound/core/seq/oss/seq_oss_synth.c
@@ -308,7 +308,7 @@ snd_seq_oss_synth_cleanup(struct seq_oss_devinfo *dp)
struct seq_oss_synth *rec;
struct seq_oss_synthinfo *info;
- if (snd_BUG_ON(dp->max_synthdev >= SNDRV_SEQ_OSS_MAX_SYNTH_DEVS))
+ if (snd_BUG_ON(dp->max_synthdev > SNDRV_SEQ_OSS_MAX_SYNTH_DEVS))
return;
for (i = 0; i < dp->max_synthdev; i++) {
info = &dp->synths[i];
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index b64f20deba90..58e79e02f217 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -678,6 +678,9 @@ static int deliver_to_subscribers(struct snd_seq_client *client,
else
down_read(&grp->list_mutex);
list_for_each_entry(subs, &grp->list_head, src_list) {
+ /* both ports ready? */
+ if (atomic_read(&subs->ref_count) != 2)
+ continue;
event->dest = subs->info.dest;
if (subs->info.flags & SNDRV_SEQ_PORT_SUBS_TIMESTAMP)
/* convert time according to flag with subscription */
@@ -1962,7 +1965,7 @@ static int snd_seq_ioctl_remove_events(struct snd_seq_client *client,
* No restrictions so for a user client we can clear
* the whole fifo
*/
- if (client->type == USER_CLIENT)
+ if (client->type == USER_CLIENT && client->data.user.fifo)
snd_seq_fifo_clear(client->data.user.fifo);
}
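
The != 2 test encodes the subscription lifecycle set up by the seq_ports rework further below (commentary sketch):

	/* subs->ref_count as a connection state:
	 *   0  freshly allocated, nothing subscribed yet
	 *   1  only one endpoint subscribed, or a disconnect in progress
	 *   2  src and dest both subscribed -- only now deliverable
	 *
	 * Half-built or half-torn-down connections are skipped, not failed,
	 * so delivery to the remaining valid subscribers still proceeds.
	 */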
diff --git a/sound/core/seq/seq_compat.c b/sound/core/seq/seq_compat.c
index 81f7c109dc46..65175902a68a 100644
--- a/sound/core/seq/seq_compat.c
+++ b/sound/core/seq/seq_compat.c
@@ -49,11 +49,12 @@ static int snd_seq_call_port_info_ioctl(struct snd_seq_client *client, unsigned
struct snd_seq_port_info *data;
mm_segment_t fs;
- data = memdup_user(data32, sizeof(*data32));
- if (IS_ERR(data))
- return PTR_ERR(data);
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
- if (get_user(data->flags, &data32->flags) ||
+ if (copy_from_user(data, data32, sizeof(*data32)) ||
+ get_user(data->flags, &data32->flags) ||
get_user(data->time_queue, &data32->time_queue))
goto error;
data->kernel = NULL;
diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
index 801076687bb1..c850345c43b5 100644
--- a/sound/core/seq/seq_memory.c
+++ b/sound/core/seq/seq_memory.c
@@ -383,15 +383,20 @@ int snd_seq_pool_init(struct snd_seq_pool *pool)
if (snd_BUG_ON(!pool))
return -EINVAL;
- if (pool->ptr) /* should be atomic? */
- return 0;
- pool->ptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size);
- if (!pool->ptr)
+ cellptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size);
+ if (!cellptr)
return -ENOMEM;
/* add new cells to the free cell list */
spin_lock_irqsave(&pool->lock, flags);
+ if (pool->ptr) {
+ spin_unlock_irqrestore(&pool->lock, flags);
+ vfree(cellptr);
+ return 0;
+ }
+
+ pool->ptr = cellptr;
pool->free = NULL;
for (cell = 0; cell < pool->size; cell++) {
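
vmalloc() may sleep, so it cannot run under pool->lock; the allocation is therefore done first, and the race against a concurrent initializer is resolved under the lock by discarding the losing buffer. The pattern, with hypothetical names:

	struct pool { spinlock_t lock; void *ptr; };

	static int pool_init_racefree(struct pool *pool, size_t size)
	{
		unsigned long flags;
		void *new;

		new = vmalloc(size);		/* may sleep: allocate unlocked */
		if (!new)
			return -ENOMEM;
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->ptr) {		/* lost the race: keep the winner's */
			spin_unlock_irqrestore(&pool->lock, flags);
			vfree(new);
			return 0;
		}
		pool->ptr = new;
		/* ... build the free-cell list while still locked ... */
		spin_unlock_irqrestore(&pool->lock, flags);
		return 0;
	}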
diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c
index 55170a20ae72..fe686ee41c6d 100644
--- a/sound/core/seq/seq_ports.c
+++ b/sound/core/seq/seq_ports.c
@@ -173,10 +173,6 @@ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
}
/* */
-enum group_type {
- SRC_LIST, DEST_LIST
-};
-
static int subscribe_port(struct snd_seq_client *client,
struct snd_seq_client_port *port,
struct snd_seq_port_subs_info *grp,
@@ -203,6 +199,20 @@ static struct snd_seq_client_port *get_client_port(struct snd_seq_addr *addr,
return NULL;
}
+static void delete_and_unsubscribe_port(struct snd_seq_client *client,
+ struct snd_seq_client_port *port,
+ struct snd_seq_subscribers *subs,
+ bool is_src, bool ack);
+
+static inline struct snd_seq_subscribers *
+get_subscriber(struct list_head *p, bool is_src)
+{
+ if (is_src)
+ return list_entry(p, struct snd_seq_subscribers, src_list);
+ else
+ return list_entry(p, struct snd_seq_subscribers, dest_list);
+}
+
/*
* remove all subscribers on the list
* this is called from port_delete, for each src and dest list.
@@ -210,7 +220,7 @@ static struct snd_seq_client_port *get_client_port(struct snd_seq_addr *addr,
static void clear_subscriber_list(struct snd_seq_client *client,
struct snd_seq_client_port *port,
struct snd_seq_port_subs_info *grp,
- int grptype)
+ int is_src)
{
struct list_head *p, *n;
@@ -219,15 +229,13 @@ static void clear_subscriber_list(struct snd_seq_client *client,
struct snd_seq_client *c;
struct snd_seq_client_port *aport;
- if (grptype == SRC_LIST) {
- subs = list_entry(p, struct snd_seq_subscribers, src_list);
+ subs = get_subscriber(p, is_src);
+ if (is_src)
aport = get_client_port(&subs->info.dest, &c);
- } else {
- subs = list_entry(p, struct snd_seq_subscribers, dest_list);
+ else
aport = get_client_port(&subs->info.sender, &c);
- }
- list_del(p);
- unsubscribe_port(client, port, grp, &subs->info, 0);
+ delete_and_unsubscribe_port(client, port, subs, is_src, false);
+
if (!aport) {
/* looks like the connected port is being deleted.
* we decrease the counter, and when both ports are deleted
@@ -235,21 +243,14 @@ static void clear_subscriber_list(struct snd_seq_client *client,
*/
if (atomic_dec_and_test(&subs->ref_count))
kfree(subs);
- } else {
- /* ok we got the connected port */
- struct snd_seq_port_subs_info *agrp;
- agrp = (grptype == SRC_LIST) ? &aport->c_dest : &aport->c_src;
- down_write(&agrp->list_mutex);
- if (grptype == SRC_LIST)
- list_del(&subs->dest_list);
- else
- list_del(&subs->src_list);
- up_write(&agrp->list_mutex);
- unsubscribe_port(c, aport, agrp, &subs->info, 1);
- kfree(subs);
- snd_seq_port_unlock(aport);
- snd_seq_client_unlock(c);
+ continue;
}
+
+ /* ok we got the connected port */
+ delete_and_unsubscribe_port(c, aport, subs, !is_src, true);
+ kfree(subs);
+ snd_seq_port_unlock(aport);
+ snd_seq_client_unlock(c);
}
}
@@ -262,8 +263,8 @@ static int port_delete(struct snd_seq_client *client,
snd_use_lock_sync(&port->use_lock);
/* clear subscribers info */
- clear_subscriber_list(client, port, &port->c_src, SRC_LIST);
- clear_subscriber_list(client, port, &port->c_dest, DEST_LIST);
+ clear_subscriber_list(client, port, &port->c_src, true);
+ clear_subscriber_list(client, port, &port->c_dest, false);
if (port->private_free)
port->private_free(port->private_data);
@@ -479,85 +480,123 @@ static int match_subs_info(struct snd_seq_port_subscribe *r,
return 0;
}
-
-/* connect two ports */
-int snd_seq_port_connect(struct snd_seq_client *connector,
- struct snd_seq_client *src_client,
- struct snd_seq_client_port *src_port,
- struct snd_seq_client *dest_client,
- struct snd_seq_client_port *dest_port,
- struct snd_seq_port_subscribe *info)
+static int check_and_subscribe_port(struct snd_seq_client *client,
+ struct snd_seq_client_port *port,
+ struct snd_seq_subscribers *subs,
+ bool is_src, bool exclusive, bool ack)
{
- struct snd_seq_port_subs_info *src = &src_port->c_src;
- struct snd_seq_port_subs_info *dest = &dest_port->c_dest;
- struct snd_seq_subscribers *subs, *s;
- int err, src_called = 0;
- unsigned long flags;
- int exclusive;
-
- subs = kzalloc(sizeof(*subs), GFP_KERNEL);
- if (! subs)
- return -ENOMEM;
-
- subs->info = *info;
- atomic_set(&subs->ref_count, 2);
+ struct snd_seq_port_subs_info *grp;
+ struct list_head *p;
+ struct snd_seq_subscribers *s;
+ int err;
- down_write(&src->list_mutex);
- down_write_nested(&dest->list_mutex, SINGLE_DEPTH_NESTING);
-
- exclusive = info->flags & SNDRV_SEQ_PORT_SUBS_EXCLUSIVE ? 1 : 0;
+ grp = is_src ? &port->c_src : &port->c_dest;
err = -EBUSY;
+ down_write(&grp->list_mutex);
if (exclusive) {
- if (! list_empty(&src->list_head) || ! list_empty(&dest->list_head))
+ if (!list_empty(&grp->list_head))
goto __error;
} else {
- if (src->exclusive || dest->exclusive)
+ if (grp->exclusive)
goto __error;
/* check whether already exists */
- list_for_each_entry(s, &src->list_head, src_list) {
- if (match_subs_info(info, &s->info))
- goto __error;
- }
- list_for_each_entry(s, &dest->list_head, dest_list) {
- if (match_subs_info(info, &s->info))
+ list_for_each(p, &grp->list_head) {
+ s = get_subscriber(p, is_src);
+ if (match_subs_info(&subs->info, &s->info))
goto __error;
}
}
- if ((err = subscribe_port(src_client, src_port, src, info,
- connector->number != src_client->number)) < 0)
- goto __error;
- src_called = 1;
-
- if ((err = subscribe_port(dest_client, dest_port, dest, info,
- connector->number != dest_client->number)) < 0)
+ err = subscribe_port(client, port, grp, &subs->info, ack);
+ if (err < 0) {
+ grp->exclusive = 0;
goto __error;
+ }
/* add to list */
- write_lock_irqsave(&src->list_lock, flags);
- // write_lock(&dest->list_lock); // no other lock yet
- list_add_tail(&subs->src_list, &src->list_head);
- list_add_tail(&subs->dest_list, &dest->list_head);
- // write_unlock(&dest->list_lock); // no other lock yet
- write_unlock_irqrestore(&src->list_lock, flags);
+ write_lock_irq(&grp->list_lock);
+ if (is_src)
+ list_add_tail(&subs->src_list, &grp->list_head);
+ else
+ list_add_tail(&subs->dest_list, &grp->list_head);
+ grp->exclusive = exclusive;
+ atomic_inc(&subs->ref_count);
+ write_unlock_irq(&grp->list_lock);
+ err = 0;
- src->exclusive = dest->exclusive = exclusive;
+ __error:
+ up_write(&grp->list_mutex);
+ return err;
+}
+
+static void delete_and_unsubscribe_port(struct snd_seq_client *client,
+ struct snd_seq_client_port *port,
+ struct snd_seq_subscribers *subs,
+ bool is_src, bool ack)
+{
+ struct snd_seq_port_subs_info *grp;
+ struct list_head *list;
+ bool empty;
+
+ grp = is_src ? &port->c_src : &port->c_dest;
+ list = is_src ? &subs->src_list : &subs->dest_list;
+ down_write(&grp->list_mutex);
+ write_lock_irq(&grp->list_lock);
+ empty = list_empty(list);
+ if (!empty)
+ list_del_init(list);
+ grp->exclusive = 0;
+ write_unlock_irq(&grp->list_lock);
+ up_write(&grp->list_mutex);
+
+ if (!empty)
+ unsubscribe_port(client, port, grp, &subs->info, ack);
+}
+
+/* connect two ports */
+int snd_seq_port_connect(struct snd_seq_client *connector,
+ struct snd_seq_client *src_client,
+ struct snd_seq_client_port *src_port,
+ struct snd_seq_client *dest_client,
+ struct snd_seq_client_port *dest_port,
+ struct snd_seq_port_subscribe *info)
+{
+ struct snd_seq_subscribers *subs;
+ bool exclusive;
+ int err;
+
+ subs = kzalloc(sizeof(*subs), GFP_KERNEL);
+ if (!subs)
+ return -ENOMEM;
+
+ subs->info = *info;
+ atomic_set(&subs->ref_count, 0);
+ INIT_LIST_HEAD(&subs->src_list);
+ INIT_LIST_HEAD(&subs->dest_list);
+
+ exclusive = !!(info->flags & SNDRV_SEQ_PORT_SUBS_EXCLUSIVE);
+
+ err = check_and_subscribe_port(src_client, src_port, subs, true,
+ exclusive,
+ connector->number != src_client->number);
+ if (err < 0)
+ goto error;
+ err = check_and_subscribe_port(dest_client, dest_port, subs, false,
+ exclusive,
+ connector->number != dest_client->number);
+ if (err < 0)
+ goto error_dest;
- up_write(&dest->list_mutex);
- up_write(&src->list_mutex);
return 0;
- __error:
- if (src_called)
- unsubscribe_port(src_client, src_port, src, info,
- connector->number != src_client->number);
+ error_dest:
+ delete_and_unsubscribe_port(src_client, src_port, subs, true,
+ connector->number != src_client->number);
+ error:
kfree(subs);
- up_write(&dest->list_mutex);
- up_write(&src->list_mutex);
return err;
}
-
/* remove the connection */
int snd_seq_port_disconnect(struct snd_seq_client *connector,
struct snd_seq_client *src_client,
@@ -567,37 +606,28 @@ int snd_seq_port_disconnect(struct snd_seq_client *connector,
struct snd_seq_port_subscribe *info)
{
struct snd_seq_port_subs_info *src = &src_port->c_src;
- struct snd_seq_port_subs_info *dest = &dest_port->c_dest;
struct snd_seq_subscribers *subs;
int err = -ENOENT;
- unsigned long flags;
down_write(&src->list_mutex);
- down_write_nested(&dest->list_mutex, SINGLE_DEPTH_NESTING);
-
/* look for the connection */
list_for_each_entry(subs, &src->list_head, src_list) {
if (match_subs_info(info, &subs->info)) {
- write_lock_irqsave(&src->list_lock, flags);
- // write_lock(&dest->list_lock); // no lock yet
- list_del(&subs->src_list);
- list_del(&subs->dest_list);
- // write_unlock(&dest->list_lock);
- write_unlock_irqrestore(&src->list_lock, flags);
- src->exclusive = dest->exclusive = 0;
- unsubscribe_port(src_client, src_port, src, info,
- connector->number != src_client->number);
- unsubscribe_port(dest_client, dest_port, dest, info,
- connector->number != dest_client->number);
- kfree(subs);
+ atomic_dec(&subs->ref_count); /* mark as not ready */
err = 0;
break;
}
}
-
- up_write(&dest->list_mutex);
up_write(&src->list_mutex);
- return err;
+ if (err < 0)
+ return err;
+
+ delete_and_unsubscribe_port(src_client, src_port, subs, true,
+ connector->number != src_client->number);
+ delete_and_unsubscribe_port(dest_client, dest_port, subs, false,
+ connector->number != dest_client->number);
+ kfree(subs);
+ return 0;
}
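
The rewritten connect is a plain two-step subscribe with goto unwind; each successful check_and_subscribe_port() bumps subs->ref_count, and a failure on the second step rolls back the first before freeing. Reduced shape (illustrative names):

	err = check_and_subscribe(src);		/* ref_count 0 -> 1 */
	if (err < 0)
		goto free;
	err = check_and_subscribe(dest);	/* ref_count 1 -> 2 */
	if (err < 0)
		goto unsub_src;
	return 0;				/* fully connected */

	unsub_src:
		delete_and_unsubscribe(src);
	free:
		kfree(subs);
		return err;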
diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
index 7dfd0f429410..0bec02e89d51 100644
--- a/sound/core/seq/seq_queue.c
+++ b/sound/core/seq/seq_queue.c
@@ -142,8 +142,10 @@ static struct snd_seq_queue *queue_new(int owner, int locked)
static void queue_delete(struct snd_seq_queue *q)
{
/* stop and release the timer */
+ mutex_lock(&q->timer_mutex);
snd_seq_timer_stop(q->timer);
snd_seq_timer_close(q);
+ mutex_unlock(&q->timer_mutex);
/* wait until access free */
snd_use_lock_sync(&q->use_lock);
/* release resources... */
diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
index 82b220c769c1..293104926098 100644
--- a/sound/core/seq/seq_timer.c
+++ b/sound/core/seq/seq_timer.c
@@ -90,6 +90,9 @@ void snd_seq_timer_delete(struct snd_seq_timer **tmr)
void snd_seq_timer_defaults(struct snd_seq_timer * tmr)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&tmr->lock, flags);
/* setup defaults */
tmr->ppq = 96; /* 96 PPQ */
tmr->tempo = 500000; /* 120 BPM */
@@ -105,21 +108,25 @@ void snd_seq_timer_defaults(struct snd_seq_timer * tmr)
tmr->preferred_resolution = seq_default_timer_resolution;
tmr->skew = tmr->skew_base = SKEW_BASE;
+ spin_unlock_irqrestore(&tmr->lock, flags);
}
-void snd_seq_timer_reset(struct snd_seq_timer * tmr)
+static void seq_timer_reset(struct snd_seq_timer *tmr)
{
- unsigned long flags;
-
- spin_lock_irqsave(&tmr->lock, flags);
-
/* reset time & songposition */
tmr->cur_time.tv_sec = 0;
tmr->cur_time.tv_nsec = 0;
tmr->tick.cur_tick = 0;
tmr->tick.fraction = 0;
+}
+
+void snd_seq_timer_reset(struct snd_seq_timer *tmr)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&tmr->lock, flags);
+ seq_timer_reset(tmr);
spin_unlock_irqrestore(&tmr->lock, flags);
}
@@ -138,8 +145,11 @@ static void snd_seq_timer_interrupt(struct snd_timer_instance *timeri,
tmr = q->timer;
if (tmr == NULL)
return;
- if (!tmr->running)
+ spin_lock_irqsave(&tmr->lock, flags);
+ if (!tmr->running) {
+ spin_unlock_irqrestore(&tmr->lock, flags);
return;
+ }
resolution *= ticks;
if (tmr->skew != tmr->skew_base) {
@@ -148,8 +158,6 @@ static void snd_seq_timer_interrupt(struct snd_timer_instance *timeri,
(((resolution & 0xffff) * tmr->skew) >> 16);
}
- spin_lock_irqsave(&tmr->lock, flags);
-
/* update timer */
snd_seq_inc_time_nsec(&tmr->cur_time, resolution);
@@ -296,26 +304,30 @@ int snd_seq_timer_open(struct snd_seq_queue *q)
t->callback = snd_seq_timer_interrupt;
t->callback_data = q;
t->flags |= SNDRV_TIMER_IFLG_AUTO;
+ spin_lock_irq(&tmr->lock);
tmr->timeri = t;
+ spin_unlock_irq(&tmr->lock);
return 0;
}
int snd_seq_timer_close(struct snd_seq_queue *q)
{
struct snd_seq_timer *tmr;
+ struct snd_timer_instance *t;
tmr = q->timer;
if (snd_BUG_ON(!tmr))
return -EINVAL;
- if (tmr->timeri) {
- snd_timer_stop(tmr->timeri);
- snd_timer_close(tmr->timeri);
- tmr->timeri = NULL;
- }
+ spin_lock_irq(&tmr->lock);
+ t = tmr->timeri;
+ tmr->timeri = NULL;
+ spin_unlock_irq(&tmr->lock);
+ if (t)
+ snd_timer_close(t);
return 0;
}
-int snd_seq_timer_stop(struct snd_seq_timer * tmr)
+static int seq_timer_stop(struct snd_seq_timer *tmr)
{
if (! tmr->timeri)
return -EINVAL;
@@ -326,6 +338,17 @@ int snd_seq_timer_stop(struct snd_seq_timer * tmr)
return 0;
}
+int snd_seq_timer_stop(struct snd_seq_timer *tmr)
+{
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&tmr->lock, flags);
+ err = seq_timer_stop(tmr);
+ spin_unlock_irqrestore(&tmr->lock, flags);
+ return err;
+}
+
static int initialize_timer(struct snd_seq_timer *tmr)
{
struct snd_timer *t;
@@ -358,13 +381,13 @@ static int initialize_timer(struct snd_seq_timer *tmr)
return 0;
}
-int snd_seq_timer_start(struct snd_seq_timer * tmr)
+static int seq_timer_start(struct snd_seq_timer *tmr)
{
if (! tmr->timeri)
return -EINVAL;
if (tmr->running)
- snd_seq_timer_stop(tmr);
- snd_seq_timer_reset(tmr);
+ seq_timer_stop(tmr);
+ seq_timer_reset(tmr);
if (initialize_timer(tmr) < 0)
return -EINVAL;
snd_timer_start(tmr->timeri, tmr->ticks);
@@ -373,14 +396,25 @@ int snd_seq_timer_start(struct snd_seq_timer * tmr)
return 0;
}
-int snd_seq_timer_continue(struct snd_seq_timer * tmr)
+int snd_seq_timer_start(struct snd_seq_timer *tmr)
+{
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&tmr->lock, flags);
+ err = seq_timer_start(tmr);
+ spin_unlock_irqrestore(&tmr->lock, flags);
+ return err;
+}
+
+static int seq_timer_continue(struct snd_seq_timer *tmr)
{
if (! tmr->timeri)
return -EINVAL;
if (tmr->running)
return -EBUSY;
if (! tmr->initialized) {
- snd_seq_timer_reset(tmr);
+ seq_timer_reset(tmr);
if (initialize_timer(tmr) < 0)
return -EINVAL;
}
@@ -390,11 +424,24 @@ int snd_seq_timer_continue(struct snd_seq_timer * tmr)
return 0;
}
+int snd_seq_timer_continue(struct snd_seq_timer *tmr)
+{
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&tmr->lock, flags);
+ err = seq_timer_continue(tmr);
+ spin_unlock_irqrestore(&tmr->lock, flags);
+ return err;
+}
+
/* return current 'real' time. use timeofday() to get better granularity. */
snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
{
snd_seq_real_time_t cur_time;
+ unsigned long flags;
+ spin_lock_irqsave(&tmr->lock, flags);
cur_time = tmr->cur_time;
if (tmr->running) {
struct timeval tm;
@@ -410,7 +457,7 @@ snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
}
snd_seq_sanity_real_time(&cur_time);
}
-
+ spin_unlock_irqrestore(&tmr->lock, flags);
return cur_time;
}
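
Checking tmr->running only under tmr->lock closes a stop/interrupt race; tested outside the lock, the flag could flip between the test and the locked region (illustrative trace):

	/*
	 *   CPU0: snd_seq_timer_interrupt     CPU1: snd_seq_timer_stop
	 *   if (!tmr->running)  -> still set
	 *                                     spin_lock(&tmr->lock)
	 *                                     tmr->running = 0; teardown
	 *                                     spin_unlock(&tmr->lock)
	 *   spin_lock(&tmr->lock)
	 *   updates state that stop tore down
	 *
	 * With the lock taken before the test, check and use form one
	 * atomic section, as in the new snd_seq_timer_interrupt().
	 */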
diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c
index 56e0f4cd3f82..81134e067184 100644
--- a/sound/core/seq/seq_virmidi.c
+++ b/sound/core/seq/seq_virmidi.c
@@ -155,21 +155,26 @@ static void snd_virmidi_output_trigger(struct snd_rawmidi_substream *substream,
struct snd_virmidi *vmidi = substream->runtime->private_data;
int count, res;
unsigned char buf[32], *pbuf;
+ unsigned long flags;
if (up) {
vmidi->trigger = 1;
if (vmidi->seq_mode == SNDRV_VIRMIDI_SEQ_DISPATCH &&
!(vmidi->rdev->flags & SNDRV_VIRMIDI_SUBSCRIBE)) {
- snd_rawmidi_transmit_ack(substream, substream->runtime->buffer_size - substream->runtime->avail);
- return; /* ignored */
+ while (snd_rawmidi_transmit(substream, buf,
+ sizeof(buf)) > 0) {
+ /* ignored */
+ }
+ return;
}
if (vmidi->event.type != SNDRV_SEQ_EVENT_NONE) {
if (snd_seq_kernel_client_dispatch(vmidi->client, &vmidi->event, in_atomic(), 0) < 0)
return;
vmidi->event.type = SNDRV_SEQ_EVENT_NONE;
}
+ spin_lock_irqsave(&substream->runtime->lock, flags);
while (1) {
- count = snd_rawmidi_transmit_peek(substream, buf, sizeof(buf));
+ count = __snd_rawmidi_transmit_peek(substream, buf, sizeof(buf));
if (count <= 0)
break;
pbuf = buf;
@@ -179,16 +184,18 @@ static void snd_virmidi_output_trigger(struct snd_rawmidi_substream *substream,
snd_midi_event_reset_encode(vmidi->parser);
continue;
}
- snd_rawmidi_transmit_ack(substream, res);
+ __snd_rawmidi_transmit_ack(substream, res);
pbuf += res;
count -= res;
if (vmidi->event.type != SNDRV_SEQ_EVENT_NONE) {
if (snd_seq_kernel_client_dispatch(vmidi->client, &vmidi->event, in_atomic(), 0) < 0)
- return;
+ goto out;
vmidi->event.type = SNDRV_SEQ_EVENT_NONE;
}
}
}
+ out:
+ spin_unlock_irqrestore(&substream->runtime->lock, flags);
} else {
vmidi->trigger = 0;
}
@@ -254,9 +261,13 @@ static int snd_virmidi_output_open(struct snd_rawmidi_substream *substream)
*/
static int snd_virmidi_input_close(struct snd_rawmidi_substream *substream)
{
+ struct snd_virmidi_dev *rdev = substream->rmidi->private_data;
struct snd_virmidi *vmidi = substream->runtime->private_data;
- snd_midi_event_free(vmidi->parser);
+
+ write_lock_irq(&rdev->filelist_lock);
list_del(&vmidi->list);
+ write_unlock_irq(&rdev->filelist_lock);
+ snd_midi_event_free(vmidi->parser);
substream->runtime->private_data = NULL;
kfree(vmidi);
return 0;
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 8b06413e967e..b982d1b089bd 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -65,6 +65,7 @@ struct snd_timer_user {
int qtail;
int qused;
int queue_size;
+ bool disconnected;
struct snd_timer_read *queue;
struct snd_timer_tread *tqueue;
spinlock_t qlock;
@@ -73,7 +74,7 @@ struct snd_timer_user {
struct timespec tstamp; /* trigger tstamp */
wait_queue_head_t qchange_sleep;
struct fasync_struct *fasync;
- struct mutex tread_sem;
+ struct mutex ioctl_lock;
};
/* list of timers */
@@ -290,6 +291,9 @@ int snd_timer_open(struct snd_timer_instance **ti,
mutex_unlock(&register_mutex);
return -ENOMEM;
}
+ /* take a card refcount for safe disconnection */
+ if (timer->card)
+ get_device(&timer->card->card_dev);
timeri->slave_class = tid->dev_sclass;
timeri->slave_id = slave_id;
if (list_empty(&timer->open_list_head) && timer->hw.open)
@@ -301,8 +305,7 @@ int snd_timer_open(struct snd_timer_instance **ti,
return 0;
}
-static int _snd_timer_stop(struct snd_timer_instance *timeri,
- int keep_flag, int event);
+static int _snd_timer_stop(struct snd_timer_instance *timeri, int event);
/*
* close a timer instance
@@ -344,7 +347,7 @@ int snd_timer_close(struct snd_timer_instance *timeri)
spin_unlock_irq(&timer->lock);
mutex_lock(&register_mutex);
list_del(&timeri->open_list);
- if (timer && list_empty(&timer->open_list_head) &&
+ if (list_empty(&timer->open_list_head) &&
timer->hw.close)
timer->hw.close(timer);
/* remove slave links */
@@ -360,6 +363,9 @@ int snd_timer_close(struct snd_timer_instance *timeri)
}
spin_unlock(&timer->lock);
spin_unlock_irq(&slave_active_lock);
+ /* release a card refcount for safe disconnection */
+ if (timer->card)
+ put_device(&timer->card->card_dev);
mutex_unlock(&register_mutex);
}
out:
@@ -416,7 +422,7 @@ static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
spin_lock_irqsave(&timer->lock, flags);
list_for_each_entry(ts, &ti->slave_active_head, active_list)
if (ts->ccallback)
- ts->ccallback(ti, event + 100, &tstamp, resolution);
+ ts->ccallback(ts, event + 100, &tstamp, resolution);
spin_unlock_irqrestore(&timer->lock, flags);
}
@@ -445,6 +451,10 @@ static int snd_timer_start_slave(struct snd_timer_instance *timeri)
unsigned long flags;
spin_lock_irqsave(&slave_active_lock, flags);
+ if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) {
+ spin_unlock_irqrestore(&slave_active_lock, flags);
+ return -EBUSY;
+ }
timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
if (timeri->master && timeri->timer) {
spin_lock(&timeri->timer->lock);
@@ -469,23 +479,32 @@ int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
return -EINVAL;
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
result = snd_timer_start_slave(timeri);
- snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
+ if (result >= 0)
+ snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
return result;
}
timer = timeri->timer;
if (timer == NULL)
return -EINVAL;
+ if (timer->card && timer->card->shutdown)
+ return -ENODEV;
spin_lock_irqsave(&timer->lock, flags);
+ if (timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
+ SNDRV_TIMER_IFLG_START)) {
+ result = -EBUSY;
+ goto unlock;
+ }
timeri->ticks = timeri->cticks = ticks;
timeri->pticks = 0;
result = snd_timer_start1(timer, timeri, ticks);
+ unlock:
spin_unlock_irqrestore(&timer->lock, flags);
- snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
+ if (result >= 0)
+ snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
return result;
}
-static int _snd_timer_stop(struct snd_timer_instance * timeri,
- int keep_flag, int event)
+static int _snd_timer_stop(struct snd_timer_instance *timeri, int event)
{
struct snd_timer *timer;
unsigned long flags;
@@ -494,21 +513,36 @@ static int _snd_timer_stop(struct snd_timer_instance * timeri,
return -ENXIO;
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
- if (!keep_flag) {
- spin_lock_irqsave(&slave_active_lock, flags);
- timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
- list_del_init(&timeri->ack_list);
- list_del_init(&timeri->active_list);
+ spin_lock_irqsave(&slave_active_lock, flags);
+ if (!(timeri->flags & SNDRV_TIMER_IFLG_RUNNING)) {
spin_unlock_irqrestore(&slave_active_lock, flags);
+ return -EBUSY;
}
+ if (timeri->timer)
+ spin_lock(&timeri->timer->lock);
+ timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
+ list_del_init(&timeri->ack_list);
+ list_del_init(&timeri->active_list);
+ if (timeri->timer)
+ spin_unlock(&timeri->timer->lock);
+ spin_unlock_irqrestore(&slave_active_lock, flags);
goto __end;
}
timer = timeri->timer;
if (!timer)
return -EINVAL;
spin_lock_irqsave(&timer->lock, flags);
+ if (!(timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
+ SNDRV_TIMER_IFLG_START))) {
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return -EBUSY;
+ }
list_del_init(&timeri->ack_list);
list_del_init(&timeri->active_list);
+ if (timer->card && timer->card->shutdown) {
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return 0;
+ }
if ((timeri->flags & SNDRV_TIMER_IFLG_RUNNING) &&
!(--timer->running)) {
timer->hw.stop(timer);
@@ -521,9 +555,7 @@ static int _snd_timer_stop(struct snd_timer_instance * timeri,
}
}
}
- if (!keep_flag)
- timeri->flags &=
- ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
+ timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
spin_unlock_irqrestore(&timer->lock, flags);
__end:
if (event != SNDRV_TIMER_EVENT_RESOLUTION)
@@ -542,7 +574,7 @@ int snd_timer_stop(struct snd_timer_instance *timeri)
unsigned long flags;
int err;
- err = _snd_timer_stop(timeri, 0, SNDRV_TIMER_EVENT_STOP);
+ err = _snd_timer_stop(timeri, SNDRV_TIMER_EVENT_STOP);
if (err < 0)
return err;
timer = timeri->timer;
@@ -571,11 +603,18 @@ int snd_timer_continue(struct snd_timer_instance *timeri)
timer = timeri->timer;
if (! timer)
return -EINVAL;
+ if (timer->card && timer->card->shutdown)
+ return -ENODEV;
spin_lock_irqsave(&timer->lock, flags);
+ if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) {
+ result = -EBUSY;
+ goto unlock;
+ }
if (!timeri->cticks)
timeri->cticks = 1;
timeri->pticks = 0;
result = snd_timer_start1(timer, timeri, timer->sticks);
+ unlock:
spin_unlock_irqrestore(&timer->lock, flags);
snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_CONTINUE);
return result;
@@ -586,7 +625,7 @@ int snd_timer_continue(struct snd_timer_instance *timeri)
*/
int snd_timer_pause(struct snd_timer_instance * timeri)
{
- return _snd_timer_stop(timeri, 0, SNDRV_TIMER_EVENT_PAUSE);
+ return _snd_timer_stop(timeri, SNDRV_TIMER_EVENT_PAUSE);
}
/*
@@ -634,6 +673,9 @@ static void snd_timer_tasklet(unsigned long arg)
unsigned long resolution, ticks;
unsigned long flags;
+ if (timer->card && timer->card->shutdown)
+ return;
+
spin_lock_irqsave(&timer->lock, flags);
/* now process all callbacks */
while (!list_empty(&timer->sack_list_head)) {
@@ -674,6 +716,9 @@ void snd_timer_interrupt(struct snd_timer * timer, unsigned long ticks_left)
if (timer == NULL)
return;
+ if (timer->card && timer->card->shutdown)
+ return;
+
spin_lock_irqsave(&timer->lock, flags);
/* remember the current resolution */
@@ -703,8 +748,8 @@ void snd_timer_interrupt(struct snd_timer * timer, unsigned long ticks_left)
ti->cticks = ti->ticks;
} else {
ti->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
- if (--timer->running)
- list_del(&ti->active_list);
+ --timer->running;
+ list_del_init(&ti->active_list);
}
if ((timer->hw.flags & SNDRV_TIMER_HW_TASKLET) ||
(ti->flags & SNDRV_TIMER_IFLG_FAST))
@@ -884,11 +929,28 @@ static int snd_timer_dev_register(struct snd_device *dev)
return 0;
}
+/* just for reference in snd_timer_dev_disconnect() below */
+static void snd_timer_user_ccallback(struct snd_timer_instance *timeri,
+ int event, struct timespec *tstamp,
+ unsigned long resolution);
+
static int snd_timer_dev_disconnect(struct snd_device *device)
{
struct snd_timer *timer = device->device_data;
+ struct snd_timer_instance *ti;
+
mutex_lock(&register_mutex);
list_del_init(&timer->device_list);
+ /* wake up pending sleepers */
+ list_for_each_entry(ti, &timer->open_list_head, open_list) {
+ /* FIXME: better to have a ti.disconnect() op */
+ if (ti->ccallback == snd_timer_user_ccallback) {
+ struct snd_timer_user *tu = ti->callback_data;
+
+ tu->disconnected = true;
+ wake_up(&tu->qchange_sleep);
+ }
+ }
mutex_unlock(&register_mutex);
return 0;
}
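
Disconnect must not free anything while a reader sleeps in snd_timer_user_read(), so it only flags each user instance and wakes the queue; the matching get_device()/put_device() calls added to open/close keep the card object alive for as long as any instance is open. Pairing sketch (an illustrative condensation of the hunks above):

	/* open */
	if (timer->card)
		get_device(&timer->card->card_dev);	/* pin the card */
	/* ... disconnect may happen here: tu->disconnected + wake_up() ... */
	/* close */
	if (timer->card)
		put_device(&timer->card->card_dev);	/* may drop last ref */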
@@ -899,6 +961,8 @@ void snd_timer_notify(struct snd_timer *timer, int event, struct timespec *tstam
unsigned long resolution = 0;
struct snd_timer_instance *ti, *ts;
+ if (timer->card && timer->card->shutdown)
+ return;
if (! (timer->hw.flags & SNDRV_TIMER_HW_SLAVE))
return;
if (snd_BUG_ON(event < SNDRV_TIMER_EVENT_MSTART ||
@@ -987,8 +1051,8 @@ static int snd_timer_s_start(struct snd_timer * timer)
njiff += timer->sticks - priv->correction;
priv->correction = 0;
}
- priv->last_expires = priv->tlist.expires = njiff;
- add_timer(&priv->tlist);
+ priv->last_expires = njiff;
+ mod_timer(&priv->tlist, njiff);
return 0;
}
@@ -1057,6 +1121,8 @@ static void snd_timer_proc_read(struct snd_info_entry *entry,
mutex_lock(&register_mutex);
list_for_each_entry(timer, &snd_timer_list, device_list) {
+ if (timer->card && timer->card->shutdown)
+ continue;
switch (timer->tmr_class) {
case SNDRV_TIMER_CLASS_GLOBAL:
snd_iprintf(buffer, "G%i: ", timer->tmr_device);
@@ -1263,7 +1329,7 @@ static int snd_timer_user_open(struct inode *inode, struct file *file)
return -ENOMEM;
spin_lock_init(&tu->qlock);
init_waitqueue_head(&tu->qchange_sleep);
- mutex_init(&tu->tread_sem);
+ mutex_init(&tu->ioctl_lock);
tu->ticks = 1;
tu->queue_size = 128;
tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read),
@@ -1283,8 +1349,10 @@ static int snd_timer_user_release(struct inode *inode, struct file *file)
if (file->private_data) {
tu = file->private_data;
file->private_data = NULL;
+ mutex_lock(&tu->ioctl_lock);
if (tu->timeri)
snd_timer_close(tu->timeri);
+ mutex_unlock(&tu->ioctl_lock);
kfree(tu->queue);
kfree(tu->tqueue);
kfree(tu);
@@ -1522,7 +1590,6 @@ static int snd_timer_user_tselect(struct file *file,
int err = 0;
tu = file->private_data;
- mutex_lock(&tu->tread_sem);
if (tu->timeri) {
snd_timer_close(tu->timeri);
tu->timeri = NULL;
@@ -1566,7 +1633,6 @@ static int snd_timer_user_tselect(struct file *file,
}
__err:
- mutex_unlock(&tu->tread_sem);
return err;
}
@@ -1779,7 +1845,7 @@ enum {
SNDRV_TIMER_IOCTL_PAUSE_OLD = _IO('T', 0x23),
};
-static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
+static long __snd_timer_user_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct snd_timer_user *tu;
@@ -1796,17 +1862,11 @@ static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
{
int xarg;
- mutex_lock(&tu->tread_sem);
- if (tu->timeri) { /* too late */
- mutex_unlock(&tu->tread_sem);
+ if (tu->timeri) /* too late */
return -EBUSY;
- }
- if (get_user(xarg, p)) {
- mutex_unlock(&tu->tread_sem);
+ if (get_user(xarg, p))
return -EFAULT;
- }
tu->tread = xarg ? 1 : 0;
- mutex_unlock(&tu->tread_sem);
return 0;
}
case SNDRV_TIMER_IOCTL_GINFO:
@@ -1839,6 +1899,18 @@ static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
return -ENOTTY;
}
+static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct snd_timer_user *tu = file->private_data;
+ long ret;
+
+ mutex_lock(&tu->ioctl_lock);
+ ret = __snd_timer_user_ioctl(file, cmd, arg);
+ mutex_unlock(&tu->ioctl_lock);
+ return ret;
+}
+
static int snd_timer_user_fasync(int fd, struct file * file, int on)
{
struct snd_timer_user *tu;
@@ -1852,6 +1924,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
{
struct snd_timer_user *tu;
long result = 0, unit;
+ int qhead;
int err = 0;
tu = file->private_data;
@@ -1863,7 +1936,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
err = -EAGAIN;
- break;
+ goto _error;
}
set_current_state(TASK_INTERRUPTIBLE);
@@ -1876,40 +1949,39 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
remove_wait_queue(&tu->qchange_sleep, &wait);
+ if (tu->disconnected) {
+ err = -ENODEV;
+ goto _error;
+ }
if (signal_pending(current)) {
err = -ERESTARTSYS;
- break;
+ goto _error;
}
}
+ qhead = tu->qhead++;
+ tu->qhead %= tu->queue_size;
spin_unlock_irq(&tu->qlock);
- if (err < 0)
- goto _error;
if (tu->tread) {
- if (copy_to_user(buffer, &tu->tqueue[tu->qhead++],
- sizeof(struct snd_timer_tread))) {
+ if (copy_to_user(buffer, &tu->tqueue[qhead],
+ sizeof(struct snd_timer_tread)))
err = -EFAULT;
- goto _error;
- }
} else {
- if (copy_to_user(buffer, &tu->queue[tu->qhead++],
- sizeof(struct snd_timer_read))) {
+ if (copy_to_user(buffer, &tu->queue[qhead],
+ sizeof(struct snd_timer_read)))
err = -EFAULT;
- goto _error;
- }
}
- tu->qhead %= tu->queue_size;
-
- result += unit;
- buffer += unit;
-
spin_lock_irq(&tu->qlock);
tu->qused--;
+ if (err < 0)
+ goto _error;
+ result += unit;
+ buffer += unit;
}
- spin_unlock_irq(&tu->qlock);
_error:
+ spin_unlock_irq(&tu->qlock);
return result > 0 ? result : err;
}
@@ -1925,6 +1997,8 @@ static unsigned int snd_timer_user_poll(struct file *file, poll_table * wait)
mask = 0;
if (tu->qused)
mask |= POLLIN | POLLRDNORM;
+ if (tu->disconnected)
+ mask |= POLLERR;
return mask;
}
diff --git a/sound/core/timer_compat.c b/sound/core/timer_compat.c
index e05802ae6e1b..2e908225d754 100644
--- a/sound/core/timer_compat.c
+++ b/sound/core/timer_compat.c
@@ -70,13 +70,14 @@ static int snd_timer_user_status_compat(struct file *file,
struct snd_timer_status32 __user *_status)
{
struct snd_timer_user *tu;
- struct snd_timer_status status;
+ struct snd_timer_status32 status;
tu = file->private_data;
if (snd_BUG_ON(!tu->timeri))
return -ENXIO;
memset(&status, 0, sizeof(status));
- status.tstamp = tu->tstamp;
+ status.tstamp.tv_sec = tu->tstamp.tv_sec;
+ status.tstamp.tv_nsec = tu->tstamp.tv_nsec;
status.resolution = snd_timer_resolution(tu->timeri);
status.lost = tu->timeri->lost;
status.overrun = tu->overrun;
@@ -88,12 +89,21 @@ static int snd_timer_user_status_compat(struct file *file,
return 0;
}
+#ifdef CONFIG_X86_X32
+/* X32 ABI has the same struct as x86-64 */
+#define snd_timer_user_status_x32(file, s) \
+ snd_timer_user_status(file, s)
+#endif /* CONFIG_X86_X32 */
+
/*
*/
enum {
SNDRV_TIMER_IOCTL_INFO32 = _IOR('T', 0x11, struct snd_timer_info32),
SNDRV_TIMER_IOCTL_STATUS32 = _IOW('T', 0x14, struct snd_timer_status32),
+#ifdef CONFIG_X86_X32
+ SNDRV_TIMER_IOCTL_STATUS_X32 = _IOW('T', 0x14, struct snd_timer_status),
+#endif /* CONFIG_X86_X32 */
};
static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
@@ -122,6 +132,10 @@ static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
return snd_timer_user_info_compat(file, argp);
case SNDRV_TIMER_IOCTL_STATUS32:
return snd_timer_user_status_compat(file, argp);
+#ifdef CONFIG_X86_X32
+ case SNDRV_TIMER_IOCTL_STATUS_X32:
+ return snd_timer_user_status_x32(file, argp);
+#endif /* CONFIG_X86_X32 */
}
return -ENOIOCTLCMD;
}
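
The status_compat fix above switches the local variable to the 32-bit layout and copies the timestamp field by field. That matters because the native struct timespec is wider than its 32-bit counterpart, so a whole-struct assignment would not match the compat layout. A hedged illustration (the ex_* names and layouts are assumptions, not the ALSA definitions):

#include <linux/types.h>

/* Hypothetical 32-bit ABI mirror of a native status struct. */
struct ex_timespec32 {
        s32 tv_sec;
        s32 tv_nsec;
};

struct ex_status32 {
        struct ex_timespec32 tstamp;    /* 8 bytes on the 32-bit ABI */
        u32 resolution;
};

/* Member-wise assignment lets the compiler narrow each field
 * explicitly instead of copying a wider native struct:
 *     s.tstamp.tv_sec  = native.tstamp.tv_sec;
 *     s.tstamp.tv_nsec = native.tstamp.tv_nsec;
 */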
diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
index 016e451ed506..a9f7a75702d2 100644
--- a/sound/drivers/dummy.c
+++ b/sound/drivers/dummy.c
@@ -109,6 +109,9 @@ struct dummy_timer_ops {
snd_pcm_uframes_t (*pointer)(struct snd_pcm_substream *);
};
+#define get_dummy_ops(substream) \
+ (*(const struct dummy_timer_ops **)(substream)->runtime->private_data)
+
struct dummy_model {
const char *name;
int (*playback_constraints)(struct snd_pcm_runtime *runtime);
@@ -137,7 +140,6 @@ struct snd_dummy {
int iobox;
struct snd_kcontrol *cd_volume_ctl;
struct snd_kcontrol *cd_switch_ctl;
- const struct dummy_timer_ops *timer_ops;
};
/*
@@ -231,6 +233,8 @@ static struct dummy_model *dummy_models[] = {
*/
struct dummy_systimer_pcm {
+ /* ops must be the first item */
+ const struct dummy_timer_ops *timer_ops;
spinlock_t lock;
struct timer_list timer;
unsigned long base_time;
@@ -366,6 +370,8 @@ static struct dummy_timer_ops dummy_systimer_ops = {
*/
struct dummy_hrtimer_pcm {
+ /* ops must be the first item */
+ const struct dummy_timer_ops *timer_ops;
ktime_t base_time;
ktime_t period_time;
atomic_t running;
@@ -492,31 +498,25 @@ static struct dummy_timer_ops dummy_hrtimer_ops = {
static int dummy_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
- struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
-
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
- return dummy->timer_ops->start(substream);
+ return get_dummy_ops(substream)->start(substream);
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
- return dummy->timer_ops->stop(substream);
+ return get_dummy_ops(substream)->stop(substream);
}
return -EINVAL;
}
static int dummy_pcm_prepare(struct snd_pcm_substream *substream)
{
- struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
-
- return dummy->timer_ops->prepare(substream);
+ return get_dummy_ops(substream)->prepare(substream);
}
static snd_pcm_uframes_t dummy_pcm_pointer(struct snd_pcm_substream *substream)
{
- struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
-
- return dummy->timer_ops->pointer(substream);
+ return get_dummy_ops(substream)->pointer(substream);
}
static struct snd_pcm_hardware dummy_pcm_hardware = {
@@ -562,17 +562,19 @@ static int dummy_pcm_open(struct snd_pcm_substream *substream)
struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
struct dummy_model *model = dummy->model;
struct snd_pcm_runtime *runtime = substream->runtime;
+ const struct dummy_timer_ops *ops;
int err;
- dummy->timer_ops = &dummy_systimer_ops;
+ ops = &dummy_systimer_ops;
#ifdef CONFIG_HIGH_RES_TIMERS
if (hrtimer)
- dummy->timer_ops = &dummy_hrtimer_ops;
+ ops = &dummy_hrtimer_ops;
#endif
- err = dummy->timer_ops->create(substream);
+ err = ops->create(substream);
if (err < 0)
return err;
+ get_dummy_ops(substream) = ops;
runtime->hw = dummy->pcm_hw;
if (substream->pcm->device & 1) {
@@ -594,7 +596,7 @@ static int dummy_pcm_open(struct snd_pcm_substream *substream)
err = model->capture_constraints(substream->runtime);
}
if (err < 0) {
- dummy->timer_ops->free(substream);
+ get_dummy_ops(substream)->free(substream);
return err;
}
return 0;
@@ -602,8 +604,7 @@ static int dummy_pcm_open(struct snd_pcm_substream *substream)
static int dummy_pcm_close(struct snd_pcm_substream *substream)
{
- struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
- dummy->timer_ops->free(substream);
+ get_dummy_ops(substream)->free(substream);
return 0;
}
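
get_dummy_ops() above works because both per-substream private structs now start with the timer-ops pointer, so a double cast of runtime->private_data recovers it regardless of which backend allocated the struct; the dereference is also an lvalue, which is why the open hook can assign through the macro. A sketch of the convention with made-up names:

/* Hypothetical backends; the ops pointer must stay the first member. */
struct ex_backend_ops {
        int (*start)(void *priv);
        int (*stop)(void *priv);
};

struct ex_systimer_priv {
        const struct ex_backend_ops *ops;       /* must be first */
        unsigned long base_time;
};

/* Valid for any private struct that follows the first-member rule. */
#define ex_get_ops(priv) (*(const struct ex_backend_ops **)(priv))

/* usage: ex_get_ops(priv) = &some_ops;  or  ex_get_ops(priv)->start(priv); */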
diff --git a/sound/firewire/bebob/bebob_stream.c b/sound/firewire/bebob/bebob_stream.c
index 926e5dcbb66a..5022c9b97ddf 100644
--- a/sound/firewire/bebob/bebob_stream.c
+++ b/sound/firewire/bebob/bebob_stream.c
@@ -47,14 +47,16 @@ static const unsigned int bridgeco_freq_table[] = {
[6] = 0x07,
};
-static unsigned int
-get_formation_index(unsigned int rate)
+static int
+get_formation_index(unsigned int rate, unsigned int *index)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(snd_bebob_rate_table); i++) {
- if (snd_bebob_rate_table[i] == rate)
- return i;
+ if (snd_bebob_rate_table[i] == rate) {
+ *index = i;
+ return 0;
+ }
}
return -EINVAL;
}
@@ -425,7 +427,9 @@ make_both_connections(struct snd_bebob *bebob, unsigned int rate)
goto end;
/* confirm params for both streams */
- index = get_formation_index(rate);
+ err = get_formation_index(rate, &index);
+ if (err < 0)
+ goto end;
pcm_channels = bebob->tx_stream_formations[index].pcm;
midi_channels = bebob->tx_stream_formations[index].midi;
err = amdtp_am824_set_parameters(&bebob->tx_stream, rate,
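
get_formation_index() now reports failure through its return value and hands the index back via an out-parameter, so an unsupported rate can no longer flow into the stream-formation arrays as a negative index. The shape of the pattern, as a hedged sketch:

#include <linux/errno.h>

/* Table lookup returning 0/-EINVAL; index delivered by out-parameter. */
static int ex_lookup_index(const unsigned int *table, unsigned int n,
                           unsigned int value, unsigned int *index)
{
        unsigned int i;

        for (i = 0; i < n; i++) {
                if (table[i] == value) {
                        *index = i;
                        return 0;
                }
        }
        return -EINVAL;         /* caller must check before indexing */
}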
diff --git a/sound/isa/Kconfig b/sound/isa/Kconfig
index 0216475fc759..37adcc6cbe6b 100644
--- a/sound/isa/Kconfig
+++ b/sound/isa/Kconfig
@@ -3,6 +3,7 @@
config SND_WSS_LIB
tristate
select SND_PCM
+ select SND_TIMER
config SND_SB_COMMON
tristate
@@ -42,6 +43,7 @@ config SND_AD1816A
select SND_OPL3_LIB
select SND_MPU401_UART
select SND_PCM
+ select SND_TIMER
help
Say Y here to include support for Analog Devices SoundPort
AD1816A or compatible sound chips.
@@ -209,6 +211,7 @@ config SND_GUSCLASSIC
tristate "Gravis UltraSound Classic"
select SND_RAWMIDI
select SND_PCM
+ select SND_TIMER
help
Say Y here to include support for Gravis UltraSound Classic
soundcards.
@@ -221,6 +224,7 @@ config SND_GUSEXTREME
select SND_OPL3_LIB
select SND_MPU401_UART
select SND_PCM
+ select SND_TIMER
help
Say Y here to include support for Gravis UltraSound Extreme
soundcards.
diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig
index 656ce39bddbc..8f6594a7d37f 100644
--- a/sound/pci/Kconfig
+++ b/sound/pci/Kconfig
@@ -155,6 +155,7 @@ config SND_AZT3328
select SND_PCM
select SND_RAWMIDI
select SND_AC97_CODEC
+ select SND_TIMER
depends on ZONE_DMA
help
Say Y here to include support for Aztech AZF3328 (PCI168)
@@ -463,6 +464,7 @@ config SND_EMU10K1
select SND_HWDEP
select SND_RAWMIDI
select SND_AC97_CODEC
+ select SND_TIMER
depends on ZONE_DMA
help
Say Y to include support for Sound Blaster PCI 512, Live!,
@@ -889,6 +891,7 @@ config SND_YMFPCI
select SND_OPL3_LIB
select SND_MPU401_UART
select SND_AC97_CODEC
+ select SND_TIMER
help
Say Y here to include support for Yamaha PCI audio chips -
YMF724, YMF724F, YMF740, YMF740C, YMF744, YMF754.
diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c
index 70671ad65d24..6efadbfb3fe3 100644
--- a/sound/pci/hda/hda_bind.c
+++ b/sound/pci/hda/hda_bind.c
@@ -174,14 +174,40 @@ static inline bool codec_probed(struct hda_codec *codec)
return device_attach(hda_codec_dev(codec)) > 0 && codec->preset;
}
-/* try to auto-load and bind the codec module */
-static void codec_bind_module(struct hda_codec *codec)
+/* try to auto-load codec module */
+static void request_codec_module(struct hda_codec *codec)
{
#ifdef MODULE
char modalias[32];
+ const char *mod = NULL;
+
+ switch (codec->probe_id) {
+ case HDA_CODEC_ID_GENERIC_HDMI:
+#if IS_MODULE(CONFIG_SND_HDA_CODEC_HDMI)
+ mod = "snd-hda-codec-hdmi";
+#endif
+ break;
+ case HDA_CODEC_ID_GENERIC:
+#if IS_MODULE(CONFIG_SND_HDA_GENERIC)
+ mod = "snd-hda-codec-generic";
+#endif
+ break;
+ default:
+ snd_hdac_codec_modalias(&codec->core, modalias, sizeof(modalias));
+ mod = modalias;
+ break;
+ }
+
+ if (mod)
+ request_module(mod);
+#endif /* MODULE */
+}
- snd_hdac_codec_modalias(&codec->core, modalias, sizeof(modalias));
- request_module(modalias);
+/* try to auto-load and bind the codec module */
+static void codec_bind_module(struct hda_codec *codec)
+{
+#ifdef MODULE
+ request_codec_module(codec);
if (codec_probed(codec))
return;
#endif
@@ -218,17 +244,13 @@ static int codec_bind_generic(struct hda_codec *codec)
if (is_likely_hdmi_codec(codec)) {
codec->probe_id = HDA_CODEC_ID_GENERIC_HDMI;
-#if IS_MODULE(CONFIG_SND_HDA_CODEC_HDMI)
- request_module("snd-hda-codec-hdmi");
-#endif
+ request_codec_module(codec);
if (codec_probed(codec))
return 0;
}
codec->probe_id = HDA_CODEC_ID_GENERIC;
-#if IS_MODULE(CONFIG_SND_HDA_GENERIC)
- request_module("snd-hda-codec-generic");
-#endif
+ request_codec_module(codec);
if (codec_probed(codec))
return 0;
return -ENODEV;
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index c6e8a651cea1..5c4fa8eba1d0 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -771,9 +771,6 @@ static void activate_amp(struct hda_codec *codec, hda_nid_t nid, int dir,
unsigned int caps;
unsigned int mask, val;
- if (!enable && is_active_nid(codec, nid, dir, idx_to_check))
- return;
-
caps = query_amp_caps(codec, nid, dir);
val = get_amp_val_to_activate(codec, nid, dir, caps, enable);
mask = get_amp_mask_to_modify(codec, nid, dir, idx_to_check, caps);
@@ -784,12 +781,22 @@ static void activate_amp(struct hda_codec *codec, hda_nid_t nid, int dir,
update_amp(codec, nid, dir, idx, mask, val);
}
+static void check_and_activate_amp(struct hda_codec *codec, hda_nid_t nid,
+ int dir, int idx, int idx_to_check,
+ bool enable)
+{
+ /* check whether the given amp is still used by others */
+ if (!enable && is_active_nid(codec, nid, dir, idx_to_check))
+ return;
+ activate_amp(codec, nid, dir, idx, idx_to_check, enable);
+}
+
static void activate_amp_out(struct hda_codec *codec, struct nid_path *path,
int i, bool enable)
{
hda_nid_t nid = path->path[i];
init_amp(codec, nid, HDA_OUTPUT, 0);
- activate_amp(codec, nid, HDA_OUTPUT, 0, 0, enable);
+ check_and_activate_amp(codec, nid, HDA_OUTPUT, 0, 0, enable);
}
static void activate_amp_in(struct hda_codec *codec, struct nid_path *path,
@@ -817,9 +824,16 @@ static void activate_amp_in(struct hda_codec *codec, struct nid_path *path,
* when aa-mixer is available, we need to enable the path as well
*/
for (n = 0; n < nums; n++) {
- if (n != idx && (!add_aamix || conn[n] != spec->mixer_merge_nid))
- continue;
- activate_amp(codec, nid, HDA_INPUT, n, idx, enable);
+ if (n != idx) {
+ if (conn[n] != spec->mixer_merge_nid)
+ continue;
+ /* when aamix is disabled, force to off */
+ if (!add_aamix) {
+ activate_amp(codec, nid, HDA_INPUT, n, n, false);
+ continue;
+ }
+ }
+ check_and_activate_amp(codec, nid, HDA_INPUT, n, idx, enable);
}
}
@@ -1580,6 +1594,12 @@ static bool map_singles(struct hda_codec *codec, int outs,
return found;
}
+static inline bool has_aamix_out_paths(struct hda_gen_spec *spec)
+{
+ return spec->aamix_out_paths[0] || spec->aamix_out_paths[1] ||
+ spec->aamix_out_paths[2];
+}
+
/* create a new path including aamix if available, and return its index */
static int check_aamix_out_path(struct hda_codec *codec, int path_idx)
{
@@ -2422,25 +2442,51 @@ static void update_aamix_paths(struct hda_codec *codec, bool do_mix,
}
}
+/* re-initialize the output paths; only called from loopback_mixing_put() */
+static void update_output_paths(struct hda_codec *codec, int num_outs,
+ const int *paths)
+{
+ struct hda_gen_spec *spec = codec->spec;
+ struct nid_path *path;
+ int i;
+
+ for (i = 0; i < num_outs; i++) {
+ path = snd_hda_get_path_from_idx(codec, paths[i]);
+ if (path)
+ snd_hda_activate_path(codec, path, path->active,
+ spec->aamix_mode);
+ }
+}
+
static int loopback_mixing_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
struct hda_gen_spec *spec = codec->spec;
+ const struct auto_pin_cfg *cfg = &spec->autocfg;
unsigned int val = ucontrol->value.enumerated.item[0];
if (val == spec->aamix_mode)
return 0;
spec->aamix_mode = val;
- update_aamix_paths(codec, val, spec->out_paths[0],
- spec->aamix_out_paths[0],
- spec->autocfg.line_out_type);
- update_aamix_paths(codec, val, spec->hp_paths[0],
- spec->aamix_out_paths[1],
- AUTO_PIN_HP_OUT);
- update_aamix_paths(codec, val, spec->speaker_paths[0],
- spec->aamix_out_paths[2],
- AUTO_PIN_SPEAKER_OUT);
+ if (has_aamix_out_paths(spec)) {
+ update_aamix_paths(codec, val, spec->out_paths[0],
+ spec->aamix_out_paths[0],
+ cfg->line_out_type);
+ update_aamix_paths(codec, val, spec->hp_paths[0],
+ spec->aamix_out_paths[1],
+ AUTO_PIN_HP_OUT);
+ update_aamix_paths(codec, val, spec->speaker_paths[0],
+ spec->aamix_out_paths[2],
+ AUTO_PIN_SPEAKER_OUT);
+ } else {
+ update_output_paths(codec, cfg->line_outs, spec->out_paths);
+ if (cfg->line_out_type != AUTO_PIN_HP_OUT)
+ update_output_paths(codec, cfg->hp_outs, spec->hp_paths);
+ if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT)
+ update_output_paths(codec, cfg->speaker_outs,
+ spec->speaker_paths);
+ }
return 1;
}
@@ -2458,12 +2504,13 @@ static int create_loopback_mixing_ctl(struct hda_codec *codec)
if (!spec->mixer_nid)
return 0;
- if (!(spec->aamix_out_paths[0] || spec->aamix_out_paths[1] ||
- spec->aamix_out_paths[2]))
- return 0;
if (!snd_hda_gen_add_kctl(spec, NULL, &loopback_mixing_enum))
return -ENOMEM;
spec->have_aamix_ctl = 1;
+ /* if no explicit aamix path is present (e.g. for Realtek codecs),
+ * enable aamix as default -- just for compatibility
+ */
+ spec->aamix_mode = !has_aamix_out_paths(spec);
return 0;
}
@@ -3998,9 +4045,9 @@ static void pin_power_callback(struct hda_codec *codec,
struct hda_jack_callback *jack,
bool on)
{
- if (jack && jack->tbl->nid)
+ if (jack && jack->nid)
sync_power_state_change(codec,
- set_pin_power_jack(codec, jack->tbl->nid, on));
+ set_pin_power_jack(codec, jack->nid, on));
}
/* callback only doing power up -- called at first */
@@ -5664,6 +5711,8 @@ static void init_aamix_paths(struct hda_codec *codec)
if (!spec->have_aamix_ctl)
return;
+ if (!has_aamix_out_paths(spec))
+ return;
update_aamix_paths(codec, spec->aamix_mode, spec->out_paths[0],
spec->aamix_out_paths[0],
spec->autocfg.line_out_type);
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 3b3658297070..2ff692dd2c5f 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -90,6 +90,8 @@ enum {
#define NVIDIA_HDA_ENABLE_COHBIT 0x01
/* Defines for Intel SCH HDA snoop control */
+#define INTEL_HDA_CGCTL 0x48
+#define INTEL_HDA_CGCTL_MISCBDCGE (0x1 << 6)
#define INTEL_SCH_HDA_DEVC 0x78
#define INTEL_SCH_HDA_DEVC_NOSNOOP (0x1<<11)
@@ -355,7 +357,10 @@ enum {
((pci)->device == 0x0d0c) || \
((pci)->device == 0x160c))
-#define IS_BROXTON(pci) ((pci)->device == 0x5a98)
+#define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
+#define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
+#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
+#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci))
static char *driver_short_names[] = {
[AZX_DRIVER_ICH] = "HDA Intel",
@@ -528,15 +533,26 @@ static void hda_intel_init_chip(struct azx *chip, bool full_reset)
{
struct hdac_bus *bus = azx_bus(chip);
struct pci_dev *pci = chip->pci;
+ u32 val;
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
snd_hdac_set_codec_wakeup(bus, true);
+ if (IS_SKL_PLUS(pci)) {
+ pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val);
+ val = val & ~INTEL_HDA_CGCTL_MISCBDCGE;
+ pci_write_config_dword(pci, INTEL_HDA_CGCTL, val);
+ }
azx_init_chip(chip, full_reset);
+ if (IS_SKL_PLUS(pci)) {
+ pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val);
+ val = val | INTEL_HDA_CGCTL_MISCBDCGE;
+ pci_write_config_dword(pci, INTEL_HDA_CGCTL, val);
+ }
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
snd_hdac_set_codec_wakeup(bus, false);
/* reduce dma latency to avoid noise */
- if (IS_BROXTON(pci))
+ if (IS_BXT(pci))
bxt_reduce_dma_latency(chip);
}
@@ -958,11 +974,6 @@ static int azx_resume(struct device *dev)
/* put codec down to D3 at hibernation for Intel SKL+;
* otherwise BIOS may still access the codec and screw up the driver
*/
-#define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
-#define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
-#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
-#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci))
-
static int azx_freeze_noirq(struct device *dev)
{
struct pci_dev *pci = to_pci_dev(dev);
@@ -2126,9 +2137,17 @@ i915_power_fail:
static void azx_remove(struct pci_dev *pci)
{
struct snd_card *card = pci_get_drvdata(pci);
+ struct azx *chip;
+ struct hda_intel *hda;
+
+ if (card) {
+ /* cancel the pending probing work */
+ chip = card->private_data;
+ hda = container_of(chip, struct hda_intel, chip);
+ cancel_work_sync(&hda->probe_work);
- if (card)
snd_card_free(card);
+ }
}
static void azx_shutdown(struct pci_dev *pci)
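
The hda_intel.c change clears the MISCBDCGE bit in the Intel CGCTL config register before the controller reset on Skylake-class devices and sets it again afterwards, i.e. dynamic clock gating is parked while the reset runs. A sketch of that read-modify-write, reusing the offsets from the hunk (the ex_ helper name is illustrative):

#include <linux/pci.h>

#define EX_CGCTL        0x48            /* offset/bit taken from the hunk */
#define EX_MISCBDCGE    (0x1 << 6)

static void ex_set_miscbdcge(struct pci_dev *pci, bool enable)
{
        u32 val;

        pci_read_config_dword(pci, EX_CGCTL, &val);
        if (enable)
                val |= EX_MISCBDCGE;
        else
                val &= ~EX_MISCBDCGE;   /* gate off around the reset */
        pci_write_config_dword(pci, EX_CGCTL, val);
}

Per the hunk, the disable/enable pair brackets azx_init_chip() on IS_SKL_PLUS() hardware only.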
diff --git a/sound/pci/hda/hda_jack.c b/sound/pci/hda/hda_jack.c
index c945e257d368..a33234e04d4f 100644
--- a/sound/pci/hda/hda_jack.c
+++ b/sound/pci/hda/hda_jack.c
@@ -259,7 +259,7 @@ snd_hda_jack_detect_enable_callback(struct hda_codec *codec, hda_nid_t nid,
if (!callback)
return ERR_PTR(-ENOMEM);
callback->func = func;
- callback->tbl = jack;
+ callback->nid = jack->nid;
callback->next = jack->callback;
jack->callback = callback;
}
diff --git a/sound/pci/hda/hda_jack.h b/sound/pci/hda/hda_jack.h
index 858708a044f5..e9814c0168ea 100644
--- a/sound/pci/hda/hda_jack.h
+++ b/sound/pci/hda/hda_jack.h
@@ -21,7 +21,7 @@ struct hda_jack_callback;
typedef void (*hda_jack_callback_fn) (struct hda_codec *, struct hda_jack_callback *);
struct hda_jack_callback {
- struct hda_jack_tbl *tbl;
+ hda_nid_t nid;
hda_jack_callback_fn func;
unsigned int private_data; /* arbitrary data */
struct hda_jack_callback *next;
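
With hda_jack_callback carrying a plain NID instead of a pointer into the jack table, callbacks that still need the table entry look it up on demand, as the patch_ca0132.c hunk below does; the cached pointer could otherwise dangle when the table is reallocated. A sketch of the new convention (kernel-internal, assuming hda_jack.h):

/* Resolve the table entry from the NID only when it is needed. */
static void ex_jack_cb(struct hda_codec *codec, struct hda_jack_callback *cb)
{
        struct hda_jack_tbl *tbl = snd_hda_jack_tbl_get(codec, cb->nid);

        if (tbl)                /* entry can be absent; check first */
                tbl->block_report = 1;
}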
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 4ef2259f88ca..9ceb2bc36e68 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -4427,13 +4427,16 @@ static void ca0132_process_dsp_response(struct hda_codec *codec,
static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
{
struct ca0132_spec *spec = codec->spec;
+ struct hda_jack_tbl *tbl;
/* Delay enabling the HP amp, to let the mic-detection
* state machine run.
*/
cancel_delayed_work_sync(&spec->unsol_hp_work);
schedule_delayed_work(&spec->unsol_hp_work, msecs_to_jiffies(500));
- cb->tbl->block_report = 1;
+ tbl = snd_hda_jack_tbl_get(codec, cb->nid);
+ if (tbl)
+ tbl->block_report = 1;
}
static void amic_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index a12ae8ac0914..a47e8ae0eb30 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -174,8 +174,12 @@ static void cs_automute(struct hda_codec *codec)
snd_hda_gen_update_outputs(codec);
if (spec->gpio_eapd_hp || spec->gpio_eapd_speaker) {
- spec->gpio_data = spec->gen.hp_jack_present ?
- spec->gpio_eapd_hp : spec->gpio_eapd_speaker;
+ if (spec->gen.automute_speaker)
+ spec->gpio_data = spec->gen.hp_jack_present ?
+ spec->gpio_eapd_hp : spec->gpio_eapd_speaker;
+ else
+ spec->gpio_data =
+ spec->gpio_eapd_hp | spec->gpio_eapd_speaker;
snd_hda_codec_write(codec, 0x01, 0,
AC_VERB_SET_GPIO_DATA, spec->gpio_data);
}
@@ -614,6 +618,7 @@ enum {
CS4208_MAC_AUTO,
CS4208_MBA6,
CS4208_MBP11,
+ CS4208_MACMINI,
CS4208_GPIO0,
};
@@ -621,6 +626,7 @@ static const struct hda_model_fixup cs4208_models[] = {
{ .id = CS4208_GPIO0, .name = "gpio0" },
{ .id = CS4208_MBA6, .name = "mba6" },
{ .id = CS4208_MBP11, .name = "mbp11" },
+ { .id = CS4208_MACMINI, .name = "macmini" },
{}
};
@@ -632,6 +638,7 @@ static const struct snd_pci_quirk cs4208_fixup_tbl[] = {
/* codec SSID matching */
static const struct snd_pci_quirk cs4208_mac_fixup_tbl[] = {
SND_PCI_QUIRK(0x106b, 0x5e00, "MacBookPro 11,2", CS4208_MBP11),
+ SND_PCI_QUIRK(0x106b, 0x6c00, "MacMini 7,1", CS4208_MACMINI),
SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6),
SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6),
SND_PCI_QUIRK(0x106b, 0x7b00, "MacBookPro 12,1", CS4208_MBP11),
@@ -666,6 +673,24 @@ static void cs4208_fixup_mac(struct hda_codec *codec,
snd_hda_apply_fixup(codec, action);
}
+/* MacMini 7,1 has the inverted jack detection */
+static void cs4208_fixup_macmini(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ static const struct hda_pintbl pincfgs[] = {
+ { 0x18, 0x00ab9150 }, /* mic (audio-in) jack: disable detect */
+ { 0x21, 0x004be140 }, /* SPDIF: disable detect */
+ { }
+ };
+
+ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+ /* HP pin (0x10) has an inverted detection */
+ codec->inv_jack_detect = 1;
+ /* disable the bogus Mic and SPDIF jack detections */
+ snd_hda_apply_pincfgs(codec, pincfgs);
+ }
+}
+
static int cs4208_spdif_sw_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -709,6 +734,12 @@ static const struct hda_fixup cs4208_fixups[] = {
.chained = true,
.chain_id = CS4208_GPIO0,
},
+ [CS4208_MACMINI] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = cs4208_fixup_macmini,
+ .chained = true,
+ .chain_id = CS4208_GPIO0,
+ },
[CS4208_GPIO0] = {
.type = HDA_FIXUP_FUNC,
.v.func = cs4208_fixup_gpio0,
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index ef198903c0c3..600af5878e75 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -204,8 +204,13 @@ static void cx_auto_reboot_notify(struct hda_codec *codec)
{
struct conexant_spec *spec = codec->spec;
- if (codec->core.vendor_id != 0x14f150f2)
+ switch (codec->core.vendor_id) {
+ case 0x14f150f2: /* CX20722 */
+ case 0x14f150f4: /* CX20724 */
+ break;
+ default:
return;
+ }
/* Turn the CX20722 codec into D3 to avoid spurious noises
from the internal speaker during (and after) reboot */
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 4b6fb668c91c..f7bcd8dbac14 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -438,7 +438,8 @@ static int hdmi_eld_ctl_get(struct snd_kcontrol *kcontrol,
eld = &per_pin->sink_eld;
mutex_lock(&per_pin->lock);
- if (eld->eld_size > ARRAY_SIZE(ucontrol->value.bytes.data)) {
+ if (eld->eld_size > ARRAY_SIZE(ucontrol->value.bytes.data) ||
+ eld->eld_size > ELD_MAX_SIZE) {
mutex_unlock(&per_pin->lock);
snd_BUG();
return -EINVAL;
@@ -1183,7 +1184,7 @@ static void check_presence_and_report(struct hda_codec *codec, hda_nid_t nid)
static void jack_callback(struct hda_codec *codec,
struct hda_jack_callback *jack)
{
- check_presence_and_report(codec, jack->tbl->nid);
+ check_presence_and_report(codec, jack->nid);
}
static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
@@ -2352,6 +2353,10 @@ static void intel_pin_eld_notify(void *audio_ptr, int port)
struct hda_codec *codec = audio_ptr;
int pin_nid = port + 0x04;
+ /* we assume only from port-B to port-D */
+ if (port < 1 || port > 3)
+ return;
+
/* skip notification during system suspend (but not in runtime PM);
* the state will be updated at resume
*/
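
patch_hdmi.c gains two range checks: the ELD copy is clamped against both the control payload and ELD_MAX_SIZE, and i915 notifications are ignored unless the port is in the wired B..D range before it is turned into a pin NID. The port mapping, as a small hedged sketch (the ex_ name is illustrative):

#include <linux/errno.h>

/* Validate the notified port before deriving the pin NID. */
static int ex_port_to_pin(int port)
{
        if (port < 1 || port > 3)       /* only port B..D are expected */
                return -EINVAL;
        return port + 0x04;             /* pin NID, as in the hunk */
}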
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 3a89d82f8057..1402ba954b3d 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -282,7 +282,7 @@ static void alc_update_knob_master(struct hda_codec *codec,
uctl = kzalloc(sizeof(*uctl), GFP_KERNEL);
if (!uctl)
return;
- val = snd_hda_codec_read(codec, jack->tbl->nid, 0,
+ val = snd_hda_codec_read(codec, jack->nid, 0,
AC_VERB_GET_VOLUME_KNOB_CONTROL, 0);
val &= HDA_AMP_VOLMASK;
uctl->value.integer.value[0] = val;
@@ -327,6 +327,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
case 0x10ec0292:
alc_update_coef_idx(codec, 0x4, 1<<15, 0);
break;
+ case 0x10ec0225:
case 0x10ec0233:
case 0x10ec0255:
case 0x10ec0256:
@@ -900,6 +901,7 @@ static struct alc_codec_rename_pci_table rename_pci_tbl[] = {
{ 0x10ec0899, 0x1028, 0, "ALC3861" },
{ 0x10ec0298, 0x1028, 0, "ALC3266" },
{ 0x10ec0256, 0x1028, 0, "ALC3246" },
+ { 0x10ec0225, 0x1028, 0, "ALC3253" },
{ 0x10ec0670, 0x1025, 0, "ALC669X" },
{ 0x10ec0676, 0x1025, 0, "ALC679X" },
{ 0x10ec0282, 0x1043, 0, "ALC3229" },
@@ -1785,7 +1787,6 @@ enum {
ALC882_FIXUP_NO_PRIMARY_HP,
ALC887_FIXUP_ASUS_BASS,
ALC887_FIXUP_BASS_CHMAP,
- ALC882_FIXUP_DISABLE_AAMIX,
};
static void alc889_fixup_coef(struct hda_codec *codec,
@@ -1947,8 +1948,6 @@ static void alc882_fixup_no_primary_hp(struct hda_codec *codec,
static void alc_fixup_bass_chmap(struct hda_codec *codec,
const struct hda_fixup *fix, int action);
-static void alc_fixup_disable_aamix(struct hda_codec *codec,
- const struct hda_fixup *fix, int action);
static const struct hda_fixup alc882_fixups[] = {
[ALC882_FIXUP_ABIT_AW9D_MAX] = {
@@ -2186,10 +2185,6 @@ static const struct hda_fixup alc882_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc_fixup_bass_chmap,
},
- [ALC882_FIXUP_DISABLE_AAMIX] = {
- .type = HDA_FIXUP_FUNC,
- .v.func = alc_fixup_disable_aamix,
- },
};
static const struct snd_pci_quirk alc882_fixup_tbl[] = {
@@ -2228,6 +2223,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
+ SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
/* All Apple entries are in codec SSIDs */
SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF),
@@ -2257,7 +2253,6 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
- SND_PCI_QUIRK(0x1458, 0xa182, "Gigabyte Z170X-UD3", ALC882_FIXUP_DISABLE_AAMIX),
SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
@@ -2651,6 +2646,7 @@ enum {
ALC269_TYPE_ALC298,
ALC269_TYPE_ALC255,
ALC269_TYPE_ALC256,
+ ALC269_TYPE_ALC225,
};
/*
@@ -2680,6 +2676,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
case ALC269_TYPE_ALC298:
case ALC269_TYPE_ALC255:
case ALC269_TYPE_ALC256:
+ case ALC269_TYPE_ALC225:
ssids = alc269_ssids;
break;
default:
@@ -3658,6 +3655,16 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
WRITE_COEF(0xb7, 0x802b),
{}
};
+ static struct coef_fw coef0225[] = {
+ UPDATE_COEF(0x4a, 1<<8, 0),
+ UPDATE_COEFEX(0x57, 0x05, 1<<14, 0),
+ UPDATE_COEF(0x63, 3<<14, 3<<14),
+ UPDATE_COEF(0x4a, 3<<4, 2<<4),
+ UPDATE_COEF(0x4a, 3<<10, 3<<10),
+ UPDATE_COEF(0x45, 0x3f<<10, 0x34<<10),
+ UPDATE_COEF(0x4a, 3<<10, 0),
+ {}
+ };
switch (codec->core.vendor_id) {
case 0x10ec0255:
@@ -3682,6 +3689,9 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
case 0x10ec0668:
alc_process_coef_fw(codec, coef0668);
break;
+ case 0x10ec0225:
+ alc_process_coef_fw(codec, coef0225);
+ break;
}
codec_dbg(codec, "Headset jack set to unplugged mode.\n");
}
@@ -3727,6 +3737,13 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
UPDATE_COEF(0xc3, 0, 1<<12),
{}
};
+ static struct coef_fw coef0225[] = {
+ UPDATE_COEFEX(0x57, 0x05, 1<<14, 1<<14),
+ UPDATE_COEF(0x4a, 3<<4, 2<<4),
+ UPDATE_COEF(0x63, 3<<14, 0),
+ {}
+ };
+
switch (codec->core.vendor_id) {
case 0x10ec0255:
@@ -3772,12 +3789,22 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
alc_process_coef_fw(codec, coef0688);
snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
break;
+ case 0x10ec0225:
+ alc_update_coef_idx(codec, 0x45, 0x3f<<10, 0x31<<10);
+ snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
+ alc_process_coef_fw(codec, coef0225);
+ snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
+ break;
}
codec_dbg(codec, "Headset jack set to mic-in mode.\n");
}
static void alc_headset_mode_default(struct hda_codec *codec)
{
+ static struct coef_fw coef0225[] = {
+ UPDATE_COEF(0x45, 0x3f<<10, 0x34<<10),
+ {}
+ };
static struct coef_fw coef0255[] = {
WRITE_COEF(0x45, 0xc089),
WRITE_COEF(0x45, 0xc489),
@@ -3819,6 +3846,9 @@ static void alc_headset_mode_default(struct hda_codec *codec)
};
switch (codec->core.vendor_id) {
+ case 0x10ec0225:
+ alc_process_coef_fw(codec, coef0225);
+ break;
case 0x10ec0255:
case 0x10ec0256:
alc_process_coef_fw(codec, coef0255);
@@ -3884,6 +3914,13 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
WRITE_COEF(0xc3, 0x0000),
{}
};
+ static struct coef_fw coef0225[] = {
+ UPDATE_COEF(0x45, 0x3f<<10, 0x35<<10),
+ UPDATE_COEF(0x49, 1<<8, 1<<8),
+ UPDATE_COEF(0x4a, 7<<6, 7<<6),
+ UPDATE_COEF(0x4a, 3<<4, 3<<4),
+ {}
+ };
switch (codec->core.vendor_id) {
case 0x10ec0255:
@@ -3912,6 +3949,9 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
case 0x10ec0668:
alc_process_coef_fw(codec, coef0688);
break;
+ case 0x10ec0225:
+ alc_process_coef_fw(codec, coef0225);
+ break;
}
codec_dbg(codec, "Headset jack set to iPhone-style headset mode.\n");
}
@@ -3955,6 +3995,13 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
WRITE_COEF(0xc3, 0x0000),
{}
};
+ static struct coef_fw coef0225[] = {
+ UPDATE_COEF(0x45, 0x3f<<10, 0x39<<10),
+ UPDATE_COEF(0x49, 1<<8, 1<<8),
+ UPDATE_COEF(0x4a, 7<<6, 7<<6),
+ UPDATE_COEF(0x4a, 3<<4, 3<<4),
+ {}
+ };
switch (codec->core.vendor_id) {
case 0x10ec0255:
@@ -3983,6 +4030,9 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
case 0x10ec0668:
alc_process_coef_fw(codec, coef0688);
break;
+ case 0x10ec0225:
+ alc_process_coef_fw(codec, coef0225);
+ break;
}
codec_dbg(codec, "Headset jack set to Nokia-style headset mode.\n");
}
@@ -4014,6 +4064,11 @@ static void alc_determine_headset_type(struct hda_codec *codec)
WRITE_COEF(0xc3, 0x0c00),
{}
};
+ static struct coef_fw coef0225[] = {
+ UPDATE_COEF(0x45, 0x3f<<10, 0x34<<10),
+ UPDATE_COEF(0x49, 1<<8, 1<<8),
+ {}
+ };
switch (codec->core.vendor_id) {
case 0x10ec0255:
@@ -4058,6 +4113,12 @@ static void alc_determine_headset_type(struct hda_codec *codec)
val = alc_read_coef_idx(codec, 0xbe);
is_ctia = (val & 0x1c02) == 0x1c02;
break;
+ case 0x10ec0225:
+ alc_process_coef_fw(codec, coef0225);
+ msleep(800);
+ val = alc_read_coef_idx(codec, 0x46);
+ is_ctia = (val & 0x00f0) == 0x00f0;
+ break;
}
codec_dbg(codec, "Headset jack detected iPhone-style headset: %s\n",
@@ -4666,6 +4727,7 @@ enum {
ALC290_FIXUP_SUBWOOFER,
ALC290_FIXUP_SUBWOOFER_HSJACK,
ALC269_FIXUP_THINKPAD_ACPI,
+ ALC269_FIXUP_DMIC_THINKPAD_ACPI,
ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
ALC255_FIXUP_HEADSET_MODE,
@@ -4694,6 +4756,11 @@ enum {
ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE,
ALC293_FIXUP_LENOVO_SPK_NOISE,
ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
+ ALC255_FIXUP_DELL_SPK_NOISE,
+ ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC280_FIXUP_HP_HEADSET_MIC,
+ ALC221_FIXUP_HP_FRONT_MIC,
+ ALC292_FIXUP_TPT460,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -5103,6 +5170,12 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = hda_fixup_thinkpad_acpi,
},
+ [ALC269_FIXUP_DMIC_THINKPAD_ACPI] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_inv_dmic,
+ .chained = true,
+ .chain_id = ALC269_FIXUP_THINKPAD_ACPI,
+ },
[ALC255_FIXUP_DELL1_MIC_NO_PRESENCE] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
@@ -5307,6 +5380,42 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc233_fixup_lenovo_line2_mic_hotkey,
},
+ [ALC255_FIXUP_DELL_SPK_NOISE] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_disable_aamix,
+ .chained = true,
+ .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
+ },
+ [ALC225_FIXUP_DELL1_MIC_NO_PRESENCE] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+ /* Disable pass-through path for FRONT 14h */
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x36 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x57d7 },
+ {}
+ },
+ .chained = true,
+ .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE
+ },
+ [ALC280_FIXUP_HP_HEADSET_MIC] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_disable_aamix,
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MIC,
+ },
+ [ALC221_FIXUP_HP_FRONT_MIC] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x19, 0x02a19020 }, /* Front Mic */
+ { }
+ },
+ },
+ [ALC292_FIXUP_TPT460] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_tpt440_dock,
+ .chained = true,
+ .chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE,
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -5318,12 +5427,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK),
SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
+ SND_PCI_QUIRK(0x1025, 0x0762, "Acer Aspire E1-472", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
SND_PCI_QUIRK(0x1028, 0x05bd, "Dell Latitude E6440", ALC292_FIXUP_DELL_E7X),
+ SND_PCI_QUIRK(0x1028, 0x05be, "Dell Latitude E6540", ALC292_FIXUP_DELL_E7X),
SND_PCI_QUIRK(0x1028, 0x05ca, "Dell Latitude E7240", ALC292_FIXUP_DELL_E7X),
SND_PCI_QUIRK(0x1028, 0x05cb, "Dell Latitude E7440", ALC292_FIXUP_DELL_E7X),
SND_PCI_QUIRK(0x1028, 0x05da, "Dell Vostro 5460", ALC290_FIXUP_SUBWOOFER),
@@ -5332,6 +5443,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x05f6, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0615, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK),
SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK),
+ SND_PCI_QUIRK(0x1028, 0x062c, "Dell Latitude E5550", ALC292_FIXUP_DELL_E7X),
SND_PCI_QUIRK(0x1028, 0x062e, "Dell Latitude E7450", ALC292_FIXUP_DELL_E7X),
SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS_HSJACK),
SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
@@ -5347,6 +5459,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+ SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -5407,6 +5520,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x2335, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC),
SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -5455,8 +5570,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
- SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
+ SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460),
SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -5548,8 +5665,12 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{.id = ALC283_FIXUP_SENSE_COMBO_JACK, .name = "alc283-sense-combo"},
{.id = ALC292_FIXUP_TPT440_DOCK, .name = "tpt440-dock"},
{.id = ALC292_FIXUP_TPT440, .name = "tpt440"},
+ {.id = ALC292_FIXUP_TPT460, .name = "tpt460"},
{}
};
+#define ALC225_STANDARD_PINS \
+ {0x12, 0xb7a60130}, \
+ {0x21, 0x04211020}
#define ALC256_STANDARD_PINS \
{0x12, 0x90a60140}, \
@@ -5571,6 +5692,12 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{0x21, 0x03211020}
static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC225_STANDARD_PINS,
+ {0x14, 0x901701a0}),
+ SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC225_STANDARD_PINS,
+ {0x14, 0x901701b0}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
{0x14, 0x90170110},
{0x21, 0x02211020}),
@@ -5617,6 +5744,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x21, 0x02211040}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
{0x12, 0x90a60170},
+ {0x14, 0x90171130},
+ {0x21, 0x02211040}),
+ SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x12, 0x90a60170},
{0x14, 0x90170140},
{0x21, 0x02211050}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell Inspiron 5548", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
@@ -5892,6 +6023,9 @@ static int patch_alc269(struct hda_codec *codec)
spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */
alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/
break;
+ case 0x10ec0225:
+ spec->codec_variant = ALC269_TYPE_ALC225;
+ break;
}
if (snd_hda_codec_read(codec, 0x51, 0, AC_VERB_PARAMETERS, 0) == 0x10ec5505) {
@@ -6289,6 +6423,7 @@ enum {
ALC668_FIXUP_AUTO_MUTE,
ALC668_FIXUP_DELL_DISABLE_AAMIX,
ALC668_FIXUP_DELL_XPS13,
+ ALC662_FIXUP_ASUS_Nx50,
};
static const struct hda_fixup alc662_fixups[] = {
@@ -6529,6 +6664,12 @@ static const struct hda_fixup alc662_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc_fixup_bass_chmap,
},
+ [ALC662_FIXUP_ASUS_Nx50] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_auto_mute_via_amp,
+ .chained = true,
+ .chain_id = ALC662_FIXUP_BASS_1A
+ },
};
static const struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -6551,7 +6692,9 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
- SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A),
+ SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50),
+ SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
+ SND_PCI_QUIRK(0x1043, 0x129d, "Asus N750", ALC662_FIXUP_ASUS_Nx50),
SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
SND_PCI_QUIRK(0x1043, 0x1b73, "ASUS N55SF", ALC662_FIXUP_BASS_16),
@@ -6781,6 +6924,7 @@ static int patch_alc680(struct hda_codec *codec)
*/
static const struct hda_device_id snd_hda_id_realtek[] = {
HDA_CODEC_ENTRY(0x10ec0221, "ALC221", patch_alc269),
+ HDA_CODEC_ENTRY(0x10ec0225, "ALC225", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0231, "ALC231", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0235, "ALC233", patch_alc269),
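
The ALC225 support added above follows the file's coef_fw convention: each headset-jack mode is a table of coefficient writes/updates terminated by an empty entry and applied with alc_process_coef_fw(). A hedged sketch of the shape, assuming the file-local coef_fw helpers (the table name is made up; the values are copied from the hunks):

/* Illustrative mode table; terminated by the empty entry. */
static const struct coef_fw ex_mode_coefs[] = {
        UPDATE_COEF(0x45, 0x3f << 10, 0x34 << 10),      /* idx, mask, value */
        UPDATE_COEFEX(0x57, 0x05, 1 << 14, 0),          /* indexed variant */
        {}                                              /* terminator */
};

/* applied as: alc_process_coef_fw(codec, ex_mode_coefs); */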
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 2c7c5eb8b1e9..37b70f8e878f 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -493,9 +493,9 @@ static void jack_update_power(struct hda_codec *codec,
if (!spec->num_pwrs)
return;
- if (jack && jack->tbl->nid) {
- stac_toggle_power_map(codec, jack->tbl->nid,
- snd_hda_jack_detect(codec, jack->tbl->nid),
+ if (jack && jack->nid) {
+ stac_toggle_power_map(codec, jack->nid,
+ snd_hda_jack_detect(codec, jack->nid),
true);
return;
}
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
index 42bcbac801a3..ccdab29a8b66 100644
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -2879,6 +2879,7 @@ static void intel8x0_measure_ac97_clock(struct intel8x0 *chip)
static struct snd_pci_quirk intel8x0_clock_list[] = {
SND_PCI_QUIRK(0x0e11, 0x008a, "AD1885", 41000),
+ SND_PCI_QUIRK(0x1014, 0x0581, "AD1981B", 48000),
SND_PCI_QUIRK(0x1028, 0x00be, "AD1885", 44100),
SND_PCI_QUIRK(0x1028, 0x0177, "AD1980", 48000),
SND_PCI_QUIRK(0x1028, 0x01ad, "AD1981B", 48000),
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index 2875b4f6d8c9..7c8941b8b2de 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -2879,7 +2879,7 @@ static int snd_hdsp_get_dds_offset(struct snd_kcontrol *kcontrol, struct snd_ctl
{
struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
- ucontrol->value.enumerated.item[0] = hdsp_dds_offset(hdsp);
+ ucontrol->value.integer.value[0] = hdsp_dds_offset(hdsp);
return 0;
}
@@ -2891,7 +2891,7 @@ static int snd_hdsp_put_dds_offset(struct snd_kcontrol *kcontrol, struct snd_ctl
if (!snd_hdsp_use_is_exclusive(hdsp))
return -EBUSY;
- val = ucontrol->value.enumerated.item[0];
+ val = ucontrol->value.integer.value[0];
spin_lock_irq(&hdsp->lock);
if (val != hdsp_dds_offset(hdsp))
change = (hdsp_set_dds_offset(hdsp, val) == 0) ? 1 : 0;
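
The hdsp.c fix matters because snd_ctl_elem_value is a union: an integer control must go through value.integer.value[] (an array of long), while value.enumerated.item[] is unsigned and differently sized, so a negative DDS offset would round-trip wrongly through the enumerated view. A sketch of the correct pairing (assumes <sound/control.h>; the ex_ name is illustrative):

/* An SNDRV_CTL_ELEM_TYPE_INTEGER control uses the signed view. */
static int ex_int_get(struct snd_kcontrol *kcontrol,
                      struct snd_ctl_elem_value *ucontrol)
{
        ucontrol->value.integer.value[0] = -42; /* long: keeps the sign */
        return 0;
}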
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 8bc8016c173d..a4a999a0317e 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -1601,6 +1601,9 @@ static void hdspm_set_dds_value(struct hdspm *hdspm, int rate)
{
u64 n;
+ if (snd_BUG_ON(rate <= 0))
+ return;
+
if (rate >= 112000)
rate /= 4;
else if (rate >= 56000)
@@ -2215,6 +2218,8 @@ static int hdspm_get_system_sample_rate(struct hdspm *hdspm)
} else {
/* slave mode, return external sample rate */
rate = hdspm_external_sample_rate(hdspm);
+ if (!rate)
+ rate = hdspm->system_sample_rate;
}
}
@@ -2260,8 +2265,11 @@ static int snd_hdspm_put_system_sample_rate(struct snd_kcontrol *kcontrol,
ucontrol)
{
struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
+ int rate = ucontrol->value.integer.value[0];
- hdspm_set_dds_value(hdspm, ucontrol->value.enumerated.item[0]);
+ if (rate < 27000 || rate > 207000)
+ return -EINVAL;
+ hdspm_set_dds_value(hdspm, ucontrol->value.integer.value[0]);
return 0;
}
@@ -4449,7 +4457,7 @@ static int snd_hdspm_get_tco_word_term(struct snd_kcontrol *kcontrol,
{
struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
- ucontrol->value.enumerated.item[0] = hdspm->tco->term;
+ ucontrol->value.integer.value[0] = hdspm->tco->term;
return 0;
}
@@ -4460,8 +4468,8 @@ static int snd_hdspm_put_tco_word_term(struct snd_kcontrol *kcontrol,
{
struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
- if (hdspm->tco->term != ucontrol->value.enumerated.item[0]) {
- hdspm->tco->term = ucontrol->value.enumerated.item[0];
+ if (hdspm->tco->term != ucontrol->value.integer.value[0]) {
+ hdspm->tco->term = ucontrol->value.integer.value[0];
hdspm_tco_write(hdspm);
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 3e3c7f6be29d..b74840b5becf 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -621,7 +621,7 @@ static const struct snd_kcontrol_new rt5645_snd_controls[] = {
/* IN1/IN2 Control */
SOC_SINGLE_TLV("IN1 Boost", RT5645_IN1_CTRL1,
- RT5645_BST_SFT1, 8, 0, bst_tlv),
+ RT5645_BST_SFT1, 12, 0, bst_tlv),
SOC_SINGLE_TLV("IN2 Boost", RT5645_IN2_CTRL,
RT5645_BST_SFT2, 8, 0, bst_tlv),
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index c04c0bc6f58a..52b9ccf6d389 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -360,15 +360,13 @@ static int wm5110_hp_ev(struct snd_soc_dapm_widget *w,
static int wm5110_clear_pga_volume(struct arizona *arizona, int output)
{
- struct reg_sequence clear_pga = {
- ARIZONA_OUTPUT_PATH_CONFIG_1L + output * 4, 0x80
- };
+ unsigned int reg = ARIZONA_OUTPUT_PATH_CONFIG_1L + output * 4;
int ret;
- ret = regmap_multi_reg_write_bypassed(arizona->regmap, &clear_pga, 1);
+ ret = regmap_write(arizona->regmap, reg, 0x80);
if (ret)
dev_err(arizona->dev, "Failed to clear PGA (0x%x): %d\n",
- clear_pga.reg, ret);
+ reg, ret);
return ret;
}
diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c
index c799cca5abeb..6b864c0fc2b6 100644
--- a/sound/soc/codecs/wm8958-dsp2.c
+++ b/sound/soc/codecs/wm8958-dsp2.c
@@ -459,7 +459,7 @@ static int wm8958_put_mbc_enum(struct snd_kcontrol *kcontrol,
struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
struct wm8994 *control = wm8994->wm8994;
- int value = ucontrol->value.integer.value[0];
+ int value = ucontrol->value.enumerated.item[0];
int reg;
/* Don't allow on the fly reconfiguration */
@@ -549,7 +549,7 @@ static int wm8958_put_vss_enum(struct snd_kcontrol *kcontrol,
struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
struct wm8994 *control = wm8994->wm8994;
- int value = ucontrol->value.integer.value[0];
+ int value = ucontrol->value.enumerated.item[0];
int reg;
/* Don't allow on the fly reconfiguration */
@@ -582,7 +582,7 @@ static int wm8958_put_vss_hpf_enum(struct snd_kcontrol *kcontrol,
struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
struct wm8994 *control = wm8994->wm8994;
- int value = ucontrol->value.integer.value[0];
+ int value = ucontrol->value.enumerated.item[0];
int reg;
/* Don't allow on the fly reconfiguration */
@@ -749,7 +749,7 @@ static int wm8958_put_enh_eq_enum(struct snd_kcontrol *kcontrol,
struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
struct wm8994 *control = wm8994->wm8994;
- int value = ucontrol->value.integer.value[0];
+ int value = ucontrol->value.enumerated.item[0];
int reg;
/* Don't allow on the fly reconfiguration */
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 2ccbb322df77..a18aecb49935 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -362,7 +362,7 @@ static int wm8994_put_drc_enum(struct snd_kcontrol *kcontrol,
struct wm8994 *control = wm8994->wm8994;
struct wm8994_pdata *pdata = &control->pdata;
int drc = wm8994_get_drc(kcontrol->id.name);
- int value = ucontrol->value.integer.value[0];
+ int value = ucontrol->value.enumerated.item[0];
if (drc < 0)
return drc;
@@ -469,7 +469,7 @@ static int wm8994_put_retune_mobile_enum(struct snd_kcontrol *kcontrol,
struct wm8994 *control = wm8994->wm8994;
struct wm8994_pdata *pdata = &control->pdata;
int block = wm8994_get_retune_mobile_block(kcontrol->id.name);
- int value = ucontrol->value.integer.value[0];
+ int value = ucontrol->value.enumerated.item[0];
if (block < 0)
return block;
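
The wm8958/wm8994 hunks fix the mirror-image mistake: these are enumerated controls, so they must read value.enumerated.item[], not the signed integer view, and the unsigned selection should be range-checked before use. A hedged sketch (EX_NUM_ITEMS and the ex_ name are hypothetical):

#define EX_NUM_ITEMS 4                  /* hypothetical item count */

static int ex_enum_put(struct snd_kcontrol *kcontrol,
                       struct snd_ctl_elem_value *ucontrol)
{
        unsigned int sel = ucontrol->value.enumerated.item[0];

        if (sel >= EX_NUM_ITEMS)        /* validate against the enum size */
                return -EINVAL;
        /* ... apply the selection ... */
        return 0;
}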
diff --git a/sound/soc/samsung/ac97.c b/sound/soc/samsung/ac97.c
index e4145509d63c..9c5219392460 100644
--- a/sound/soc/samsung/ac97.c
+++ b/sound/soc/samsung/ac97.c
@@ -324,7 +324,7 @@ static const struct snd_soc_component_driver s3c_ac97_component = {
static int s3c_ac97_probe(struct platform_device *pdev)
{
- struct resource *mem_res, *dmatx_res, *dmarx_res, *dmamic_res, *irq_res;
+ struct resource *mem_res, *irq_res;
struct s3c_audio_pdata *ac97_pdata;
int ret;
@@ -335,24 +335,6 @@ static int s3c_ac97_probe(struct platform_device *pdev)
}
/* Check for availability of necessary resource */
- dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (!dmatx_res) {
- dev_err(&pdev->dev, "Unable to get AC97-TX dma resource\n");
- return -ENXIO;
- }
-
- dmarx_res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
- if (!dmarx_res) {
- dev_err(&pdev->dev, "Unable to get AC97-RX dma resource\n");
- return -ENXIO;
- }
-
- dmamic_res = platform_get_resource(pdev, IORESOURCE_DMA, 2);
- if (!dmamic_res) {
- dev_err(&pdev->dev, "Unable to get AC97-MIC dma resource\n");
- return -ENXIO;
- }
-
irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!irq_res) {
dev_err(&pdev->dev, "AC97 IRQ not provided!\n");
@@ -364,11 +346,11 @@ static int s3c_ac97_probe(struct platform_device *pdev)
if (IS_ERR(s3c_ac97.regs))
return PTR_ERR(s3c_ac97.regs);
- s3c_ac97_pcm_out.channel = dmatx_res->start;
+ s3c_ac97_pcm_out.slave = ac97_pdata->dma_playback;
s3c_ac97_pcm_out.dma_addr = mem_res->start + S3C_AC97_PCM_DATA;
- s3c_ac97_pcm_in.channel = dmarx_res->start;
+ s3c_ac97_pcm_in.slave = ac97_pdata->dma_capture;
s3c_ac97_pcm_in.dma_addr = mem_res->start + S3C_AC97_PCM_DATA;
- s3c_ac97_mic_in.channel = dmamic_res->start;
+ s3c_ac97_mic_in.slave = ac97_pdata->dma_capture_mic;
s3c_ac97_mic_in.dma_addr = mem_res->start + S3C_AC97_MIC_DATA;
init_completion(&s3c_ac97.done);
diff --git a/sound/soc/samsung/dma.h b/sound/soc/samsung/dma.h
index 0e85dcfec023..085ef30f5ca2 100644
--- a/sound/soc/samsung/dma.h
+++ b/sound/soc/samsung/dma.h
@@ -15,7 +15,7 @@
#include <sound/dmaengine_pcm.h>
struct s3c_dma_params {
- int channel; /* Channel ID */
+ void *slave; /* Channel ID */
dma_addr_t dma_addr;
int dma_size; /* Size of the DMA transfer */
char *ch_name;
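
Turning .channel into an opaque void *slave lets the Samsung drivers pass a DMA slave descriptor from platform data straight through to the dmaengine filter; the legacy enum channel IDs survive as (void *)(uintptr_t) casts, as the s3c24xx hunks below show. A sketch of how such a cookie is typically consumed (the filter body is an assumption, not the Samsung implementation):

#include <linux/dmaengine.h>

/* A dmaengine filter usually just matches the opaque cookie. */
static bool ex_dma_filter(struct dma_chan *chan, void *filter_data)
{
        return chan->private == filter_data;
}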
diff --git a/sound/soc/samsung/dmaengine.c b/sound/soc/samsung/dmaengine.c
index 506f5bf6d082..727008d57d14 100644
--- a/sound/soc/samsung/dmaengine.c
+++ b/sound/soc/samsung/dmaengine.c
@@ -50,14 +50,14 @@ void samsung_asoc_init_dma_data(struct snd_soc_dai *dai,
if (playback) {
playback_data = &playback->dma_data;
- playback_data->filter_data = (void *)playback->channel;
+ playback_data->filter_data = playback->slave;
playback_data->chan_name = playback->ch_name;
playback_data->addr = playback->dma_addr;
playback_data->addr_width = playback->dma_size;
}
if (capture) {
capture_data = &capture->dma_data;
- capture_data->filter_data = (void *)capture->channel;
+ capture_data->filter_data = capture->slave;
capture_data->chan_name = capture->ch_name;
capture_data->addr = capture->dma_addr;
capture_data->addr_width = capture->dma_size;
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
index ea4ab374a223..e163b0148c4b 100644
--- a/sound/soc/samsung/i2s.c
+++ b/sound/soc/samsung/i2s.c
@@ -480,10 +480,11 @@ static int i2s_set_sysclk(struct snd_soc_dai *dai,
unsigned int cdcon_mask = 1 << i2s_regs->cdclkcon_off;
unsigned int rsrc_mask = 1 << i2s_regs->rclksrc_off;
u32 mod, mask, val = 0;
+ unsigned long flags;
- spin_lock(i2s->lock);
+ spin_lock_irqsave(i2s->lock, flags);
mod = readl(i2s->addr + I2SMOD);
- spin_unlock(i2s->lock);
+ spin_unlock_irqrestore(i2s->lock, flags);
switch (clk_id) {
case SAMSUNG_I2S_OPCLK:
@@ -574,11 +575,11 @@ static int i2s_set_sysclk(struct snd_soc_dai *dai,
return -EINVAL;
}
- spin_lock(i2s->lock);
+ spin_lock_irqsave(i2s->lock, flags);
mod = readl(i2s->addr + I2SMOD);
mod = (mod & ~mask) | val;
writel(mod, i2s->addr + I2SMOD);
- spin_unlock(i2s->lock);
+ spin_unlock_irqrestore(i2s->lock, flags);
return 0;
}
@@ -589,6 +590,7 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
struct i2s_dai *i2s = to_info(dai);
int lrp_shift, sdf_shift, sdf_mask, lrp_rlow, mod_slave;
u32 mod, tmp = 0;
+ unsigned long flags;
lrp_shift = i2s->variant_regs->lrp_off;
sdf_shift = i2s->variant_regs->sdf_off;
@@ -648,7 +650,7 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
return -EINVAL;
}
- spin_lock(i2s->lock);
+ spin_lock_irqsave(i2s->lock, flags);
mod = readl(i2s->addr + I2SMOD);
/*
* Don't change the I2S mode if any controller is active on this
@@ -656,7 +658,7 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
*/
if (any_active(i2s) &&
((mod & (sdf_mask | lrp_rlow | mod_slave)) != tmp)) {
- spin_unlock(i2s->lock);
+ spin_unlock_irqrestore(i2s->lock, flags);
dev_err(&i2s->pdev->dev,
"%s:%d Other DAI busy\n", __func__, __LINE__);
return -EAGAIN;
@@ -665,7 +667,7 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
mod &= ~(sdf_mask | lrp_rlow | mod_slave);
mod |= tmp;
writel(mod, i2s->addr + I2SMOD);
- spin_unlock(i2s->lock);
+ spin_unlock_irqrestore(i2s->lock, flags);
return 0;
}
@@ -675,6 +677,7 @@ static int i2s_hw_params(struct snd_pcm_substream *substream,
{
struct i2s_dai *i2s = to_info(dai);
u32 mod, mask = 0, val = 0;
+ unsigned long flags;
if (!is_secondary(i2s))
mask |= (MOD_DC2_EN | MOD_DC1_EN);
@@ -743,11 +746,11 @@ static int i2s_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
- spin_lock(i2s->lock);
+ spin_lock_irqsave(i2s->lock, flags);
mod = readl(i2s->addr + I2SMOD);
mod = (mod & ~mask) | val;
writel(mod, i2s->addr + I2SMOD);
- spin_unlock(i2s->lock);
+ spin_unlock_irqrestore(i2s->lock, flags);
samsung_asoc_init_dma_data(dai, &i2s->dma_playback, &i2s->dma_capture);
@@ -1257,27 +1260,14 @@ static int samsung_i2s_probe(struct platform_device *pdev)
pri_dai->lock = &pri_dai->spinlock;
if (!np) {
- res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (!res) {
- dev_err(&pdev->dev,
- "Unable to get I2S-TX dma resource\n");
- return -ENXIO;
- }
- pri_dai->dma_playback.channel = res->start;
-
- res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
- if (!res) {
- dev_err(&pdev->dev,
- "Unable to get I2S-RX dma resource\n");
- return -ENXIO;
- }
- pri_dai->dma_capture.channel = res->start;
-
if (i2s_pdata == NULL) {
dev_err(&pdev->dev, "Can't work without s3c_audio_pdata\n");
return -EINVAL;
}
+ pri_dai->dma_playback.slave = i2s_pdata->dma_playback;
+ pri_dai->dma_capture.slave = i2s_pdata->dma_capture;
+
if (&i2s_pdata->type)
i2s_cfg = &i2s_pdata->type.i2s;
@@ -1338,11 +1328,8 @@ static int samsung_i2s_probe(struct platform_device *pdev)
sec_dai->dma_playback.dma_addr = regs_base + I2STXDS;
sec_dai->dma_playback.ch_name = "tx-sec";
- if (!np) {
- res = platform_get_resource(pdev, IORESOURCE_DMA, 2);
- if (res)
- sec_dai->dma_playback.channel = res->start;
- }
+ if (!np)
+ sec_dai->dma_playback.slave = i2s_pdata->dma_play_sec;
sec_dai->dma_playback.dma_size = 4;
sec_dai->addr = pri_dai->addr;
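
The i2s.c conversion from plain spin_lock() to spin_lock_irqsave() makes the I2SMOD read-modify-write sequences safe against the same lock being taken from interrupt context, with flags preserving the caller's interrupt state. A minimal sketch of the pattern:

#include <linux/io.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(ex_lock);

/* RMW of a mode register, safe against IRQ-context users of the lock. */
static void ex_update_mod(void __iomem *addr, u32 mask, u32 val)
{
        unsigned long flags;
        u32 mod;

        spin_lock_irqsave(&ex_lock, flags);     /* also masks local IRQs */
        mod = readl(addr);
        writel((mod & ~mask) | val, addr);
        spin_unlock_irqrestore(&ex_lock, flags);
}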
diff --git a/sound/soc/samsung/pcm.c b/sound/soc/samsung/pcm.c
index b320a9d3fbf8..c77f324e0bb8 100644
--- a/sound/soc/samsung/pcm.c
+++ b/sound/soc/samsung/pcm.c
@@ -486,7 +486,7 @@ static const struct snd_soc_component_driver s3c_pcm_component = {
static int s3c_pcm_dev_probe(struct platform_device *pdev)
{
struct s3c_pcm_info *pcm;
- struct resource *mem_res, *dmatx_res, *dmarx_res;
+ struct resource *mem_res;
struct s3c_audio_pdata *pcm_pdata;
int ret;
@@ -499,18 +499,6 @@ static int s3c_pcm_dev_probe(struct platform_device *pdev)
pcm_pdata = pdev->dev.platform_data;
/* Check for availability of necessary resource */
- dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (!dmatx_res) {
- dev_err(&pdev->dev, "Unable to get PCM-TX dma resource\n");
- return -ENXIO;
- }
-
- dmarx_res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
- if (!dmarx_res) {
- dev_err(&pdev->dev, "Unable to get PCM-RX dma resource\n");
- return -ENXIO;
- }
-
mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem_res) {
dev_err(&pdev->dev, "Unable to get register resource\n");
@@ -568,8 +556,10 @@ static int s3c_pcm_dev_probe(struct platform_device *pdev)
s3c_pcm_stereo_out[pdev->id].dma_addr = mem_res->start
+ S3C_PCM_TXFIFO;
- s3c_pcm_stereo_in[pdev->id].channel = dmarx_res->start;
- s3c_pcm_stereo_out[pdev->id].channel = dmatx_res->start;
+ if (pcm_pdata) {
+ s3c_pcm_stereo_in[pdev->id].slave = pcm_pdata->dma_capture;
+ s3c_pcm_stereo_out[pdev->id].slave = pcm_pdata->dma_playback;
+ }
pcm->dma_capture = &s3c_pcm_stereo_in[pdev->id];
pcm->dma_playback = &s3c_pcm_stereo_out[pdev->id];
diff --git a/sound/soc/samsung/s3c2412-i2s.c b/sound/soc/samsung/s3c2412-i2s.c
index 2b766d212ce0..77d27c85a32a 100644
--- a/sound/soc/samsung/s3c2412-i2s.c
+++ b/sound/soc/samsung/s3c2412-i2s.c
@@ -34,13 +34,13 @@
#include "s3c2412-i2s.h"
static struct s3c_dma_params s3c2412_i2s_pcm_stereo_out = {
- .channel = DMACH_I2S_OUT,
+ .slave = (void *)(uintptr_t)DMACH_I2S_OUT,
.ch_name = "tx",
.dma_size = 4,
};
static struct s3c_dma_params s3c2412_i2s_pcm_stereo_in = {
- .channel = DMACH_I2S_IN,
+ .slave = (void *)(uintptr_t)DMACH_I2S_IN,
.ch_name = "rx",
.dma_size = 4,
};
diff --git a/sound/soc/samsung/s3c24xx-i2s.c b/sound/soc/samsung/s3c24xx-i2s.c
index 5bf723689692..9da3a77ea2c7 100644
--- a/sound/soc/samsung/s3c24xx-i2s.c
+++ b/sound/soc/samsung/s3c24xx-i2s.c
@@ -32,13 +32,13 @@
#include "s3c24xx-i2s.h"
static struct s3c_dma_params s3c24xx_i2s_pcm_stereo_out = {
- .channel = DMACH_I2S_OUT,
+ .slave = (void *)(uintptr_t)DMACH_I2S_OUT,
.ch_name = "tx",
.dma_size = 2,
};
static struct s3c_dma_params s3c24xx_i2s_pcm_stereo_in = {
- .channel = DMACH_I2S_IN,
+ .slave = (void *)(uintptr_t)DMACH_I2S_IN,
.ch_name = "rx",
.dma_size = 2,
};
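
The .slave = (void *)(uintptr_t)DMACH_... conversions above stash a small integer channel id in a generic pointer field. A minimal standalone sketch of the round-trip (plain userspace C; DMACH_EXAMPLE_TX is a made-up stand-in for the real DMACH_* ids):

    #include <stdint.h>
    #include <stdio.h>

    enum { DMACH_EXAMPLE_TX = 12 };     /* hypothetical stand-in for a DMACH_* id */

    int main(void)
    {
            /* widen through uintptr_t so the int-to-pointer cast is well defined */
            void *slave = (void *)(uintptr_t)DMACH_EXAMPLE_TX;

            /* a consumer recovers the id with the casts applied in reverse */
            unsigned int ch = (unsigned int)(uintptr_t)slave;

            printf("channel %u\n", ch); /* prints "channel 12" */
            return 0;
    }
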
diff --git a/sound/soc/samsung/spdif.c b/sound/soc/samsung/spdif.c
index 36dbc0e96004..9dd7ee6d03ff 100644
--- a/sound/soc/samsung/spdif.c
+++ b/sound/soc/samsung/spdif.c
@@ -359,7 +359,7 @@ static const struct snd_soc_component_driver samsung_spdif_component = {
static int spdif_probe(struct platform_device *pdev)
{
struct s3c_audio_pdata *spdif_pdata;
- struct resource *mem_res, *dma_res;
+ struct resource *mem_res;
struct samsung_spdif_info *spdif;
int ret;
@@ -367,12 +367,6 @@ static int spdif_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "Entered %s\n", __func__);
- dma_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (!dma_res) {
- dev_err(&pdev->dev, "Unable to get dma resource.\n");
- return -ENXIO;
- }
-
mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem_res) {
dev_err(&pdev->dev, "Unable to get register resource.\n");
@@ -432,7 +426,7 @@ static int spdif_probe(struct platform_device *pdev)
spdif_stereo_out.dma_size = 2;
spdif_stereo_out.dma_addr = mem_res->start + DATA_OUTBUF;
- spdif_stereo_out.channel = dma_res->start;
+ spdif_stereo_out.slave = spdif_pdata ? spdif_pdata->dma_playback : NULL;
spdif->dma_playback = &spdif_stereo_out;
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index 12a9820feac1..bb82bb966000 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -630,6 +630,7 @@ int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
struct snd_pcm *be_pcm;
char new_name[64];
int ret = 0, direction = 0;
+ int playback = 0, capture = 0;
if (rtd->num_codecs > 1) {
dev_err(rtd->card->dev, "Multicodec not supported for compressed stream\n");
@@ -641,11 +642,27 @@ int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
rtd->dai_link->stream_name, codec_dai->name, num);
if (codec_dai->driver->playback.channels_min)
+ playback = 1;
+ if (codec_dai->driver->capture.channels_min)
+ capture = 1;
+
+ capture = capture && cpu_dai->driver->capture.channels_min;
+ playback = playback && cpu_dai->driver->playback.channels_min;
+
+ /*
+ * Compress devices are unidirectional, so exactly one of the
+ * directions should be set; check for that (XOR)
+ */
+ if (playback + capture != 1) {
+ dev_err(rtd->card->dev, "Invalid direction for compress P %d, C %d\n",
+ playback, capture);
+ return -EINVAL;
+ }
+
+ if (playback)
direction = SND_COMPRESS_PLAYBACK;
- else if (codec_dai->driver->capture.channels_min)
- direction = SND_COMPRESS_CAPTURE;
else
- return -EINVAL;
+ direction = SND_COMPRESS_CAPTURE;
compr = kzalloc(sizeof(*compr), GFP_KERNEL);
if (compr == NULL) {
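
The playback + capture != 1 test above rejects both failure modes (no direction at all, or both at once) with a single comparison. A small sketch of just that check, outside the ASoC code (toy function with invented names, not the driver API):

    #include <stdio.h>

    /* toy model: a compress stream must have exactly one direction */
    static int check_direction(int playback, int capture)
    {
            if (playback + capture != 1)
                    return -1;  /* 0 = neither, 2 = both: invalid */
            return playback;    /* 1 = playback, 0 = capture */
    }

    int main(void)
    {
            printf("%d %d %d %d\n",
                   check_direction(0, 0),  /* -1: no direction */
                   check_direction(1, 1),  /* -1: bidirectional */
                   check_direction(1, 0),  /*  1: playback */
                   check_direction(0, 1)); /*  0: capture */
            return 0;
    }
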
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 7d009428934a..416514fe9e63 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -3568,7 +3568,7 @@ static int snd_soc_dapm_dai_link_get(struct snd_kcontrol *kcontrol,
{
struct snd_soc_dapm_widget *w = snd_kcontrol_chip(kcontrol);
- ucontrol->value.integer.value[0] = w->params_select;
+ ucontrol->value.enumerated.item[0] = w->params_select;
return 0;
}
@@ -3582,13 +3582,13 @@ static int snd_soc_dapm_dai_link_put(struct snd_kcontrol *kcontrol,
if (w->power)
return -EBUSY;
- if (ucontrol->value.integer.value[0] == w->params_select)
+ if (ucontrol->value.enumerated.item[0] == w->params_select)
return 0;
- if (ucontrol->value.integer.value[0] >= w->num_params)
+ if (ucontrol->value.enumerated.item[0] >= w->num_params)
return -EINVAL;
- w->params_select = ucontrol->value.integer.value[0];
+ w->params_select = ucontrol->value.enumerated.item[0];
return 0;
}
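
Background for the enumerated/integer switch above: struct snd_ctl_elem_value carries a union, and an ENUMERATED control must be accessed through value.enumerated.item[] (unsigned int) rather than value.integer.value[] (long); the members have different widths, so aliasing them only happens to work on some ABIs. A simplified illustration (the union below mimics, but is not, the real UAPI struct):

    #include <stdio.h>

    /* simplified stand-in for the union inside struct snd_ctl_elem_value */
    union elem_value {
            struct { long value[8]; } integer;
            struct { unsigned int item[8]; } enumerated;
    };

    int main(void)
    {
            union elem_value v = {0};

            v.enumerated.item[0] = 3;
            /* on 64-bit big-endian, integer.value[0] would read the wrong
             * half of the long; the accessor must match the control type */
            printf("item[0]=%u integer[0]=%ld\n",
                   v.enumerated.item[0], v.integer.value[0]);
            return 0;
    }
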
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index c86dc96e8986..65b936e251ea 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -1743,7 +1743,8 @@ int dpcm_be_dai_hw_free(struct snd_soc_pcm_runtime *fe, int stream)
(be->dpcm[stream].state != SND_SOC_DPCM_STATE_PREPARE) &&
(be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE) &&
(be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED) &&
- (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP))
+ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) &&
+ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND))
continue;
dev_dbg(be->dev, "ASoC: hw_free BE %s\n",
diff --git a/sound/sparc/Kconfig b/sound/sparc/Kconfig
index d75deba5617d..dfcd38647606 100644
--- a/sound/sparc/Kconfig
+++ b/sound/sparc/Kconfig
@@ -22,6 +22,7 @@ config SND_SUN_AMD7930
config SND_SUN_CS4231
tristate "Sun CS4231"
select SND_PCM
+ select SND_TIMER
help
Say Y here to include support for CS4231 sound device on Sun.
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 18f56646ce86..1f09d9591276 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -675,6 +675,8 @@ int snd_usb_autoresume(struct snd_usb_audio *chip)
void snd_usb_autosuspend(struct snd_usb_audio *chip)
{
+ if (atomic_read(&chip->shutdown))
+ return;
if (atomic_dec_and_test(&chip->active))
usb_autopm_put_interface(chip->pm_intf);
}
diff --git a/sound/usb/clock.c b/sound/usb/clock.c
index 2ed260b10f6d..7ccbcaf6a147 100644
--- a/sound/usb/clock.c
+++ b/sound/usb/clock.c
@@ -285,6 +285,8 @@ static int set_sample_rate_v1(struct snd_usb_audio *chip, int iface,
unsigned char data[3];
int err, crate;
+ if (get_iface_desc(alts)->bNumEndpoints < 1)
+ return -EINVAL;
ep = get_endpoint(alts, 0)->bEndpointAddress;
/* if endpoint doesn't have sampling rate control, bail out */
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 7b1cb365ffab..c07a7eda42a2 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -438,6 +438,9 @@ exit_clear:
*
* New endpoints will be added to chip->ep_list and must be freed by
* calling snd_usb_endpoint_free().
+ *
+ * For SND_USB_ENDPOINT_TYPE_SYNC, the caller needs to guarantee that
+ * bNumEndpoints > 1 beforehand.
*/
struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip,
struct usb_host_interface *alts,
diff --git a/sound/usb/midi.c b/sound/usb/midi.c
index 5b4c58c3e2c5..b21b76690b31 100644
--- a/sound/usb/midi.c
+++ b/sound/usb/midi.c
@@ -2454,7 +2454,6 @@ int snd_usbmidi_create(struct snd_card *card,
else
err = snd_usbmidi_create_endpoints(umidi, endpoints);
if (err < 0) {
- snd_usbmidi_free(umidi);
return err;
}
diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
index ddca6547399b..1f8fb0d904e0 100644
--- a/sound/usb/mixer_maps.c
+++ b/sound/usb/mixer_maps.c
@@ -349,6 +349,16 @@ static struct usbmix_name_map bose_companion5_map[] = {
};
/*
+ * Dell usb dock with ALC4020 codec has a firmware problem where it gets
+ * screwed up when a zero volume is passed; just skip it as a workaround
+ */
+static const struct usbmix_name_map dell_alc4020_map[] = {
+ { 16, NULL },
+ { 19, NULL },
+ { 0 }
+};
+
+/*
* Control map entries
*/
@@ -431,6 +441,10 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
.map = aureon_51_2_map,
},
{
+ .id = USB_ID(0x0bda, 0x4014),
+ .map = dell_alc4020_map,
+ },
+ {
.id = USB_ID(0x0dba, 0x1000),
.map = mbox1_map,
},
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 0ce888dceed0..f6c3bf79af9a 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -793,7 +793,7 @@ static int snd_nativeinstruments_control_put(struct snd_kcontrol *kcontrol,
return 0;
kcontrol->private_value &= ~(0xff << 24);
- kcontrol->private_value |= newval;
+ kcontrol->private_value |= (unsigned int)newval << 24;
err = snd_ni_update_cur_val(list);
return err < 0 ? err : 1;
}
@@ -1519,7 +1519,11 @@ static int snd_microii_spdif_default_get(struct snd_kcontrol *kcontrol,
/* use known values for that card: interface#1 altsetting#1 */
iface = usb_ifnum_to_if(chip->dev, 1);
+ if (!iface || iface->num_altsetting < 2)
+ return -EINVAL;
alts = &iface->altsetting[1];
+ if (get_iface_desc(alts)->bNumEndpoints < 1)
+ return -EINVAL;
ep = get_endpoint(alts, 0)->bEndpointAddress;
err = snd_usb_ctl_msg(chip->dev,
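
In the Native Instruments fix above, private_value packs the cached control value into its top byte; the old code cleared that byte (&= ~(0xff << 24)) but OR'd the new value in unshifted, corrupting the low byte instead. A worked example in isolation (invented values):

    #include <stdio.h>

    int main(void)
    {
            unsigned int private_value = 0xAA112233; /* top byte = cached value */
            unsigned char newval = 0x55;

            private_value &= ~(0xffu << 24);             /* clear old top byte */
            private_value |= (unsigned int)newval << 24; /* the fix: shift first */

            printf("0x%08X\n", private_value);           /* 0x55112233 */
            return 0;
    }
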
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 9245f52d43bd..44d178ee9177 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -159,6 +159,8 @@ static int init_pitch_v1(struct snd_usb_audio *chip, int iface,
unsigned char data[1];
int err;
+ if (get_iface_desc(alts)->bNumEndpoints < 1)
+ return -EINVAL;
ep = get_endpoint(alts, 0)->bEndpointAddress;
data[0] = 1;
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index b6c0c8e3b450..001fb4dc0722 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -150,6 +150,7 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
usb_audio_err(chip, "cannot memdup\n");
return -ENOMEM;
}
+ INIT_LIST_HEAD(&fp->list);
if (fp->nr_rates > MAX_NR_RATES) {
kfree(fp);
return -EINVAL;
@@ -167,19 +168,20 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
stream = (fp->endpoint & USB_DIR_IN)
? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
err = snd_usb_add_audio_stream(chip, stream, fp);
- if (err < 0) {
- kfree(fp);
- kfree(rate_table);
- return err;
- }
+ if (err < 0)
+ goto error;
if (fp->iface != get_iface_desc(&iface->altsetting[0])->bInterfaceNumber ||
fp->altset_idx >= iface->num_altsetting) {
- kfree(fp);
- kfree(rate_table);
- return -EINVAL;
+ err = -EINVAL;
+ goto error;
}
alts = &iface->altsetting[fp->altset_idx];
altsd = get_iface_desc(alts);
+ if (altsd->bNumEndpoints < 1) {
+ err = -EINVAL;
+ goto error;
+ }
+
fp->protocol = altsd->bInterfaceProtocol;
if (fp->datainterval == 0)
@@ -190,6 +192,12 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
snd_usb_init_pitch(chip, fp->iface, alts, fp);
snd_usb_init_sample_rate(chip, fp->iface, alts, fp, fp->rate_max);
return 0;
+
+ error:
+ list_del(&fp->list); /* unlink to avoid a double-free */
+ kfree(fp);
+ kfree(rate_table);
+ return err;
}
static int create_auto_pcm_quirk(struct snd_usb_audio *chip,
@@ -462,6 +470,7 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip,
fp->ep_attr = get_endpoint(alts, 0)->bmAttributes;
fp->datainterval = 0;
fp->maxpacksize = le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize);
+ INIT_LIST_HEAD(&fp->list);
switch (fp->maxpacksize) {
case 0x120:
@@ -485,6 +494,7 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip,
? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
err = snd_usb_add_audio_stream(chip, stream, fp);
if (err < 0) {
+ list_del(&fp->list); /* unlink to avoid a double-free */
kfree(fp);
return err;
}
@@ -1121,10 +1131,15 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
switch (chip->usb_id) {
case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema */
case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */
+ case USB_ID(0x045E, 0x076E): /* MS Lifecam HD-5001 */
+ case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */
case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
+ case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */
+ case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
+ case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
return true;
}
@@ -1205,8 +1220,12 @@ void snd_usb_set_interface_quirk(struct usb_device *dev)
* "Playback Design" products need a 50ms delay after setting the
* USB interface.
*/
- if (le16_to_cpu(dev->descriptor.idVendor) == 0x23ba)
+ switch (le16_to_cpu(dev->descriptor.idVendor)) {
+ case 0x23ba: /* Playback Design */
+ case 0x0644: /* TEAC Corp. */
mdelay(50);
+ break;
+ }
}
void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
@@ -1221,6 +1240,14 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
(requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
mdelay(20);
+ /*
+ * "TEAC Corp." products need a 20ms delay after each
+ * class compliant request
+ */
+ if ((le16_to_cpu(dev->descriptor.idVendor) == 0x0644) &&
+ (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
+ mdelay(20);
+
/* Marantz/Denon devices with USB DAC functionality need a delay
* after each class compliant request
*/
@@ -1269,6 +1296,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
case USB_ID(0x20b1, 0x3008): /* iFi Audio micro/nano iDSD */
case USB_ID(0x20b1, 0x2008): /* Matrix Audio X-Sabre */
case USB_ID(0x20b1, 0x300a): /* Matrix Audio Mini-i Pro */
+ case USB_ID(0x22d9, 0x0416): /* OPPO HA-1 */
if (fp->altsetting == 2)
return SNDRV_PCM_FMTBIT_DSD_U32_BE;
break;
@@ -1277,6 +1305,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
case USB_ID(0x20b1, 0x2009): /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */
case USB_ID(0x20b1, 0x2023): /* JLsounds I2SoverUSB */
case USB_ID(0x20b1, 0x3023): /* Aune X1S 32BIT/384 DSD DAC */
+ case USB_ID(0x2616, 0x0106): /* PS Audio NuWave DAC */
if (fp->altsetting == 3)
return SNDRV_PCM_FMTBIT_DSD_U32_BE;
break;
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
index 8ee14f2365e7..3b23102230c0 100644
--- a/sound/usb/stream.c
+++ b/sound/usb/stream.c
@@ -316,7 +316,9 @@ static struct snd_pcm_chmap_elem *convert_chmap(int channels, unsigned int bits,
/*
* add this endpoint to the chip instance.
* if a stream with the same endpoint already exists, append to it.
- * if not, create a new pcm stream.
+ * if not, create a new pcm stream. note, fp is added to the substream
+ * fmt_list and will be freed on the chip instance release. do not free
+ * fp without first unlinking it from the fmt_list, or it will be
+ * double-freed on the chip release.
*/
int snd_usb_add_audio_stream(struct snd_usb_audio *chip,
int stream,
@@ -677,6 +679,7 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no)
* (fp->maxpacksize & 0x7ff);
fp->attributes = parse_uac_endpoint_attributes(chip, alts, protocol, iface_no);
fp->clock = clock;
+ INIT_LIST_HEAD(&fp->list);
/* some quirks for attributes here */
@@ -725,6 +728,7 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no)
dev_dbg(&dev->dev, "%u:%d: add audio endpoint %#x\n", iface_no, altno, fp->endpoint);
err = snd_usb_add_audio_stream(chip, stream, fp);
if (err < 0) {
+ list_del(&fp->list); /* unlink to avoid a double-free */
kfree(fp->rate_table);
kfree(fp->chmap);
kfree(fp);
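
The INIT_LIST_HEAD/list_del pairing added across quirks.c and stream.c works because list_del on an initialized but never-linked head degenerates to a harmless self-assignment, so every error path can unlink unconditionally before kfree. A userspace re-creation of the idiom (minimal list, not the kernel's implementation):

    #include <stdlib.h>

    struct list_head { struct list_head *next, *prev; };

    static void init_list_head(struct list_head *h) { h->next = h->prev = h; }

    static void list_del(struct list_head *e)
    {
            e->prev->next = e->next; /* self-linked head: harmless no-op */
            e->next->prev = e->prev;
    }

    struct fmt { int rate; struct list_head list; };

    int main(void)
    {
            struct fmt *fp = malloc(sizeof(*fp));

            if (!fp)
                    return 1;
            init_list_head(&fp->list); /* mirrors INIT_LIST_HEAD(&fp->list) */
            /* ...suppose the add-stream step failed before linking fp... */
            list_del(&fp->list);       /* safe whether or not fp was linked */
            free(fp);                  /* exactly one free, no double-free */
            return 0;
    }
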
diff --git a/tools/hv/Makefile b/tools/hv/Makefile
index a8ab79556926..a8c4644022a6 100644
--- a/tools/hv/Makefile
+++ b/tools/hv/Makefile
@@ -5,6 +5,8 @@ PTHREAD_LIBS = -lpthread
WARNINGS = -Wall -Wextra
CFLAGS = $(WARNINGS) -g $(PTHREAD_LIBS) $(shell getconf LFS_CFLAGS)
+CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include
+
all: hv_kvp_daemon hv_vss_daemon hv_fcopy_daemon
%: %.c
$(CC) $(CFLAGS) -o $@ $^
diff --git a/tools/hv/hv_vss_daemon.c b/tools/hv/hv_vss_daemon.c
index 96234b638249..5d51d6ff08e6 100644
--- a/tools/hv/hv_vss_daemon.c
+++ b/tools/hv/hv_vss_daemon.c
@@ -254,7 +254,7 @@ int main(int argc, char *argv[])
syslog(LOG_ERR, "Illegal op:%d\n", op);
}
vss_msg->error = error;
- len = write(vss_fd, &error, sizeof(struct hv_vss_msg));
+ len = write(vss_fd, vss_msg, sizeof(struct hv_vss_msg));
if (len != sizeof(struct hv_vss_msg)) {
syslog(LOG_ERR, "write failed; error: %d %s", errno,
strerror(errno));
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 2a912df6771b..68276f35e323 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -4968,13 +4968,12 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
sizeof(long) != 8) {
char *p;
- ls = 2;
/* make %l into %ll */
- p = strchr(format, 'l');
- if (p)
+ if (ls == 1 && (p = strchr(format, 'l')))
memmove(p+1, p, strlen(p)+1);
else if (strcmp(format, "%p") == 0)
strcpy(format, "0x%llx");
+ ls = 2;
}
switch (ls) {
case -2:
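
The %l to %ll widening above edits the format string in place: memmove shifts the tail (including the terminating NUL) one byte right, which duplicates the 'l' at p. The same operation in isolation:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char format[16] = "%lx";
            char *p = strchr(format, 'l');

            if (p) /* copy "lx\0" one byte right; buffer now reads "%llx" */
                    memmove(p + 1, p, strlen(p) + 1);

            printf("%s\n", format); /* prints "%llx" */
            return 0;
    }
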
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 6fc8cd753e1a..a35db828bd0d 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -399,6 +399,9 @@ static void tracepoint_error(struct parse_events_error *e, int err,
{
char help[BUFSIZ];
+ if (!e)
+ return;
+
/*
* We get error directly from syscall errno ( > 0),
* or from encoded pointer's error ( < 0).
@@ -2098,11 +2101,11 @@ char *parse_events_formats_error_string(char *additional_terms)
/* valid terms */
if (additional_terms) {
- if (!asprintf(&str, "valid terms: %s,%s",
- additional_terms, static_terms))
+ if (asprintf(&str, "valid terms: %s,%s",
+ additional_terms, static_terms) < 0)
goto fail;
} else {
- if (!asprintf(&str, "valid terms: %s", static_terms))
+ if (asprintf(&str, "valid terms: %s", static_terms) < 0)
goto fail;
}
return str;
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index e4b173dec4b9..6f2a0279476c 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -283,13 +283,12 @@ static int pmu_aliases_parse(char *dir, struct list_head *head)
{
struct dirent *evt_ent;
DIR *event_dir;
- int ret = 0;
event_dir = opendir(dir);
if (!event_dir)
return -EINVAL;
- while (!ret && (evt_ent = readdir(event_dir))) {
+ while ((evt_ent = readdir(event_dir))) {
char path[PATH_MAX];
char *name = evt_ent->d_name;
FILE *file;
@@ -305,17 +304,19 @@ static int pmu_aliases_parse(char *dir, struct list_head *head)
snprintf(path, PATH_MAX, "%s/%s", dir, name);
- ret = -EINVAL;
file = fopen(path, "r");
- if (!file)
- break;
+ if (!file) {
+ pr_debug("Cannot open %s\n", path);
+ continue;
+ }
- ret = perf_pmu__new_alias(head, dir, name, file);
+ if (perf_pmu__new_alias(head, dir, name, file) < 0)
+ pr_debug("Cannot set up %s\n", name);
fclose(file);
}
closedir(event_dir);
- return ret;
+ return 0;
}
/*
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index c35ffdd360fe..468de95bc8bb 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -972,7 +972,7 @@ static struct machine *machines__find_for_cpumode(struct machines *machines,
machine = machines__find(machines, pid);
if (!machine)
- machine = machines__find(machines, DEFAULT_GUEST_KERNEL_ID);
+ machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
return machine;
}
diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
index 1833103768cb..c8680984d2d6 100644
--- a/tools/perf/util/setup.py
+++ b/tools/perf/util/setup.py
@@ -22,6 +22,7 @@ cflags = getenv('CFLAGS', '').split()
# switch off several checks (need to be at the end of cflags list)
cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter' ]
+src_perf = getenv('srctree') + '/tools/perf'
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
libtraceevent = getenv('LIBTRACEEVENT')
@@ -30,6 +31,9 @@ libapikfs = getenv('LIBAPI')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
+# use full paths with source files
+ext_sources = map(lambda x: '%s/%s' % (src_perf, x), ext_sources)
+
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index 2d9d8306dbd3..4a3a72cb5805 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -310,7 +310,6 @@ int perf_stat_process_counter(struct perf_stat_config *config,
int i, ret;
aggr->val = aggr->ena = aggr->run = 0;
- init_stats(ps->res_stats);
if (counter->per_pkg)
zero_per_pkg(counter);
diff --git a/tools/testing/selftests/efivarfs/efivarfs.sh b/tools/testing/selftests/efivarfs/efivarfs.sh
index 77edcdcc016b..057278448515 100755
--- a/tools/testing/selftests/efivarfs/efivarfs.sh
+++ b/tools/testing/selftests/efivarfs/efivarfs.sh
@@ -88,7 +88,11 @@ test_delete()
exit 1
fi
- rm $file
+ rm $file 2>/dev/null
+ if [ $? -ne 0 ]; then
+ chattr -i $file
+ rm $file
+ fi
if [ -e $file ]; then
echo "$file couldn't be deleted" >&2
@@ -111,6 +115,7 @@ test_zero_size_delete()
exit 1
fi
+ chattr -i $file
printf "$attrs" > $file
if [ -e $file ]; then
@@ -141,7 +146,11 @@ test_valid_filenames()
echo "$file could not be created" >&2
ret=1
else
- rm $file
+ rm $file 2>/dev/null
+ if [ $? -ne 0 ]; then
+ chattr -i $file
+ rm $file
+ fi
fi
done
@@ -174,7 +183,11 @@ test_invalid_filenames()
if [ -e $file ]; then
echo "Creating $file should have failed" >&2
- rm $file
+ rm $file 2>/dev/null
+ if [ $? -ne 0 ]; then
+ chattr -i $file
+ rm $file
+ fi
ret=1
fi
done
diff --git a/tools/testing/selftests/efivarfs/open-unlink.c b/tools/testing/selftests/efivarfs/open-unlink.c
index 8c0764407b3c..4af74f733036 100644
--- a/tools/testing/selftests/efivarfs/open-unlink.c
+++ b/tools/testing/selftests/efivarfs/open-unlink.c
@@ -1,10 +1,68 @@
+#include <errno.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
+#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
+#include <linux/fs.h>
+
+static int set_immutable(const char *path, int immutable)
+{
+ unsigned int flags;
+ int fd;
+ int rc;
+ int error;
+
+ fd = open(path, O_RDONLY);
+ if (fd < 0)
+ return fd;
+
+ rc = ioctl(fd, FS_IOC_GETFLAGS, &flags);
+ if (rc < 0) {
+ error = errno;
+ close(fd);
+ errno = error;
+ return rc;
+ }
+
+ if (immutable)
+ flags |= FS_IMMUTABLE_FL;
+ else
+ flags &= ~FS_IMMUTABLE_FL;
+
+ rc = ioctl(fd, FS_IOC_SETFLAGS, &flags);
+ error = errno;
+ close(fd);
+ errno = error;
+ return rc;
+}
+
+static int get_immutable(const char *path)
+{
+ unsigned int flags;
+ int fd;
+ int rc;
+ int error;
+
+ fd = open(path, O_RDONLY);
+ if (fd < 0)
+ return fd;
+
+ rc = ioctl(fd, FS_IOC_GETFLAGS, &flags);
+ if (rc < 0) {
+ error = errno;
+ close(fd);
+ errno = error;
+ return rc;
+ }
+ close(fd);
+ if (flags & FS_IMMUTABLE_FL)
+ return 1;
+ return 0;
+}
int main(int argc, char **argv)
{
@@ -27,7 +85,7 @@ int main(int argc, char **argv)
buf[4] = 0;
/* create a test variable */
- fd = open(path, O_WRONLY | O_CREAT);
+ fd = open(path, O_WRONLY | O_CREAT, 0600);
if (fd < 0) {
perror("open(O_WRONLY)");
return EXIT_FAILURE;
@@ -41,6 +99,18 @@ int main(int argc, char **argv)
close(fd);
+ rc = get_immutable(path);
+ if (rc < 0) {
+ perror("ioctl(FS_IOC_GETFLAGS)");
+ return EXIT_FAILURE;
+ } else if (rc) {
+ rc = set_immutable(path, 0);
+ if (rc < 0) {
+ perror("ioctl(FS_IOC_SETFLAGS)");
+ return EXIT_FAILURE;
+ }
+ }
+
fd = open(path, O_RDONLY);
if (fd < 0) {
perror("open");
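
A detail worth noting in the set_immutable()/get_immutable() helpers above: close() can clobber errno, so the ioctl error paths save errno, close the fd, then restore errno before returning, letting the caller's perror() report the ioctl failure rather than the close. The pattern distilled (hypothetical helper name):

    #include <errno.h>
    #include <unistd.h>

    /* preserve the failing call's errno across the cleanup close() */
    static int fail_and_close(int fd, int rc)
    {
            int saved = errno;

            close(fd);     /* may overwrite errno */
            errno = saved; /* restore what the caller should see */
            return rc;
    }

    int main(void)
    {
            errno = EINVAL;                  /* pretend an ioctl just failed */
            int rc = fail_and_close(-1, -1); /* close(-1) sets errno = EBADF */
            /* errno is back to EINVAL here, as the caller expects */
            return (rc == -1 && errno == EINVAL) ? 0 : 1;
    }
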
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 69bca185c471..ea6064696fe4 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -143,7 +143,7 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level)
* Check if there was a change in the timer state (should we raise or lower
* the line level to the GIC).
*/
-static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
+static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
@@ -154,10 +154,12 @@ static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
* until we call this function from kvm_timer_flush_hwstate.
*/
if (!vgic_initialized(vcpu->kvm))
- return;
+ return -ENODEV;
if (kvm_timer_should_fire(vcpu) != timer->irq.level)
kvm_timer_update_irq(vcpu, !timer->irq.level);
+
+ return 0;
}
/*
@@ -218,7 +220,8 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
bool phys_active;
int ret;
- kvm_timer_update_state(vcpu);
+ if (kvm_timer_update_state(vcpu))
+ return;
/*
* If we enter the guest with the virtual input level to the VGIC
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 7a2f449bd85d..5d10f104f3eb 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1875,8 +1875,8 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
-
- int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;
+ int nr_longs = BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);
+ int sz = nr_longs * sizeof(unsigned long);
vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
vgic_cpu->active_shared = kzalloc(sz, GFP_KERNEL);
vgic_cpu->pend_act_shared = kzalloc(sz, GFP_KERNEL);
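
Why BITS_TO_LONGS above: dividing a bit count by 8 yields bytes, but it truncates any remainder and need not be a multiple of sizeof(long), so the kernel's bitmap helpers (which read whole longs) could step past the allocation. Rounding up to whole longs fixes both problems; a userspace sketch (the macros mimic the kernel's, simplified):

    #include <stdio.h>

    #define BITS_PER_LONG    (8 * sizeof(unsigned long))
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    int main(void)
    {
            unsigned int bits = 36; /* e.g. a shared-IRQ count */

            /* old: 36 / 8 = 4 bytes; truncated and not long-aligned */
            printf("old sz = %u bytes\n", bits / 8);
            /* new: one whole long = 8 bytes on a 64-bit machine */
            printf("new sz = %zu bytes\n",
                   BITS_TO_LONGS(bits) * sizeof(unsigned long));
            return 0;
    }
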
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 77d42be6970e..4f70d12e392d 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -173,7 +173,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
* do alloc nowait since if we are going to sleep anyway we
* may as well sleep faulting in page
*/
- work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT);
+ work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
if (!work)
return 0;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 484079efea5b..fefbf2d148ef 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -547,6 +547,16 @@ static struct kvm *kvm_create_vm(unsigned long type)
if (!kvm)
return ERR_PTR(-ENOMEM);
+ spin_lock_init(&kvm->mmu_lock);
+ atomic_inc(&current->mm->mm_count);
+ kvm->mm = current->mm;
+ kvm_eventfd_init(kvm);
+ mutex_init(&kvm->lock);
+ mutex_init(&kvm->irq_lock);
+ mutex_init(&kvm->slots_lock);
+ atomic_set(&kvm->users_count, 1);
+ INIT_LIST_HEAD(&kvm->devices);
+
r = kvm_arch_init_vm(kvm, type);
if (r)
goto out_err_no_disable;
@@ -579,16 +589,6 @@ static struct kvm *kvm_create_vm(unsigned long type)
goto out_err;
}
- spin_lock_init(&kvm->mmu_lock);
- kvm->mm = current->mm;
- atomic_inc(&kvm->mm->mm_count);
- kvm_eventfd_init(kvm);
- mutex_init(&kvm->lock);
- mutex_init(&kvm->irq_lock);
- mutex_init(&kvm->slots_lock);
- atomic_set(&kvm->users_count, 1);
- INIT_LIST_HEAD(&kvm->devices);
-
r = kvm_init_mmu_notifier(kvm);
if (r)
goto out_err;
@@ -613,6 +613,7 @@ out_err_no_disable:
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
kvm_free_memslots(kvm, kvm->memslots[i]);
kvm_arch_free_vm(kvm);
+ mmdrop(current->mm);
return ERR_PTR(r);
}
@@ -1961,6 +1962,9 @@ static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
else
val *= halt_poll_ns_grow;
+ if (val > halt_poll_ns)
+ val = halt_poll_ns;
+
vcpu->halt_poll_ns = val;
trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
}
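
The clamp added to grow_halt_poll_ns() keeps the per-vcpu polling window from growing past the global halt_poll_ns module parameter. A toy model of the saturating growth (invented starting values, not the kernel's defaults):

    #include <stdio.h>

    static unsigned int halt_poll_ns = 500000;  /* global cap (module param) */
    static unsigned int halt_poll_ns_grow = 2;  /* growth factor */

    static unsigned int grow(unsigned int val)
    {
            val *= halt_poll_ns_grow;
            if (val > halt_poll_ns) /* the added clamp */
                    val = halt_poll_ns;
            return val;
    }

    int main(void)
    {
            unsigned int v = 10000;

            for (int i = 0; i < 8; i++) {
                    v = grow(v);
                    printf("%u\n", v); /* doubles, then saturates at 500000 */
            }
            return 0;
    }
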